#include <linux/etherdevice.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/nsproxy.h>
#include <linux/compat.h>
#include <linux/if_tun.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/cache.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/cdev.h>
#include <linux/idr.h>
#include <linux/fs.h>
#include <linux/uio.h>

#include <net/net_namespace.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <linux/virtio_net.h>
#include <linux/skb_array.h>

/*
 * A macvtap queue is the central object of this driver; it connects
 * an open character device to a macvlan interface. There can be
 * multiple queues on one interface, which map back to queues
 * implemented in hardware on the underlying device.
 *
 * macvtap_proto is used to allocate queues through the sock allocation
 * mechanism.
 */
struct macvtap_queue {
	struct sock sk;
	struct socket sock;
	struct socket_wq wq;
	int vnet_hdr_sz;
	struct macvlan_dev __rcu *vlan;
	struct file *file;
	unsigned int flags;
	u16 queue_index;
	bool enabled;
	struct list_head next;
	struct skb_array skb_array;
};
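
/*
 * Illustrative userspace sketch (an assumption for documentation, not
 * part of this driver): each queue is reached through the per-device
 * character node that macvtap_device_event() below creates, named
 * "tap<ifindex>".  A minimal, hypothetical consumer could look like:
 *
 *	int fd = open("/dev/tap5", O_RDWR);	// node name is hypothetical
 *	struct ifreq ifr = { .ifr_flags = IFF_TAP | IFF_NO_PI };
 *	ioctl(fd, TUNSETIFF, &ifr);	// tun-compatible setup; also clears
 *					// IFF_VNET_HDR from the open() defaults
 *	char buf[2048];
 *	ssize_t n = read(fd, buf, sizeof(buf));	// one frame per read()
 *	if (n > 0)
 *		write(fd, buf, n);		// inject a frame back
 */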

#define MACVTAP_FEATURES (IFF_VNET_HDR | IFF_MULTI_QUEUE)

#define MACVTAP_VNET_LE 0x80000000
#define MACVTAP_VNET_BE 0x40000000

#ifdef CONFIG_TUN_VNET_CROSS_LE
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_BE ? false :
		virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s = !!(q->flags & MACVTAP_VNET_BE);

	if (put_user(s, sp))
		return -EFAULT;

	return 0;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *sp)
{
	int s;

	if (get_user(s, sp))
		return -EFAULT;

	if (s)
		q->flags |= MACVTAP_VNET_BE;
	else
		q->flags &= ~MACVTAP_VNET_BE;

	return 0;
}
#else
static inline bool macvtap_legacy_is_little_endian(struct macvtap_queue *q)
{
	return virtio_legacy_is_little_endian();
}

static long macvtap_get_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}

static long macvtap_set_vnet_be(struct macvtap_queue *q, int __user *argp)
{
	return -EINVAL;
}
#endif /* CONFIG_TUN_VNET_CROSS_LE */

static inline bool macvtap_is_little_endian(struct macvtap_queue *q)
{
	return q->flags & MACVTAP_VNET_LE ||
		macvtap_legacy_is_little_endian(q);
}

static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val)
{
	return __virtio16_to_cpu(macvtap_is_little_endian(q), val);
}

static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val)
{
	return __cpu_to_virtio16(macvtap_is_little_endian(q), val);
}
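
/*
 * Sketch of how userspace drives the helpers above (hedged example, not
 * part of this driver): a legacy big-endian guest run from a
 * little-endian host flips the vnet header byte order with
 * TUNSETVNETBE, which only succeeds when CONFIG_TUN_VNET_CROSS_LE is
 * set:
 *
 *	int on = 1;
 *	if (ioctl(fd, TUNSETVNETBE, &on) < 0)
 *		// -EINVAL here means the kernel lacks cross-endian support
 *		handle_error();
 *
 * A virtio 1.0 peer uses TUNSETVNETLE instead; the MACVTAP_VNET_LE
 * flag takes precedence in macvtap_is_little_endian().
 */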

static struct proto macvtap_proto = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.obj_size = sizeof (struct macvtap_queue),
};

/*
 * Variables for dealing with macvtap device numbers.
 */
static dev_t macvtap_major;
#define MACVTAP_NUM_DEVS (1U << MINORBITS)
static DEFINE_MUTEX(minor_lock);
static DEFINE_IDR(minor_idr);

#define GOODCOPY_LEN 128
static const void *macvtap_net_namespace(struct device *d)
{
	struct net_device *dev = to_net_dev(d->parent);
	return dev_net(dev);
}

static struct class macvtap_class = {
	.name = "macvtap",
	.owner = THIS_MODULE,
	.ns_type = &net_ns_type_operations,
	.namespace = macvtap_net_namespace,
};
static struct cdev macvtap_cdev;

static const struct proto_ops macvtap_socket_ops;

#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
		      NETIF_F_TSO6 | NETIF_F_UFO)
#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG | NETIF_F_FRAGLIST)

static struct macvlan_dev *macvtap_get_vlan_rcu(const struct net_device *dev)
{
	return rcu_dereference(dev->rx_handler_data);
}

/*
 * RCU usage:
 * The macvtap_queue and the macvlan_dev are loosely coupled; the
 * pointers from one to the other can only be read while rcu_read_lock
 * or rtnl is held.
 *
 * Both the file and the macvlan_dev hold a reference on the macvtap_queue
 * through sock_hold(&q->sk). When the macvlan_dev goes away first,
 * q->vlan becomes inaccessible. When the file gets closed,
 * macvtap_get_queue() fails.
 *
 * There may still be references to the struct sock inside of the
 * queue from outbound SKBs, but these never reference back to the
 * file or the dev. The data structure is freed through __sk_free
 * when both our references and any pending SKBs are gone.
 */
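
/*
 * Reader-side sketch of the scheme above, the pattern used throughout
 * this file (see e.g. macvtap_get_user()):
 *
 *	rcu_read_lock();
 *	vlan = rcu_dereference(q->vlan);
 *	if (vlan)
 *		;	// vlan->dev is safe to use until rcu_read_unlock()
 *	rcu_read_unlock();
 */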

static int macvtap_enable_queue(struct net_device *dev, struct file *file,
				struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err = -EINVAL;

	ASSERT_RTNL();

	if (q->enabled)
		goto out;

	err = 0;
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	q->queue_index = vlan->numvtaps;
	q->enabled = true;

	vlan->numvtaps++;
out:
	return err;
}

/* Requires RTNL */
static int macvtap_set_queue(struct net_device *dev, struct file *file,
			     struct macvtap_queue *q)
{
	struct macvlan_dev *vlan = netdev_priv(dev);

	if (vlan->numqueues == MAX_MACVTAP_QUEUES)
		return -EBUSY;

	rcu_assign_pointer(q->vlan, vlan);
	rcu_assign_pointer(vlan->taps[vlan->numvtaps], q);
	sock_hold(&q->sk);

	q->file = file;
	q->queue_index = vlan->numvtaps;
	q->enabled = true;
	file->private_data = q;
	list_add_tail(&q->next, &vlan->queue_list);

	vlan->numvtaps++;
	vlan->numqueues++;

	return 0;
}

static int macvtap_disable_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;
	struct macvtap_queue *nq;

	ASSERT_RTNL();
	if (!q->enabled)
		return -EINVAL;

	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		int index = q->queue_index;
		BUG_ON(index >= vlan->numvtaps);
		nq = rtnl_dereference(vlan->taps[vlan->numvtaps - 1]);
		nq->queue_index = index;

		rcu_assign_pointer(vlan->taps[index], nq);
		RCU_INIT_POINTER(vlan->taps[vlan->numvtaps - 1], NULL);
		q->enabled = false;

		vlan->numvtaps--;
	}

	return 0;
}

/*
 * The file owning the queue got closed; give up both
 * the reference that the file holds as well as the
 * one from the macvlan_dev if that still exists.
 *
 * Holding the rtnl lock makes sure that we don't get
 * to the queue again after destroying it.
 */
static void macvtap_put_queue(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	rtnl_lock();
	vlan = rtnl_dereference(q->vlan);

	if (vlan) {
		if (q->enabled)
			BUG_ON(macvtap_disable_queue(q));

		vlan->numqueues--;
		RCU_INIT_POINTER(q->vlan, NULL);
		sock_put(&q->sk);
		list_del_init(&q->next);
	}

	rtnl_unlock();

	synchronize_rcu();
	skb_array_cleanup(&q->skb_array);
	sock_put(&q->sk);
}

/*
 * Select a queue based on the rxq of the device on which this packet
 * arrived. If the incoming device is not mq, calculate a flow hash
 * to select a queue. If all else fails, find the first available queue.
 * Cache vlan->numvtaps since it can become zero during the execution
 * of this function.
 */
static struct macvtap_queue *macvtap_get_queue(struct net_device *dev,
					       struct sk_buff *skb)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *tap = NULL;
	/* Access to taps array is protected by rcu, but access to numvtaps
	 * isn't. Below we use it to lookup a queue, but treat it as a hint
	 * and validate that the result isn't NULL - in case we are
	 * racing against queue removal.
	 */
	int numvtaps = ACCESS_ONCE(vlan->numvtaps);
	__u32 rxq;

	if (!numvtaps)
		goto out;

	if (numvtaps == 1)
		goto single;

	/* Check if we can use flow to select a queue */
	rxq = skb_get_hash(skb);
	if (rxq) {
		tap = rcu_dereference(vlan->taps[rxq % numvtaps]);
		goto out;
	}

	if (likely(skb_rx_queue_recorded(skb))) {
		rxq = skb_get_rx_queue(skb);

		while (unlikely(rxq >= numvtaps))
			rxq -= numvtaps;

		tap = rcu_dereference(vlan->taps[rxq]);
		goto out;
	}

single:
	tap = rcu_dereference(vlan->taps[0]);
out:
	return tap;
}

/*
 * The net_device is going away; give up the reference
 * that it holds on all queues and safely set the pointer
 * from the queues to NULL.
 */
static void macvtap_del_queues(struct net_device *dev)
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	struct macvtap_queue *q, *tmp;

	ASSERT_RTNL();
	list_for_each_entry_safe(q, tmp, &vlan->queue_list, next) {
		list_del_init(&q->next);
		RCU_INIT_POINTER(q->vlan, NULL);
		if (q->enabled)
			vlan->numvtaps--;
		vlan->numqueues--;
		sock_put(&q->sk);
	}
	BUG_ON(vlan->numvtaps);
	BUG_ON(vlan->numqueues);
	/* guarantee that any future macvtap_set_queue will fail */
	vlan->numvtaps = MAX_MACVTAP_QUEUES;
}

static rx_handler_result_t macvtap_handle_frame(struct sk_buff **pskb)
{
	struct sk_buff *skb = *pskb;
	struct net_device *dev = skb->dev;
	struct macvlan_dev *vlan;
	struct macvtap_queue *q;
	netdev_features_t features = TAP_FEATURES;

	vlan = macvtap_get_vlan_rcu(dev);
	if (!vlan)
		return RX_HANDLER_PASS;

	q = macvtap_get_queue(dev, skb);
	if (!q)
		return RX_HANDLER_PASS;

	if (__skb_array_full(&q->skb_array))
		goto drop;

	skb_push(skb, ETH_HLEN);

	/* Apply the forward feature mask so that we perform segmentation
	 * according to the user's wishes. This only works if VNET_HDR is
	 * enabled.
	 */
	if (q->flags & IFF_VNET_HDR)
		features |= vlan->tap_features;
	if (netif_needs_gso(skb, features)) {
		struct sk_buff *segs = __skb_gso_segment(skb, features, false);

		if (IS_ERR(segs))
			goto drop;

		if (!segs) {
			if (skb_array_produce(&q->skb_array, skb))
				goto drop;
			goto wake_up;
		}

		consume_skb(skb);
		while (segs) {
			struct sk_buff *nskb = segs->next;

			segs->next = NULL;
			if (skb_array_produce(&q->skb_array, segs)) {
				kfree_skb(segs);
				kfree_skb_list(nskb);
				break;
			}
			segs = nskb;
		}
	} else {
		/* If we receive a partial checksum and the tap side
		 * doesn't support checksum offload, compute the checksum.
		 * Note: it doesn't matter which checksum feature to
		 * check, we either support them all or none.
		 */
		if (skb->ip_summed == CHECKSUM_PARTIAL &&
		    !(features & NETIF_F_CSUM_MASK) &&
		    skb_checksum_help(skb))
			goto drop;
		if (skb_array_produce(&q->skb_array, skb))
			goto drop;
	}

wake_up:
	wake_up_interruptible_poll(sk_sleep(&q->sk), POLLIN | POLLRDNORM | POLLRDBAND);
	return RX_HANDLER_CONSUMED;

drop:
	/* Count errors/drops only here, thus don't care about args. */
	macvlan_count_rx(vlan, 0, 0, 0);
	kfree_skb(skb);
	return RX_HANDLER_CONSUMED;
}

static int macvtap_get_minor(struct macvlan_dev *vlan)
{
	int retval = -ENOMEM;

	mutex_lock(&minor_lock);
	retval = idr_alloc(&minor_idr, vlan, 1, MACVTAP_NUM_DEVS, GFP_KERNEL);
	if (retval >= 0) {
		vlan->minor = retval;
	} else if (retval == -ENOSPC) {
		printk(KERN_ERR "too many macvtap devices\n");
		retval = -EINVAL;
	}
	mutex_unlock(&minor_lock);
	return retval < 0 ? retval : 0;
}

static void macvtap_free_minor(struct macvlan_dev *vlan)
{
	mutex_lock(&minor_lock);
	if (vlan->minor) {
		idr_remove(&minor_idr, vlan->minor);
		vlan->minor = 0;
	}
	mutex_unlock(&minor_lock);
}

static struct net_device *dev_get_by_macvtap_minor(int minor)
{
	struct net_device *dev = NULL;
	struct macvlan_dev *vlan;

	mutex_lock(&minor_lock);
	vlan = idr_find(&minor_idr, minor);
	if (vlan) {
		dev = vlan->dev;
		dev_hold(dev);
	}
	mutex_unlock(&minor_lock);
	return dev;
}

static int macvtap_newlink(struct net *src_net,
			   struct net_device *dev,
			   struct nlattr *tb[],
			   struct nlattr *data[])
{
	struct macvlan_dev *vlan = netdev_priv(dev);
	int err;

	INIT_LIST_HEAD(&vlan->queue_list);

	/* Since macvlan supports all offloads by default, make
	 * tap support all offloads also.
	 */
	vlan->tap_features = TUN_OFFLOADS;

	err = netdev_rx_handler_register(dev, macvtap_handle_frame, vlan);
	if (err)
		return err;

	/* Don't put anything that may fail after macvlan_common_newlink
	 * because we can't undo what it does.
	 */
	return macvlan_common_newlink(src_net, dev, tb, data);
}

static void macvtap_dellink(struct net_device *dev,
			    struct list_head *head)
{
	netdev_rx_handler_unregister(dev);
	macvtap_del_queues(dev);
	macvlan_dellink(dev, head);
}

static void macvtap_setup(struct net_device *dev)
{
	macvlan_common_setup(dev);
	dev->tx_queue_len = TUN_READQ_SIZE;
}

static struct rtnl_link_ops macvtap_link_ops __read_mostly = {
	.kind		= "macvtap",
	.setup		= macvtap_setup,
	.newlink	= macvtap_newlink,
	.dellink	= macvtap_dellink,
};

static void macvtap_sock_write_space(struct sock *sk)
{
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk) ||
	    !test_and_clear_bit(SOCKWQ_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_poll(wqueue, POLLOUT | POLLWRNORM | POLLWRBAND);
}

static void macvtap_sock_destruct(struct sock *sk)
{
	struct macvtap_queue *q = container_of(sk, struct macvtap_queue, sk);
	struct sk_buff *skb;

	/* skbs must be released with kfree_skb(), not plain kfree() */
	while ((skb = skb_array_consume(&q->skb_array)) != NULL)
		kfree_skb(skb);
}

static int macvtap_open(struct inode *inode, struct file *file)
{
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev;
	struct macvtap_queue *q;
	int err = -ENODEV;

	rtnl_lock();
	dev = dev_get_by_macvtap_minor(iminor(inode));
	if (!dev)
		goto err;

	err = -ENOMEM;
	q = (struct macvtap_queue *)sk_alloc(net, AF_UNSPEC, GFP_KERNEL,
					     &macvtap_proto, 0);
	if (!q)
		goto err;

	RCU_INIT_POINTER(q->sock.wq, &q->wq);
	init_waitqueue_head(&q->wq.wait);
	q->sock.type = SOCK_RAW;
	q->sock.state = SS_CONNECTED;
	q->sock.file = file;
	q->sock.ops = &macvtap_socket_ops;
	sock_init_data(&q->sock, &q->sk);
	q->sk.sk_write_space = macvtap_sock_write_space;
	q->sk.sk_destruct = macvtap_sock_destruct;
	q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP;
	q->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

	/*
	 * So far only KVM virtio_net uses macvtap; enable zero copy between
	 * guest kernel and host kernel when the lower device supports zerocopy.
	 *
	 * The macvlan supports zerocopy iff the lower device supports zero
	 * copy, so we don't have to look at the lower device directly.
	 */
	if ((dev->features & NETIF_F_HIGHDMA) && (dev->features & NETIF_F_SG))
		sock_set_flag(&q->sk, SOCK_ZEROCOPY);

	err = -ENOMEM;
	if (skb_array_init(&q->skb_array, dev->tx_queue_len, GFP_KERNEL))
		goto err_array;

	err = macvtap_set_queue(dev, file, q);
	if (err)
		goto err_queue;

	dev_put(dev);

	rtnl_unlock();
	return err;

err_queue:
	skb_array_cleanup(&q->skb_array);
err_array:
	sock_put(&q->sk);
err:
	if (dev)
		dev_put(dev);

	rtnl_unlock();
	return err;
}

static int macvtap_release(struct inode *inode, struct file *file)
{
	struct macvtap_queue *q = file->private_data;
	macvtap_put_queue(q);
	return 0;
}

static unsigned int macvtap_poll(struct file *file, poll_table *wait)
{
	struct macvtap_queue *q = file->private_data;
	unsigned int mask = POLLERR;

	if (!q)
		goto out;

	mask = 0;
	poll_wait(file, &q->wq.wait, wait);

	if (!skb_array_empty(&q->skb_array))
		mask |= POLLIN | POLLRDNORM;

	if (sock_writeable(&q->sk) ||
	    (!test_and_set_bit(SOCKWQ_ASYNC_NOSPACE, &q->sock.flags) &&
	     sock_writeable(&q->sk)))
		mask |= POLLOUT | POLLWRNORM;

out:
	return mask;
}

static inline struct sk_buff *macvtap_alloc_skb(struct sock *sk, size_t prepad,
						size_t len, size_t linear,
						int noblock, int *err)
{
	struct sk_buff *skb;

	/* Under a page? Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   err, 0);
	if (!skb)
		return NULL;

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}

/* Neighbour code has some assumptions on HH_DATA_MOD alignment */
#define MACVTAP_RESERVE HH_DATA_OFF(ETH_HLEN)

/* Get packet from user space buffer */
static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
				struct iov_iter *from, int noblock)
{
	int good_linear = SKB_MAX_HEAD(MACVTAP_RESERVE);
	struct sk_buff *skb;
	struct macvlan_dev *vlan;
	unsigned long total_len = iov_iter_count(from);
	unsigned long len = total_len;
	int err;
	struct virtio_net_hdr vnet_hdr = { 0 };
	int vnet_hdr_len = 0;
	int copylen = 0;
	int depth;
	bool zerocopy = false;
	size_t linear;
	ssize_t n;

	if (q->flags & IFF_VNET_HDR) {
		vnet_hdr_len = q->vnet_hdr_sz;

		err = -EINVAL;
		if (len < vnet_hdr_len)
			goto err;
		len -= vnet_hdr_len;

		err = -EFAULT;
		n = copy_from_iter(&vnet_hdr, sizeof(vnet_hdr), from);
		if (n != sizeof(vnet_hdr))
			goto err;
		iov_iter_advance(from, vnet_hdr_len - sizeof(vnet_hdr));
		if ((vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    macvtap16_to_cpu(q, vnet_hdr.csum_start) +
		    macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2 >
			     macvtap16_to_cpu(q, vnet_hdr.hdr_len))
			vnet_hdr.hdr_len = cpu_to_macvtap16(q,
				 macvtap16_to_cpu(q, vnet_hdr.csum_start) +
				 macvtap16_to_cpu(q, vnet_hdr.csum_offset) + 2);
		err = -EINVAL;
		if (macvtap16_to_cpu(q, vnet_hdr.hdr_len) > len)
			goto err;
	}

	err = -EINVAL;
	if (unlikely(len < ETH_HLEN))
		goto err;

	if (m && m->msg_control && sock_flag(&q->sk, SOCK_ZEROCOPY)) {
		struct iov_iter i;

		copylen = vnet_hdr.hdr_len ?
			macvtap16_to_cpu(q, vnet_hdr.hdr_len) : GOODCOPY_LEN;
		if (copylen > good_linear)
			copylen = good_linear;
		else if (copylen < ETH_HLEN)
			copylen = ETH_HLEN;
		linear = copylen;
		i = *from;
		iov_iter_advance(&i, copylen);
		if (iov_iter_npages(&i, INT_MAX) <= MAX_SKB_FRAGS)
			zerocopy = true;
	}

	if (!zerocopy) {
		copylen = len;
		linear = macvtap16_to_cpu(q, vnet_hdr.hdr_len);
		if (linear > good_linear)
			linear = good_linear;
		else if (linear < ETH_HLEN)
			linear = ETH_HLEN;
	}

	skb = macvtap_alloc_skb(&q->sk, MACVTAP_RESERVE, copylen,
				linear, noblock, &err);
	if (!skb)
		goto err;

	if (zerocopy)
		err = zerocopy_sg_from_iter(skb, from);
	else {
		err = skb_copy_datagram_from_iter(skb, 0, from, len);
		if (!err && m && m->msg_control) {
			struct ubuf_info *uarg = m->msg_control;
			uarg->callback(uarg, false);
		}
	}

	if (err)
		goto err_kfree;

	skb_set_network_header(skb, ETH_HLEN);
	skb_reset_mac_header(skb);
	skb->protocol = eth_hdr(skb)->h_proto;

	if (vnet_hdr_len) {
		err = virtio_net_hdr_to_skb(skb, &vnet_hdr,
					    macvtap_is_little_endian(q));
		if (err)
			goto err_kfree;
	}

	skb_probe_transport_header(skb, ETH_HLEN);

	/* Move network header to the right position for VLAN tagged packets */
	if ((skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD)) &&
	    __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
		skb_set_network_header(skb, depth);

	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	/* copy skb_ubuf_info for callback when skb has no error */
	if (zerocopy) {
		skb_shinfo(skb)->destructor_arg = m->msg_control;
		skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
		skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
	}
	if (vlan) {
		skb->dev = vlan->dev;
		dev_queue_xmit(skb);
	} else {
		kfree_skb(skb);
	}
	rcu_read_unlock();

	return total_len;

err_kfree:
	kfree_skb(skb);

err:
	rcu_read_lock();
	vlan = rcu_dereference(q->vlan);
	if (vlan)
		this_cpu_inc(vlan->pcpu_stats->tx_dropped);
	rcu_read_unlock();

	return err;
}

static ssize_t macvtap_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;

	return macvtap_get_user(q, NULL, from, file->f_flags & O_NONBLOCK);
}

/* Put packet to the user space buffer */
static ssize_t macvtap_put_user(struct macvtap_queue *q,
				const struct sk_buff *skb,
				struct iov_iter *iter)
{
	int ret;
	int vnet_hdr_len = 0;
	int vlan_offset = 0;
	int total;

	if (q->flags & IFF_VNET_HDR) {
		struct virtio_net_hdr vnet_hdr;
		vnet_hdr_len = q->vnet_hdr_sz;
		if (iov_iter_count(iter) < vnet_hdr_len)
			return -EINVAL;

		ret = virtio_net_hdr_from_skb(skb, &vnet_hdr,
					      macvtap_is_little_endian(q));
		if (ret)
			BUG();

		if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
		    sizeof(vnet_hdr))
			return -EFAULT;

		iov_iter_advance(iter, vnet_hdr_len - sizeof(vnet_hdr));
	}
	total = vnet_hdr_len;
	total += skb->len;

	if (skb_vlan_tag_present(skb)) {
		struct {
			__be16 h_vlan_proto;
			__be16 h_vlan_TCI;
		} veth;
		veth.h_vlan_proto = skb->vlan_proto;
		veth.h_vlan_TCI = htons(skb_vlan_tag_get(skb));

		vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
		total += VLAN_HLEN;

		ret = skb_copy_datagram_iter(skb, 0, iter, vlan_offset);
		if (ret || !iov_iter_count(iter))
			goto done;

		ret = copy_to_iter(&veth, sizeof(veth), iter);
		if (ret != sizeof(veth) || !iov_iter_count(iter))
			goto done;
	}

	ret = skb_copy_datagram_iter(skb, vlan_offset, iter,
				     skb->len - vlan_offset);

done:
	return ret ? ret : total;
}

static ssize_t macvtap_do_read(struct macvtap_queue *q,
			       struct iov_iter *to,
			       int noblock)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb;
	ssize_t ret = 0;

	if (!iov_iter_count(to))
		return 0;

	while (1) {
		if (!noblock)
			prepare_to_wait(sk_sleep(&q->sk), &wait,
					TASK_INTERRUPTIBLE);

		/* Read frames from the queue */
		skb = skb_array_consume(&q->skb_array);
		if (skb)
			break;
		if (noblock) {
			ret = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}
		/* Nothing to read, let's sleep */
		schedule();
	}
	if (!noblock)
		finish_wait(sk_sleep(&q->sk), &wait);

	if (skb) {
		ret = macvtap_put_user(q, skb, to);
		if (unlikely(ret < 0))
			kfree_skb(skb);
		else
			consume_skb(skb);
	}
	return ret;
}

static ssize_t macvtap_read_iter(struct kiocb *iocb, struct iov_iter *to)
{
	struct file *file = iocb->ki_filp;
	struct macvtap_queue *q = file->private_data;
	ssize_t len = iov_iter_count(to), ret;

	ret = macvtap_do_read(q, to, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
	if (ret > 0)
		iocb->ki_pos = ret;
	return ret;
}

static struct macvlan_dev *macvtap_get_vlan(struct macvtap_queue *q)
{
	struct macvlan_dev *vlan;

	ASSERT_RTNL();
	vlan = rtnl_dereference(q->vlan);
	if (vlan)
		dev_hold(vlan->dev);

	return vlan;
}

static void macvtap_put_vlan(struct macvlan_dev *vlan)
{
	dev_put(vlan->dev);
}

static int macvtap_ioctl_set_queue(struct file *file, unsigned int flags)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	int ret;

	vlan = macvtap_get_vlan(q);
	if (!vlan)
		return -EINVAL;

	if (flags & IFF_ATTACH_QUEUE)
		ret = macvtap_enable_queue(vlan->dev, file, q);
	else if (flags & IFF_DETACH_QUEUE)
		ret = macvtap_disable_queue(q);
	else
		ret = -EINVAL;

	macvtap_put_vlan(vlan);
	return ret;
}

static int set_offload(struct macvtap_queue *q, unsigned long arg)
{
	struct macvlan_dev *vlan;
	netdev_features_t features;
	netdev_features_t feature_mask = 0;

	vlan = rtnl_dereference(q->vlan);
	if (!vlan)
		return -ENOLINK;

	features = vlan->dev->features;

	if (arg & TUN_F_CSUM) {
		feature_mask = NETIF_F_HW_CSUM;

		if (arg & (TUN_F_TSO4 | TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN)
				feature_mask |= NETIF_F_TSO_ECN;
			if (arg & TUN_F_TSO4)
				feature_mask |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				feature_mask |= NETIF_F_TSO6;
		}

		if (arg & TUN_F_UFO)
			feature_mask |= NETIF_F_UFO;
	}

	/* The tun/tap driver inverts the usage for TSO offloads, where
	 * setting the TSO bit means that user space wants to
	 * accept TSO frames and turning it off means that user space
	 * does not support TSO.
	 * For macvtap, we have to invert it to mean the same thing.
	 * When user space turns off TSO, we turn off GSO/LRO so that
	 * user space will not receive TSO frames.
	 */
	if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO))
		features |= RX_OFFLOADS;
	else
		features &= ~RX_OFFLOADS;

	/* tap_features are the same as features on tun/tap and
	 * reflect user expectations.
	 */
	vlan->tap_features = feature_mask;
	vlan->set_features = features;
	netdev_update_features(vlan->dev);

	return 0;
}

/*
 * provide compatibility with generic tun/tap interface
 */
static long macvtap_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct macvtap_queue *q = file->private_data;
	struct macvlan_dev *vlan;
	void __user *argp = (void __user *)arg;
	struct ifreq __user *ifr = argp;
	unsigned int __user *up = argp;
	unsigned short u;
	int __user *sp = argp;
	struct sockaddr sa;
	int s;
	int ret;

	switch (cmd) {
	case TUNSETIFF:
		/* ignore the name, just look at flags */
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;

		ret = 0;
		if ((u & ~MACVTAP_FEATURES) != (IFF_NO_PI | IFF_TAP))
			ret = -EINVAL;
		else
			q->flags = (q->flags & ~MACVTAP_FEATURES) | u;

		return ret;

	case TUNGETIFF:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}

		ret = 0;
		u = q->flags;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    put_user(u, &ifr->ifr_flags))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case TUNSETQUEUE:
		if (get_user(u, &ifr->ifr_flags))
			return -EFAULT;
		rtnl_lock();
		ret = macvtap_ioctl_set_queue(file, u);
		rtnl_unlock();
		return ret;

	case TUNGETFEATURES:
		if (put_user(IFF_TAP | IFF_NO_PI | MACVTAP_FEATURES, up))
			return -EFAULT;
		return 0;

	case TUNSETSNDBUF:
		if (get_user(s, sp))
			return -EFAULT;

		q->sk.sk_sndbuf = s;
		return 0;

	case TUNGETVNETHDRSZ:
		s = q->vnet_hdr_sz;
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETHDRSZ:
		if (get_user(s, sp))
			return -EFAULT;
		if (s < (int)sizeof(struct virtio_net_hdr))
			return -EINVAL;

		q->vnet_hdr_sz = s;
		return 0;

	case TUNGETVNETLE:
		s = !!(q->flags & MACVTAP_VNET_LE);
		if (put_user(s, sp))
			return -EFAULT;
		return 0;

	case TUNSETVNETLE:
		if (get_user(s, sp))
			return -EFAULT;
		if (s)
			q->flags |= MACVTAP_VNET_LE;
		else
			q->flags &= ~MACVTAP_VNET_LE;
		return 0;

	case TUNGETVNETBE:
		return macvtap_get_vnet_be(q, sp);

	case TUNSETVNETBE:
		return macvtap_set_vnet_be(q, sp);

	case TUNSETOFFLOAD:
		/* let the user check for future flags */
		if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
			    TUN_F_TSO_ECN | TUN_F_UFO))
			return -EINVAL;

		rtnl_lock();
		ret = set_offload(q, arg);
		rtnl_unlock();
		return ret;

	case SIOCGIFHWADDR:
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = 0;
		u = vlan->dev->type;
		if (copy_to_user(&ifr->ifr_name, vlan->dev->name, IFNAMSIZ) ||
		    copy_to_user(&ifr->ifr_hwaddr.sa_data, vlan->dev->dev_addr, ETH_ALEN) ||
		    put_user(u, &ifr->ifr_hwaddr.sa_family))
			ret = -EFAULT;
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	case SIOCSIFHWADDR:
		if (copy_from_user(&sa, &ifr->ifr_hwaddr, sizeof(sa)))
			return -EFAULT;
		rtnl_lock();
		vlan = macvtap_get_vlan(q);
		if (!vlan) {
			rtnl_unlock();
			return -ENOLINK;
		}
		ret = dev_set_mac_address(vlan->dev, &sa);
		macvtap_put_vlan(vlan);
		rtnl_unlock();
		return ret;

	default:
		return -EINVAL;
	}
}

#ifdef CONFIG_COMPAT
static long macvtap_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	return macvtap_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations macvtap_fops = {
	.owner		= THIS_MODULE,
	.open		= macvtap_open,
	.release	= macvtap_release,
	.read_iter	= macvtap_read_iter,
	.write_iter	= macvtap_write_iter,
	.poll		= macvtap_poll,
	.llseek		= no_llseek,
	.unlocked_ioctl	= macvtap_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= macvtap_compat_ioctl,
#endif
};

static int macvtap_sendmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	return macvtap_get_user(q, m, &m->msg_iter, m->msg_flags & MSG_DONTWAIT);
}

static int macvtap_recvmsg(struct socket *sock, struct msghdr *m,
			   size_t total_len, int flags)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue, sock);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = macvtap_do_read(q, &m->msg_iter, flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}

static int macvtap_peek_len(struct socket *sock)
{
	struct macvtap_queue *q = container_of(sock, struct macvtap_queue,
					       sock);
	return skb_array_peek_len(&q->skb_array);
}

/* Ops structure to mimic raw sockets with tun */
static const struct proto_ops macvtap_socket_ops = {
	.sendmsg = macvtap_sendmsg,
	.recvmsg = macvtap_recvmsg,
	.peek_len = macvtap_peek_len,
};

/* Get an underlying socket object from tun file. Returns error unless file is
 * attached to a device. The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *macvtap_get_socket(struct file *file)
{
	struct macvtap_queue *q;
	if (file->f_op != &macvtap_fops)
		return ERR_PTR(-EINVAL);
	q = file->private_data;
	if (!q)
		return ERR_PTR(-EBADFD);
	return &q->sock;
}
EXPORT_SYMBOL_GPL(macvtap_get_socket);
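
/*
 * Sketch of an in-kernel consumer (vhost-net is the known user of this
 * export; the snippet is illustrative):
 *
 *	struct file *file = fget(fd);
 *	struct socket *sock = macvtap_get_socket(file);
 *	if (IS_ERR(sock))
 *		goto err;
 *	// sock_sendmsg()/sock_recvmsg() now work as on a packet socket
 *	fput(file);	// drop the file ref only when done with sock
 */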

static int macvtap_queue_resize(struct macvlan_dev *vlan)
{
	struct net_device *dev = vlan->dev;
	struct macvtap_queue *q;
	struct skb_array **arrays;
	int n = vlan->numqueues;
	int ret, i = 0;

	arrays = kmalloc(sizeof *arrays * n, GFP_KERNEL);
	if (!arrays)
		return -ENOMEM;

	list_for_each_entry(q, &vlan->queue_list, next)
		arrays[i++] = &q->skb_array;

	ret = skb_array_resize_multiple(arrays, n,
					dev->tx_queue_len, GFP_KERNEL);

	kfree(arrays);
	return ret;
}

static int macvtap_device_event(struct notifier_block *unused,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct macvlan_dev *vlan;
	struct device *classdev;
	dev_t devt;
	int err;
	char tap_name[IFNAMSIZ];

	if (dev->rtnl_link_ops != &macvtap_link_ops)
		return NOTIFY_DONE;

	snprintf(tap_name, IFNAMSIZ, "tap%d", dev->ifindex);
	vlan = netdev_priv(dev);

	switch (event) {
	case NETDEV_REGISTER:
		/* Create the device node here after the network device has
		 * been registered but before register_netdevice has
		 * finished running.
		 */
		err = macvtap_get_minor(vlan);
		if (err)
			return notifier_from_errno(err);

		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		classdev = device_create(&macvtap_class, &dev->dev, devt,
					 dev, tap_name);
		if (IS_ERR(classdev)) {
			macvtap_free_minor(vlan);
			return notifier_from_errno(PTR_ERR(classdev));
		}
		err = sysfs_create_link(&dev->dev.kobj, &classdev->kobj,
					tap_name);
		if (err)
			return notifier_from_errno(err);
		break;
	case NETDEV_UNREGISTER:
		/* vlan->minor == 0 if NETDEV_REGISTER above failed */
		if (vlan->minor == 0)
			break;
		sysfs_remove_link(&dev->dev.kobj, tap_name);
		devt = MKDEV(MAJOR(macvtap_major), vlan->minor);
		device_destroy(&macvtap_class, devt);
		macvtap_free_minor(vlan);
		break;
	case NETDEV_CHANGE_TX_QUEUE_LEN:
		if (macvtap_queue_resize(vlan))
			return NOTIFY_BAD;
		break;
	}

	return NOTIFY_DONE;
}

static struct notifier_block macvtap_notifier_block __read_mostly = {
	.notifier_call	= macvtap_device_event,
};

static int macvtap_init(void)
{
	int err;

	err = alloc_chrdev_region(&macvtap_major, 0,
				  MACVTAP_NUM_DEVS, "macvtap");
	if (err)
		goto out1;

	cdev_init(&macvtap_cdev, &macvtap_fops);
	err = cdev_add(&macvtap_cdev, macvtap_major, MACVTAP_NUM_DEVS);
	if (err)
		goto out2;

	err = class_register(&macvtap_class);
	if (err)
		goto out3;

	err = register_netdevice_notifier(&macvtap_notifier_block);
	if (err)
		goto out4;

	err = macvlan_link_register(&macvtap_link_ops);
	if (err)
		goto out5;

	return 0;

out5:
	unregister_netdevice_notifier(&macvtap_notifier_block);
out4:
	class_unregister(&macvtap_class);
out3:
	cdev_del(&macvtap_cdev);
out2:
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
out1:
	return err;
}
module_init(macvtap_init);

static void macvtap_exit(void)
{
	rtnl_link_unregister(&macvtap_link_ops);
	unregister_netdevice_notifier(&macvtap_notifier_block);
	class_unregister(&macvtap_class);
	cdev_del(&macvtap_cdev);
	unregister_chrdev_region(macvtap_major, MACVTAP_NUM_DEVS);
	idr_destroy(&minor_idr);
}
module_exit(macvtap_exit);

MODULE_ALIAS_RTNL_LINK("macvtap");
MODULE_AUTHOR("Arnd Bergmann <arnd@arndb.de>");
MODULE_LICENSE("GPL");