Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * TUN - Universal TUN/TAP device driver.
3 * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
16 */
17
18/*
19 * Changes:
20 *
Mike Kershawff4cc3a2005-09-01 17:40:05 -070021 * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
22 * Add TUNSETLINK ioctl to set the link encapsulation
23 *
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 * Mark Smith <markzzzsmith@yahoo.com.au>
Joe Perches344dc8e2012-07-12 19:33:09 +000025 * Use eth_random_addr() for tap MAC address.
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 *
27 * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
28 * Fixes in packet dropping, queue length setting and queue wakeup.
29 * Increased default tx queue length.
30 * Added ethtool API.
31 * Minor cleanups
32 *
33 * Daniel Podlejski <underley@underley.eu.org>
34 * Modifications for 2.3.99-pre5 kernel.
35 */
36
Joe Perches6b8a66e2011-03-02 07:18:10 +000037#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
38
Linus Torvalds1da177e2005-04-16 15:20:36 -070039#define DRV_NAME "tun"
40#define DRV_VERSION "1.6"
41#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
42#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
43
Linus Torvalds1da177e2005-04-16 15:20:36 -070044#include <linux/module.h>
45#include <linux/errno.h>
46#include <linux/kernel.h>
47#include <linux/major.h>
48#include <linux/slab.h>
49#include <linux/poll.h>
50#include <linux/fcntl.h>
51#include <linux/init.h>
52#include <linux/skbuff.h>
53#include <linux/netdevice.h>
54#include <linux/etherdevice.h>
55#include <linux/miscdevice.h>
56#include <linux/ethtool.h>
57#include <linux/rtnetlink.h>
Arnd Bergmann50857e22009-11-06 22:52:32 -080058#include <linux/compat.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070059#include <linux/if.h>
60#include <linux/if_arp.h>
61#include <linux/if_ether.h>
62#include <linux/if_tun.h>
63#include <linux/crc32.h>
Pavel Emelyanovd647a592008-04-16 00:41:16 -070064#include <linux/nsproxy.h>
Rusty Russellf43798c2008-07-03 03:48:02 -070065#include <linux/virtio_net.h>
Michael S. Tsirkin99405162010-02-14 01:01:10 +000066#include <linux/rcupdate.h>
Eric W. Biederman881d9662007-09-17 11:56:21 -070067#include <net/net_namespace.h>
Pavel Emelyanov79d17602008-04-16 00:40:46 -070068#include <net/netns/generic.h>
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -080069#include <net/rtnetlink.h>
Herbert Xu33dccbb2009-02-05 21:25:32 -080070#include <net/sock.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070071
Linus Torvalds1da177e2005-04-16 15:20:36 -070072#include <asm/uaccess.h>
73
Rusty Russell14daa022008-04-12 18:48:58 -070074/* Uncomment to enable debugging */
75/* #define TUN_DEBUG 1 */
76
Linus Torvalds1da177e2005-04-16 15:20:36 -070077#ifdef TUN_DEBUG
78static int debug;
Rusty Russell14daa022008-04-12 18:48:58 -070079
Joe Perches6b8a66e2011-03-02 07:18:10 +000080#define tun_debug(level, tun, fmt, args...) \
81do { \
82 if (tun->debug) \
83 netdev_printk(level, tun->dev, fmt, ##args); \
84} while (0)
85#define DBG1(level, fmt, args...) \
86do { \
87 if (debug == 2) \
88 printk(level fmt, ##args); \
89} while (0)
Rusty Russell14daa022008-04-12 18:48:58 -070090#else
Joe Perches6b8a66e2011-03-02 07:18:10 +000091#define tun_debug(level, tun, fmt, args...) \
92do { \
93 if (0) \
94 netdev_printk(level, tun->dev, fmt, ##args); \
95} while (0)
96#define DBG1(level, fmt, args...) \
97do { \
98 if (0) \
99 printk(level fmt, ##args); \
100} while (0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101#endif
102
Michael S. Tsirkin06908992012-07-20 09:23:23 +0000103#define GOODCOPY_LEN 128
104
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700105#define FLT_EXACT_COUNT 8
106struct tap_filter {
107 unsigned int count; /* Number of addrs. Zero means disabled */
108 u32 mask[2]; /* Mask of the hashed addrs */
109 unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];
110};
111
Jason Wangc8d68e62012-10-31 19:46:00 +0000112/* 1024 is probably a high enough limit: modern hypervisors seem to support on
113 * the order of 100-200 CPUs so this leaves us some breathing space if we want
114 * to match a queue per guest CPU.
115 */
116#define MAX_TAP_QUEUES 1024
117
Jason Wang96442e422012-10-31 19:46:02 +0000118#define TUN_FLOW_EXPIRE (3 * HZ)
119
Jason Wang54f968d2012-10-31 19:45:57 +0000120/* A tun_file connects an open character device to a tuntap netdevice. It
 121 * also contains all socket-related structures (except sock_fprog and tap_filter)
 122 * to serve as one transmit queue for the tuntap device. The sock_fprog and
 123 * tap_filter are kept in tun_struct since they are used for filtering on the
 124 * netdevice, not on a specific queue (at least I didn't see the requirement for
 125 * this).
Jason Wang6e914fc2012-10-31 19:45:58 +0000126 *
127 * RCU usage:
 128 * The tun_file and tun_struct are loosely coupled, the pointer from one to the
 129 * other can only be read while rcu_read_lock or rtnl_lock is held.
Jason Wang54f968d2012-10-31 19:45:57 +0000130 */
Eric W. Biederman631ab462009-01-20 11:00:40 +0000131struct tun_file {
Jason Wang54f968d2012-10-31 19:45:57 +0000132 struct sock sk;
133 struct socket socket;
134 struct socket_wq wq;
Jason Wang6e914fc2012-10-31 19:45:58 +0000135 struct tun_struct __rcu *tun;
Eric W. Biederman36b50ba2009-01-20 11:01:48 +0000136 struct net *net;
Jason Wang54f968d2012-10-31 19:45:57 +0000137 struct fasync_struct *fasync;
 138 /* only used for fasync */
139 unsigned int flags;
Jason Wangc8d68e62012-10-31 19:46:00 +0000140 u16 queue_index;
Eric W. Biederman631ab462009-01-20 11:00:40 +0000141};
142
Jason Wang96442e422012-10-31 19:46:02 +0000143struct tun_flow_entry {
144 struct hlist_node hash_link;
145 struct rcu_head rcu;
146 struct tun_struct *tun;
147
148 u32 rxhash;
149 int queue_index;
150 unsigned long updated;
151};
152
153#define TUN_NUM_FLOW_ENTRIES 1024
154
Jason Wang54f968d2012-10-31 19:45:57 +0000155/* Since the socket was moved to tun_file, to preserve the behavior of a persistent
 156 * device, the socket filter, sndbuf and vnet header size are restored when the
 157 * file is attached to a persistent device.
158 */
Rusty Russell14daa022008-04-12 18:48:58 -0700159struct tun_struct {
Jason Wangc8d68e62012-10-31 19:46:00 +0000160 struct tun_file __rcu *tfiles[MAX_TAP_QUEUES];
161 unsigned int numqueues;
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700162 unsigned int flags;
Eric W. Biederman0625c882012-02-07 16:48:55 -0800163 kuid_t owner;
164 kgid_t group;
Rusty Russell14daa022008-04-12 18:48:58 -0700165
Rusty Russell14daa022008-04-12 18:48:58 -0700166 struct net_device *dev;
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000167 netdev_features_t set_features;
Michał Mirosław88255372011-04-19 06:13:10 +0000168#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
169 NETIF_F_TSO6|NETIF_F_UFO)
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +0200170
171 int vnet_hdr_sz;
Jason Wang54f968d2012-10-31 19:45:57 +0000172 int sndbuf;
173 struct tap_filter txflt;
174 struct sock_fprog fprog;
175 /* protected by rtnl lock */
176 bool filter_attached;
Rusty Russell14daa022008-04-12 18:48:58 -0700177#ifdef TUN_DEBUG
178 int debug;
179#endif
Jason Wang96442e422012-10-31 19:46:02 +0000180 spinlock_t lock;
181 struct kmem_cache *flow_cache;
182 struct hlist_head flows[TUN_NUM_FLOW_ENTRIES];
183 struct timer_list flow_gc_timer;
184 unsigned long ageing_time;
Rusty Russell14daa022008-04-12 18:48:58 -0700185};
186
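/* Map an rxhash to one of the TUN_NUM_FLOW_ENTRIES (1024) flow buckets:
 * 0x3ff == TUN_NUM_FLOW_ENTRIES - 1, i.e. keep the low 10 bits of the hash.
 */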
Jason Wang96442e422012-10-31 19:46:02 +0000187static inline u32 tun_hashfn(u32 rxhash)
188{
189 return rxhash & 0x3ff;
190}
191
192static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash)
193{
194 struct tun_flow_entry *e;
195 struct hlist_node *n;
196
197 hlist_for_each_entry_rcu(e, n, head, hash_link) {
198 if (e->rxhash == rxhash)
199 return e;
200 }
201 return NULL;
202}
203
204static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun,
205 struct hlist_head *head,
206 u32 rxhash, u16 queue_index)
207{
208 struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache,
209 GFP_ATOMIC);
210 if (e) {
211 tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n",
212 rxhash, queue_index);
213 e->updated = jiffies;
214 e->rxhash = rxhash;
215 e->queue_index = queue_index;
216 e->tun = tun;
217 hlist_add_head_rcu(&e->hash_link, head);
218 }
219 return e;
220}
221
222static void tun_flow_free(struct rcu_head *head)
223{
224 struct tun_flow_entry *e
225 = container_of(head, struct tun_flow_entry, rcu);
226 kmem_cache_free(e->tun->flow_cache, e);
227}
228
229static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e)
230{
231 tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n",
232 e->rxhash, e->queue_index);
233 hlist_del_rcu(&e->hash_link);
234 call_rcu(&e->rcu, tun_flow_free);
235}
236
237static void tun_flow_flush(struct tun_struct *tun)
238{
239 int i;
240
241 spin_lock_bh(&tun->lock);
242 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
243 struct tun_flow_entry *e;
244 struct hlist_node *h, *n;
245
246 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link)
247 tun_flow_delete(tun, e);
248 }
249 spin_unlock_bh(&tun->lock);
250}
251
252static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index)
253{
254 int i;
255
256 spin_lock_bh(&tun->lock);
257 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
258 struct tun_flow_entry *e;
259 struct hlist_node *h, *n;
260
261 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
262 if (e->queue_index == queue_index)
263 tun_flow_delete(tun, e);
264 }
265 }
266 spin_unlock_bh(&tun->lock);
267}
268
269static void tun_flow_cleanup(unsigned long data)
270{
271 struct tun_struct *tun = (struct tun_struct *)data;
272 unsigned long delay = tun->ageing_time;
273 unsigned long next_timer = jiffies + delay;
274 unsigned long count = 0;
275 int i;
276
277 tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n");
278
279 spin_lock_bh(&tun->lock);
280 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) {
281 struct tun_flow_entry *e;
282 struct hlist_node *h, *n;
283
284 hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) {
285 unsigned long this_timer;
286 count++;
287 this_timer = e->updated + delay;
288 if (time_before_eq(this_timer, jiffies))
289 tun_flow_delete(tun, e);
290 else if (time_before(this_timer, next_timer))
291 next_timer = this_timer;
292 }
293 }
294
295 if (count)
296 mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer));
297 spin_unlock_bh(&tun->lock);
298}
299
300static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb,
301 u16 queue_index)
302{
303 struct hlist_head *head;
304 struct tun_flow_entry *e;
305 unsigned long delay = tun->ageing_time;
306 u32 rxhash = skb_get_rxhash(skb);
307
308 if (!rxhash)
309 return;
310 else
311 head = &tun->flows[tun_hashfn(rxhash)];
312
313 rcu_read_lock();
314
315 if (tun->numqueues == 1)
316 goto unlock;
317
318 e = tun_flow_find(head, rxhash);
319 if (likely(e)) {
320 /* TODO: keep queueing to old queue until it's empty? */
321 e->queue_index = queue_index;
322 e->updated = jiffies;
323 } else {
324 spin_lock_bh(&tun->lock);
325 if (!tun_flow_find(head, rxhash))
326 tun_flow_create(tun, head, rxhash, queue_index);
327
328 if (!timer_pending(&tun->flow_gc_timer))
329 mod_timer(&tun->flow_gc_timer,
330 round_jiffies_up(jiffies + delay));
331 spin_unlock_bh(&tun->lock);
332 }
333
334unlock:
335 rcu_read_unlock();
336}
337
Jason Wangc8d68e62012-10-31 19:46:00 +0000338/* We try to identify a flow through its rxhash first. The reason that
 339 * we do not check the rxq no. is because some cards (e.g. 82599) choose
 340 * the rxq based on the txq where the last packet of the flow came in. As
 341 * the userspace application moves between processors, we may get a
 342 * different rxq no. here. If we cannot get the rxhash, then we
 343 * hope the rxq no. may help here.
344 */
345static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb)
346{
347 struct tun_struct *tun = netdev_priv(dev);
Jason Wang96442e422012-10-31 19:46:02 +0000348 struct tun_flow_entry *e;
Jason Wangc8d68e62012-10-31 19:46:00 +0000349 u32 txq = 0;
350 u32 numqueues = 0;
351
352 rcu_read_lock();
353 numqueues = tun->numqueues;
354
355 txq = skb_get_rxhash(skb);
356 if (txq) {
Jason Wang96442e422012-10-31 19:46:02 +0000357 e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq);
358 if (e)
359 txq = e->queue_index;
360 else
361 /* use multiply and shift instead of expensive divide */
362 txq = ((u64)txq * numqueues) >> 32;
Jason Wangc8d68e62012-10-31 19:46:00 +0000363 } else if (likely(skb_rx_queue_recorded(skb))) {
364 txq = skb_get_rx_queue(skb);
365 while (unlikely(txq >= numqueues))
366 txq -= numqueues;
367 }
368
369 rcu_read_unlock();
370 return txq;
371}
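/* Worked example of the multiply-and-shift mapping above: with numqueues == 4,
 * ((u64)0x80000000 * 4) >> 32 == 2 and ((u64)0xffffffff * 4) >> 32 == 3, so the
 * 32-bit hash space is split into numqueues equal ranges without a divide.
 */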
372
Jason Wangcde8b152012-10-31 19:46:01 +0000373static inline bool tun_not_capable(struct tun_struct *tun)
374{
375 const struct cred *cred = current_cred();
376
377 return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) ||
378 (gid_valid(tun->group) && !in_egroup_p(tun->group))) &&
379 !capable(CAP_NET_ADMIN);
380}
381
Jason Wangc8d68e62012-10-31 19:46:00 +0000382static void tun_set_real_num_queues(struct tun_struct *tun)
383{
384 netif_set_real_num_tx_queues(tun->dev, tun->numqueues);
385 netif_set_real_num_rx_queues(tun->dev, tun->numqueues);
386}
387
388static void __tun_detach(struct tun_file *tfile, bool clean)
389{
390 struct tun_file *ntfile;
391 struct tun_struct *tun;
392 struct net_device *dev;
393
394 tun = rcu_dereference_protected(tfile->tun,
395 lockdep_rtnl_is_held());
396 if (tun) {
397 u16 index = tfile->queue_index;
398 BUG_ON(index >= tun->numqueues);
399 dev = tun->dev;
400
401 rcu_assign_pointer(tun->tfiles[index],
402 tun->tfiles[tun->numqueues - 1]);
403 rcu_assign_pointer(tfile->tun, NULL);
404 ntfile = rcu_dereference_protected(tun->tfiles[index],
405 lockdep_rtnl_is_held());
406 ntfile->queue_index = index;
407
408 --tun->numqueues;
409 sock_put(&tfile->sk);
410
411 synchronize_net();
Jason Wang96442e422012-10-31 19:46:02 +0000412 tun_flow_delete_by_queue(tun, tun->numqueues + 1);
Jason Wangc8d68e62012-10-31 19:46:00 +0000413 /* Drop read queue */
414 skb_queue_purge(&tfile->sk.sk_receive_queue);
415 tun_set_real_num_queues(tun);
416
417 if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST))
418 if (dev->reg_state == NETREG_REGISTERED)
419 unregister_netdevice(dev);
420 }
421
422 if (clean) {
423 BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
424 &tfile->socket.flags));
425 sk_release_kernel(&tfile->sk);
426 }
427}
428
429static void tun_detach(struct tun_file *tfile, bool clean)
430{
431 rtnl_lock();
432 __tun_detach(tfile, clean);
433 rtnl_unlock();
434}
435
436static void tun_detach_all(struct net_device *dev)
437{
438 struct tun_struct *tun = netdev_priv(dev);
439 struct tun_file *tfile;
440 int i, n = tun->numqueues;
441
442 for (i = 0; i < n; i++) {
443 tfile = rcu_dereference_protected(tun->tfiles[i],
444 lockdep_rtnl_is_held());
445 BUG_ON(!tfile);
446 wake_up_all(&tfile->wq.wait);
447 rcu_assign_pointer(tfile->tun, NULL);
448 --tun->numqueues;
449 }
450 BUG_ON(tun->numqueues != 0);
451
452 synchronize_net();
453 for (i = 0; i < n; i++) {
454 tfile = rcu_dereference_protected(tun->tfiles[i],
455 lockdep_rtnl_is_held());
456 /* Drop read queue */
457 skb_queue_purge(&tfile->sk.sk_receive_queue);
458 sock_put(&tfile->sk);
459 }
460}
461
Eric W. Biedermana7385ba2009-01-20 10:57:48 +0000462static int tun_attach(struct tun_struct *tun, struct file *file)
463{
Eric W. Biederman631ab462009-01-20 11:00:40 +0000464 struct tun_file *tfile = file->private_data;
Eric W. Biederman38231b72009-01-20 11:02:28 +0000465 int err;
Eric W. Biedermana7385ba2009-01-20 10:57:48 +0000466
Eric W. Biederman38231b72009-01-20 11:02:28 +0000467 err = -EINVAL;
Jason Wangc8d68e62012-10-31 19:46:00 +0000468 if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held()))
Eric W. Biederman38231b72009-01-20 11:02:28 +0000469 goto out;
470
471 err = -EBUSY;
Jason Wangc8d68e62012-10-31 19:46:00 +0000472 if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1)
473 goto out;
474
475 err = -E2BIG;
476 if (tun->numqueues == MAX_TAP_QUEUES)
Eric W. Biederman38231b72009-01-20 11:02:28 +0000477 goto out;
478
479 err = 0;
Jason Wang54f968d2012-10-31 19:45:57 +0000480
Jason Wangc8d68e62012-10-31 19:46:00 +0000481 /* Re-attach the filter to a persistent device */
Jason Wang54f968d2012-10-31 19:45:57 +0000482 if (tun->filter_attached == true) {
483 err = sk_attach_filter(&tun->fprog, tfile->socket.sk);
484 if (!err)
485 goto out;
486 }
Jason Wangc8d68e62012-10-31 19:46:00 +0000487 tfile->queue_index = tun->numqueues;
Jason Wang6e914fc2012-10-31 19:45:58 +0000488 rcu_assign_pointer(tfile->tun, tun);
Jason Wangc8d68e62012-10-31 19:46:00 +0000489 rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile);
Jason Wang54f968d2012-10-31 19:45:57 +0000490 sock_hold(&tfile->sk);
Jason Wangc8d68e62012-10-31 19:46:00 +0000491 tun->numqueues++;
492
493 tun_set_real_num_queues(tun);
494
495 if (tun->numqueues == 1)
496 netif_carrier_on(tun->dev);
497
498 /* device is allowed to go away first, so no need to hold extra
499 * refcnt.
500 */
Eric W. Biedermana7385ba2009-01-20 10:57:48 +0000501
Eric W. Biederman38231b72009-01-20 11:02:28 +0000502out:
Eric W. Biederman38231b72009-01-20 11:02:28 +0000503 return err;
Eric W. Biedermana7385ba2009-01-20 10:57:48 +0000504}
505
Eric W. Biederman631ab462009-01-20 11:00:40 +0000506static struct tun_struct *__tun_get(struct tun_file *tfile)
507{
Jason Wang6e914fc2012-10-31 19:45:58 +0000508 struct tun_struct *tun;
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000509
Jason Wang6e914fc2012-10-31 19:45:58 +0000510 rcu_read_lock();
511 tun = rcu_dereference(tfile->tun);
512 if (tun)
513 dev_hold(tun->dev);
514 rcu_read_unlock();
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000515
516 return tun;
Eric W. Biederman631ab462009-01-20 11:00:40 +0000517}
518
519static struct tun_struct *tun_get(struct file *file)
520{
521 return __tun_get(file->private_data);
522}
523
524static void tun_put(struct tun_struct *tun)
525{
Jason Wang6e914fc2012-10-31 19:45:58 +0000526 dev_put(tun->dev);
Eric W. Biederman631ab462009-01-20 11:00:40 +0000527}
528
Joe Perches6b8a66e2011-03-02 07:18:10 +0000529/* TAP filtering */
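/* The inexact multicast filter below is a 64-bit hash: ether_crc() >> 26 keeps
 * the top six bits of the CRC (0..63), n >> 5 picks one of the two u32 words in
 * mask[] and n & 31 picks the bit within that word.
 */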
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700530static void addr_hash_set(u32 *mask, const u8 *addr)
531{
532 int n = ether_crc(ETH_ALEN, addr) >> 26;
533 mask[n >> 5] |= (1 << (n & 31));
534}
535
536static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
537{
538 int n = ether_crc(ETH_ALEN, addr) >> 26;
539 return mask[n >> 5] & (1 << (n & 31));
540}
541
542static int update_filter(struct tap_filter *filter, void __user *arg)
543{
544 struct { u8 u[ETH_ALEN]; } *addr;
545 struct tun_filter uf;
546 int err, alen, n, nexact;
547
548 if (copy_from_user(&uf, arg, sizeof(uf)))
549 return -EFAULT;
550
551 if (!uf.count) {
552 /* Disabled */
553 filter->count = 0;
554 return 0;
555 }
556
557 alen = ETH_ALEN * uf.count;
558 addr = kmalloc(alen, GFP_KERNEL);
559 if (!addr)
560 return -ENOMEM;
561
562 if (copy_from_user(addr, arg + sizeof(uf), alen)) {
563 err = -EFAULT;
564 goto done;
565 }
566
 567 /* The filter is updated without holding any locks, which is
568 * perfectly safe. We disable it first and in the worst
569 * case we'll accept a few undesired packets. */
570 filter->count = 0;
571 wmb();
572
573 /* Use first set of addresses as an exact filter */
574 for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
575 memcpy(filter->addr[n], addr[n].u, ETH_ALEN);
576
577 nexact = n;
578
Alex Williamsoncfbf84f2009-02-08 17:49:17 -0800579 /* Remaining multicast addresses are hashed,
580 * unicast will leave the filter disabled. */
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700581 memset(filter->mask, 0, sizeof(filter->mask));
Alex Williamsoncfbf84f2009-02-08 17:49:17 -0800582 for (; n < uf.count; n++) {
583 if (!is_multicast_ether_addr(addr[n].u)) {
584 err = 0; /* no filter */
585 goto done;
586 }
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700587 addr_hash_set(filter->mask, addr[n].u);
Alex Williamsoncfbf84f2009-02-08 17:49:17 -0800588 }
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700589
590 /* For ALLMULTI just set the mask to all ones.
591 * This overrides the mask populated above. */
592 if ((uf.flags & TUN_FLT_ALLMULTI))
593 memset(filter->mask, ~0, sizeof(filter->mask));
594
595 /* Now enable the filter */
596 wmb();
597 filter->count = nexact;
598
599 /* Return the number of exact filters */
600 err = nexact;
601
602done:
603 kfree(addr);
604 return err;
605}
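/* Minimal userspace sketch (illustrative only, not part of the driver) of how
 * this filter is programmed: the TUNSETTXFILTER argument is a struct tun_filter
 * header followed immediately by uf.count MAC addresses, which is the layout
 * update_filter() parses above.
 *
 *	struct {
 *		struct tun_filter hdr;
 *		unsigned char addrs[2][ETH_ALEN];
 *	} req = {
 *		.hdr = { .flags = 0, .count = 2 },
 *		.addrs = { { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 },
 *			   { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 } },
 *	};
 *
 *	if (ioctl(tap_fd, TUNSETTXFILTER, &req) < 0)
 *		perror("TUNSETTXFILTER");
 */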
606
607/* Returns: 0 - drop, !=0 - accept */
608static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
609{
 610 /* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect
611 * at this point. */
612 struct ethhdr *eh = (struct ethhdr *) skb->data;
613 int i;
614
615 /* Exact match */
616 for (i = 0; i < filter->count; i++)
Joe Perches2e42e472012-05-09 17:17:46 +0000617 if (ether_addr_equal(eh->h_dest, filter->addr[i]))
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700618 return 1;
619
620 /* Inexact match (multicast only) */
621 if (is_multicast_ether_addr(eh->h_dest))
622 return addr_hash_test(filter->mask, eh->h_dest);
623
624 return 0;
625}
626
627/*
628 * Checks whether the packet is accepted or not.
629 * Returns: 0 - drop, !=0 - accept
630 */
631static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
632{
633 if (!filter->count)
634 return 1;
635
636 return run_filter(filter, skb);
637}
638
Linus Torvalds1da177e2005-04-16 15:20:36 -0700639/* Network device part of the driver */
640
Jeff Garzik7282d492006-09-13 14:30:00 -0400641static const struct ethtool_ops tun_ethtool_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700642
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000643/* Net device detach from fd. */
644static void tun_net_uninit(struct net_device *dev)
645{
Jason Wangc8d68e62012-10-31 19:46:00 +0000646 tun_detach_all(dev);
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000647}
648
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649/* Net device open. */
650static int tun_net_open(struct net_device *dev)
651{
Jason Wangc8d68e62012-10-31 19:46:00 +0000652 netif_tx_start_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700653 return 0;
654}
655
656/* Net device close. */
657static int tun_net_close(struct net_device *dev)
658{
Jason Wangc8d68e62012-10-31 19:46:00 +0000659 netif_tx_stop_all_queues(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700660 return 0;
661}
662
663/* Net device start xmit */
Stephen Hemminger424efe92009-08-31 19:50:51 +0000664static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665{
666 struct tun_struct *tun = netdev_priv(dev);
Jason Wangc8d68e62012-10-31 19:46:00 +0000667 int txq = skb->queue_mapping;
Jason Wang6e914fc2012-10-31 19:45:58 +0000668 struct tun_file *tfile;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700669
Jason Wang6e914fc2012-10-31 19:45:58 +0000670 rcu_read_lock();
Jason Wangc8d68e62012-10-31 19:46:00 +0000671 tfile = rcu_dereference(tun->tfiles[txq]);
672
Linus Torvalds1da177e2005-04-16 15:20:36 -0700673 /* Drop packet if interface is not attached */
Jason Wangc8d68e62012-10-31 19:46:00 +0000674 if (txq >= tun->numqueues)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 goto drop;
676
Jason Wang6e914fc2012-10-31 19:45:58 +0000677 tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);
678
Jason Wangc8d68e62012-10-31 19:46:00 +0000679 BUG_ON(!tfile);
680
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700681 /* Drop if the filter does not like it.
682 * This is a noop if the filter is disabled.
683 * Filter can be enabled only for the TAP devices. */
684 if (!check_filter(&tun->txflt, skb))
685 goto drop;
686
Jason Wang54f968d2012-10-31 19:45:57 +0000687 if (tfile->socket.sk->sk_filter &&
688 sk_filter(tfile->socket.sk, skb))
Michael S. Tsirkin99405162010-02-14 01:01:10 +0000689 goto drop;
690
Jason Wangc8d68e62012-10-31 19:46:00 +0000691 /* Limit the number of packets queued by dividing the txq length by the
692 * number of queues.
693 */
Jason Wang54f968d2012-10-31 19:45:57 +0000694 if (skb_queue_len(&tfile->socket.sk->sk_receive_queue)
Jason Wangc8d68e62012-10-31 19:46:00 +0000695 >= dev->tx_queue_len / tun->numqueues){
Linus Torvalds1da177e2005-04-16 15:20:36 -0700696 if (!(tun->flags & TUN_ONE_QUEUE)) {
697 /* Normal queueing mode. */
698 /* Packet scheduler handles dropping of further packets. */
Jason Wangc8d68e62012-10-31 19:46:00 +0000699 netif_stop_subqueue(dev, txq);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700
 701 /* We won't see all dropped packets individually, so an overrun
702 * error is more appropriate. */
Jeff Garzik09f75cd2007-10-03 17:41:50 -0700703 dev->stats.tx_fifo_errors++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700704 } else {
705 /* Single queue mode.
706 * Driver handles dropping of all packets itself. */
707 goto drop;
708 }
709 }
710
Michael S. Tsirkin0110d6f2010-04-13 04:59:44 +0000711 /* Orphan the skb - required as we might hang on to it
 712 * for an indefinite time. */
Michael S. Tsirkin868eefe2012-07-20 09:23:14 +0000713 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
714 goto drop;
Michael S. Tsirkin0110d6f2010-04-13 04:59:44 +0000715 skb_orphan(skb);
716
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700717 /* Enqueue packet */
Jason Wang54f968d2012-10-31 19:45:57 +0000718 skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719
720 /* Notify and wake up reader process */
Jason Wang54f968d2012-10-31 19:45:57 +0000721 if (tfile->flags & TUN_FASYNC)
722 kill_fasync(&tfile->fasync, SIGIO, POLL_IN);
723 wake_up_interruptible_poll(&tfile->wq.wait, POLLIN |
Michael S. Tsirkin05c28282010-01-14 06:17:09 +0000724 POLLRDNORM | POLLRDBAND);
Jason Wang6e914fc2012-10-31 19:45:58 +0000725
726 rcu_read_unlock();
Patrick McHardy6ed10652009-06-23 06:03:08 +0000727 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728
729drop:
Jeff Garzik09f75cd2007-10-03 17:41:50 -0700730 dev->stats.tx_dropped++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700731 kfree_skb(skb);
Jason Wang6e914fc2012-10-31 19:45:58 +0000732 rcu_read_unlock();
Patrick McHardy6ed10652009-06-23 06:03:08 +0000733 return NETDEV_TX_OK;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734}
735
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700736static void tun_net_mclist(struct net_device *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700737{
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700738 /*
739 * This callback is supposed to deal with mc filter in
740 * _rx_ path and has nothing to do with the _tx_ path.
741 * In rx path we always accept everything userspace gives us.
742 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700743}
744
Ed Swierk4885a502007-09-16 12:21:38 -0700745#define MIN_MTU 68
746#define MAX_MTU 65535
747
748static int
749tun_net_change_mtu(struct net_device *dev, int new_mtu)
750{
751 if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU)
752 return -EINVAL;
753 dev->mtu = new_mtu;
754 return 0;
755}
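/* For example, a TAP device has a 14-byte Ethernet hard_header_len, so valid
 * MTUs here range from 68 up to 65535 - 14 = 65521; a TUN device (header
 * length 0) can go up to 65535.
 */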
756
Michał Mirosławc8f44af2011-11-15 15:29:55 +0000757static netdev_features_t tun_net_fix_features(struct net_device *dev,
758 netdev_features_t features)
Michał Mirosław88255372011-04-19 06:13:10 +0000759{
760 struct tun_struct *tun = netdev_priv(dev);
761
762 return (features & tun->set_features) | (features & ~TUN_USER_FEATURES);
763}
Neil Hormanbebd0972011-06-15 05:25:01 +0000764#ifdef CONFIG_NET_POLL_CONTROLLER
765static void tun_poll_controller(struct net_device *dev)
766{
767 /*
768 * Tun only receives frames when:
769 * 1) the char device endpoint gets data from user space
770 * 2) the tun socket gets a sendmsg call from user space
 771 * Since both of those are synchronous operations, we are guaranteed
 772 * never to have pending data when we poll for it,
 773 * so there's nothing to do here but return.
774 * We need this though so netpoll recognizes us as an interface that
775 * supports polling, which enables bridge devices in virt setups to
776 * still use netconsole
777 */
778 return;
779}
780#endif
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800781static const struct net_device_ops tun_netdev_ops = {
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000782 .ndo_uninit = tun_net_uninit,
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800783 .ndo_open = tun_net_open,
784 .ndo_stop = tun_net_close,
Stephen Hemminger00829822008-11-20 20:14:53 -0800785 .ndo_start_xmit = tun_net_xmit,
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800786 .ndo_change_mtu = tun_net_change_mtu,
Michał Mirosław88255372011-04-19 06:13:10 +0000787 .ndo_fix_features = tun_net_fix_features,
Jason Wangc8d68e62012-10-31 19:46:00 +0000788 .ndo_select_queue = tun_select_queue,
Neil Hormanbebd0972011-06-15 05:25:01 +0000789#ifdef CONFIG_NET_POLL_CONTROLLER
790 .ndo_poll_controller = tun_poll_controller,
791#endif
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800792};
793
794static const struct net_device_ops tap_netdev_ops = {
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000795 .ndo_uninit = tun_net_uninit,
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800796 .ndo_open = tun_net_open,
797 .ndo_stop = tun_net_close,
Stephen Hemminger00829822008-11-20 20:14:53 -0800798 .ndo_start_xmit = tun_net_xmit,
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800799 .ndo_change_mtu = tun_net_change_mtu,
Michał Mirosław88255372011-04-19 06:13:10 +0000800 .ndo_fix_features = tun_net_fix_features,
Jiri Pirkoafc4b132011-08-16 06:29:01 +0000801 .ndo_set_rx_mode = tun_net_mclist,
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800802 .ndo_set_mac_address = eth_mac_addr,
803 .ndo_validate_addr = eth_validate_addr,
Jason Wangc8d68e62012-10-31 19:46:00 +0000804 .ndo_select_queue = tun_select_queue,
Neil Hormanbebd0972011-06-15 05:25:01 +0000805#ifdef CONFIG_NET_POLL_CONTROLLER
806 .ndo_poll_controller = tun_poll_controller,
807#endif
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800808};
809
Jason Wang96442e422012-10-31 19:46:02 +0000810static int tun_flow_init(struct tun_struct *tun)
811{
812 int i;
813
814 tun->flow_cache = kmem_cache_create("tun_flow_cache",
815 sizeof(struct tun_flow_entry), 0, 0,
816 NULL);
817 if (!tun->flow_cache)
818 return -ENOMEM;
819
820 for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++)
821 INIT_HLIST_HEAD(&tun->flows[i]);
822
823 tun->ageing_time = TUN_FLOW_EXPIRE;
824 setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun);
825 mod_timer(&tun->flow_gc_timer,
826 round_jiffies_up(jiffies + tun->ageing_time));
827
828 return 0;
829}
830
831static void tun_flow_uninit(struct tun_struct *tun)
832{
833 del_timer_sync(&tun->flow_gc_timer);
834 tun_flow_flush(tun);
835
836 /* Wait for completion of call_rcu()'s */
837 rcu_barrier();
838 kmem_cache_destroy(tun->flow_cache);
839}
840
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841/* Initialize net device. */
842static void tun_net_init(struct net_device *dev)
843{
844 struct tun_struct *tun = netdev_priv(dev);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400845
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 switch (tun->flags & TUN_TYPE_MASK) {
847 case TUN_TUN_DEV:
Stephen Hemminger758e43b2008-11-19 22:10:37 -0800848 dev->netdev_ops = &tun_netdev_ops;
849
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850 /* Point-to-Point TUN Device */
851 dev->hard_header_len = 0;
852 dev->addr_len = 0;
853 dev->mtu = 1500;
854
855 /* Zero header length */
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400856 dev->type = ARPHRD_NONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700857 dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
858 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
859 break;
860
861 case TUN_TAP_DEV:
Kusanagi Kouichi7a0a9602008-12-29 18:23:28 -0800862 dev->netdev_ops = &tap_netdev_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 /* Ethernet TAP Device */
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -0700864 ether_setup(dev);
Neil Horman550fd082011-07-26 06:05:38 +0000865 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866
Danny Kukawkaf2cedb62012-02-15 06:45:39 +0000867 eth_hw_addr_random(dev);
Brian Braunstein36226a82007-04-26 01:00:55 -0700868
Linus Torvalds1da177e2005-04-16 15:20:36 -0700869 dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
870 break;
871 }
872}
873
874/* Character device part */
875
876/* Poll */
Jason Wangc8d68e62012-10-31 19:46:00 +0000877static unsigned int tun_chr_poll(struct file *file, poll_table *wait)
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400878{
Eric W. Biedermanb2430de2009-01-20 11:03:21 +0000879 struct tun_file *tfile = file->private_data;
880 struct tun_struct *tun = __tun_get(tfile);
Mariusz Kozlowski3c8a9c62009-07-05 19:48:35 +0000881 struct sock *sk;
Herbert Xu33dccbb2009-02-05 21:25:32 -0800882 unsigned int mask = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883
884 if (!tun)
Eric W. Biedermaneac9e902009-01-20 10:59:05 +0000885 return POLLERR;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700886
Jason Wang54f968d2012-10-31 19:45:57 +0000887 sk = tfile->socket.sk;
Mariusz Kozlowski3c8a9c62009-07-05 19:48:35 +0000888
Joe Perches6b8a66e2011-03-02 07:18:10 +0000889 tun_debug(KERN_INFO, tun, "tun_chr_poll\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890
Jason Wang54f968d2012-10-31 19:45:57 +0000891 poll_wait(file, &tfile->wq.wait, wait);
Jeff Garzik6aa20a22006-09-13 13:24:59 -0400892
Michael S. Tsirkin89f56d12009-08-30 07:04:42 +0000893 if (!skb_queue_empty(&sk->sk_receive_queue))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 mask |= POLLIN | POLLRDNORM;
895
Herbert Xu33dccbb2009-02-05 21:25:32 -0800896 if (sock_writeable(sk) ||
897 (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
898 sock_writeable(sk)))
899 mask |= POLLOUT | POLLWRNORM;
900
Eric W. Biedermanc70f1822009-01-20 11:07:17 +0000901 if (tun->dev->reg_state != NETREG_REGISTERED)
902 mask = POLLERR;
903
Eric W. Biederman631ab462009-01-20 11:00:40 +0000904 tun_put(tun);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 return mask;
906}
907
Rusty Russellf42157c2008-08-15 15:15:10 -0700908/* prepad is the amount to reserve at the front. len is the length after that.
909 * linear is a hint as to how much to copy (usually headers). */
Jason Wang54f968d2012-10-31 19:45:57 +0000910static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
stephen hemminger6f7c1562011-06-08 14:33:08 +0000911 size_t prepad, size_t len,
912 size_t linear, int noblock)
Rusty Russellf42157c2008-08-15 15:15:10 -0700913{
Jason Wang54f968d2012-10-31 19:45:57 +0000914 struct sock *sk = tfile->socket.sk;
Rusty Russellf42157c2008-08-15 15:15:10 -0700915 struct sk_buff *skb;
Herbert Xu33dccbb2009-02-05 21:25:32 -0800916 int err;
Rusty Russellf42157c2008-08-15 15:15:10 -0700917
918 /* Under a page? Don't bother with paged skb. */
Herbert Xu0eca93b2009-04-14 02:09:43 -0700919 if (prepad + len < PAGE_SIZE || !linear)
Herbert Xu33dccbb2009-02-05 21:25:32 -0800920 linear = len;
Rusty Russellf42157c2008-08-15 15:15:10 -0700921
Herbert Xu33dccbb2009-02-05 21:25:32 -0800922 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
923 &err);
Rusty Russellf42157c2008-08-15 15:15:10 -0700924 if (!skb)
Herbert Xu33dccbb2009-02-05 21:25:32 -0800925 return ERR_PTR(err);
Rusty Russellf42157c2008-08-15 15:15:10 -0700926
927 skb_reserve(skb, prepad);
928 skb_put(skb, linear);
Herbert Xu33dccbb2009-02-05 21:25:32 -0800929 skb->data_len = len - linear;
930 skb->len += len - linear;
Rusty Russellf42157c2008-08-15 15:15:10 -0700931
932 return skb;
933}
934
Michael S. Tsirkin06908992012-07-20 09:23:23 +0000935/* set skb frags from iovec, this can move to core network code for reuse */
936static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from,
937 int offset, size_t count)
938{
939 int len = iov_length(from, count) - offset;
940 int copy = skb_headlen(skb);
941 int size, offset1 = 0;
942 int i = 0;
943
944 /* Skip over from offset */
945 while (count && (offset >= from->iov_len)) {
946 offset -= from->iov_len;
947 ++from;
948 --count;
949 }
950
951 /* copy up to skb headlen */
952 while (count && (copy > 0)) {
953 size = min_t(unsigned int, copy, from->iov_len - offset);
954 if (copy_from_user(skb->data + offset1, from->iov_base + offset,
955 size))
956 return -EFAULT;
957 if (copy > size) {
958 ++from;
959 --count;
960 offset = 0;
961 } else
962 offset += size;
963 copy -= size;
964 offset1 += size;
965 }
966
967 if (len == offset1)
968 return 0;
969
970 while (count--) {
971 struct page *page[MAX_SKB_FRAGS];
972 int num_pages;
973 unsigned long base;
974 unsigned long truesize;
975
976 len = from->iov_len - offset;
977 if (!len) {
978 offset = 0;
979 ++from;
980 continue;
981 }
982 base = (unsigned long)from->iov_base + offset;
983 size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
984 if (i + size > MAX_SKB_FRAGS)
985 return -EMSGSIZE;
986 num_pages = get_user_pages_fast(base, size, 0, &page[i]);
987 if (num_pages != size) {
988 for (i = 0; i < num_pages; i++)
989 put_page(page[i]);
990 return -EFAULT;
991 }
992 truesize = size * PAGE_SIZE;
993 skb->data_len += len;
994 skb->len += len;
995 skb->truesize += truesize;
996 atomic_add(truesize, &skb->sk->sk_wmem_alloc);
997 while (len) {
998 int off = base & ~PAGE_MASK;
999 int size = min_t(int, len, PAGE_SIZE - off);
1000 __skb_fill_page_desc(skb, i, page[i], off, size);
1001 skb_shinfo(skb)->nr_frags++;
1002 /* increase sk_wmem_alloc */
1003 base += size;
1004 len -= size;
1005 i++;
1006 }
1007 offset = 0;
1008 ++from;
1009 }
1010 return 0;
1011}
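/* Note on the page math above: ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >>
 * PAGE_SHIFT is the number of pages the [base, base + len) range touches.
 * E.g. with 4 KiB pages, an 8000-byte iovec starting 100 bytes into a page
 * needs (100 + 8000 + 4095) >> 12 == 2 pages.
 */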
1012
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013/* Get packet from user space buffer */
Jason Wang54f968d2012-10-31 19:45:57 +00001014static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1015 void *msg_control, const struct iovec *iv,
1016 size_t total_len, size_t count, int noblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001017{
Harvey Harrison09640e62009-02-01 00:45:17 -08001018 struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
Linus Torvalds1da177e2005-04-16 15:20:36 -07001019 struct sk_buff *skb;
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001020 size_t len = total_len, align = NET_SKB_PAD;
Rusty Russellf43798c2008-07-03 03:48:02 -07001021 struct virtio_net_hdr gso = { 0 };
Michael S. Tsirkin6f26c9a2009-04-20 01:26:11 +00001022 int offset = 0;
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001023 int copylen;
1024 bool zerocopy = false;
1025 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026
1027 if (!(tun->flags & TUN_NO_PI)) {
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001028 if ((len -= sizeof(pi)) > total_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 return -EINVAL;
1030
Michael S. Tsirkin6f26c9a2009-04-20 01:26:11 +00001031 if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001032 return -EFAULT;
Michael S. Tsirkin6f26c9a2009-04-20 01:26:11 +00001033 offset += sizeof(pi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001034 }
1035
Rusty Russellf43798c2008-07-03 03:48:02 -07001036 if (tun->flags & TUN_VNET_HDR) {
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001037 if ((len -= tun->vnet_hdr_sz) > total_len)
Rusty Russellf43798c2008-07-03 03:48:02 -07001038 return -EINVAL;
1039
Michael S. Tsirkin6f26c9a2009-04-20 01:26:11 +00001040 if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
Rusty Russellf43798c2008-07-03 03:48:02 -07001041 return -EFAULT;
1042
Herbert Xu49091222009-06-08 00:20:01 -07001043 if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
1044 gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
1045 gso.hdr_len = gso.csum_start + gso.csum_offset + 2;
1046
Rusty Russellf43798c2008-07-03 03:48:02 -07001047 if (gso.hdr_len > len)
1048 return -EINVAL;
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001049 offset += tun->vnet_hdr_sz;
Rusty Russellf43798c2008-07-03 03:48:02 -07001050 }
1051
Rusty Russelle01bf1c2008-04-12 18:49:30 -07001052 if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
stephen hemmingera504b862011-06-08 14:33:07 +00001053 align += NET_IP_ALIGN;
Herbert Xu0eca93b2009-04-14 02:09:43 -07001054 if (unlikely(len < ETH_HLEN ||
1055 (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
Rusty Russelle01bf1c2008-04-12 18:49:30 -07001056 return -EINVAL;
1057 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001058
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001059 if (msg_control)
1060 zerocopy = true;
1061
1062 if (zerocopy) {
1063 /* Userspace may produce vectors with count greater than
1064 * MAX_SKB_FRAGS, so we need to linearize parts of the skb
 1065 * to let the rest of the data fit in the frags.
1066 */
1067 if (count > MAX_SKB_FRAGS) {
1068 copylen = iov_length(iv, count - MAX_SKB_FRAGS);
1069 if (copylen < offset)
1070 copylen = 0;
1071 else
1072 copylen -= offset;
1073 } else
1074 copylen = 0;
 1075 /* There are 256 bytes to be copied into the skb, so there is enough
 1076 * room to expand the skb head in case it is needed.
1077 * The rest of the buffer is mapped from userspace.
1078 */
1079 if (copylen < gso.hdr_len)
1080 copylen = gso.hdr_len;
1081 if (!copylen)
1082 copylen = GOODCOPY_LEN;
1083 } else
1084 copylen = len;
1085
Jason Wang54f968d2012-10-31 19:45:57 +00001086 skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock);
Herbert Xu33dccbb2009-02-05 21:25:32 -08001087 if (IS_ERR(skb)) {
1088 if (PTR_ERR(skb) != -EAGAIN)
1089 tun->dev->stats.rx_dropped++;
1090 return PTR_ERR(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001091 }
1092
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001093 if (zerocopy)
1094 err = zerocopy_sg_from_iovec(skb, iv, offset, count);
1095 else
1096 err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len);
1097
1098 if (err) {
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001099 tun->dev->stats.rx_dropped++;
Dave Jones8f227572006-03-11 18:49:13 -08001100 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001101 return -EFAULT;
Dave Jones8f227572006-03-11 18:49:13 -08001102 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103
Rusty Russellf43798c2008-07-03 03:48:02 -07001104 if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
1105 if (!skb_partial_csum_set(skb, gso.csum_start,
1106 gso.csum_offset)) {
1107 tun->dev->stats.rx_frame_errors++;
1108 kfree_skb(skb);
1109 return -EINVAL;
1110 }
Michał Mirosław88255372011-04-19 06:13:10 +00001111 }
Rusty Russellf43798c2008-07-03 03:48:02 -07001112
Linus Torvalds1da177e2005-04-16 15:20:36 -07001113 switch (tun->flags & TUN_TYPE_MASK) {
1114 case TUN_TUN_DEV:
Ang Way Chuangf09f7ee2008-06-17 21:10:33 -07001115 if (tun->flags & TUN_NO_PI) {
1116 switch (skb->data[0] & 0xf0) {
1117 case 0x40:
1118 pi.proto = htons(ETH_P_IP);
1119 break;
1120 case 0x60:
1121 pi.proto = htons(ETH_P_IPV6);
1122 break;
1123 default:
1124 tun->dev->stats.rx_dropped++;
1125 kfree_skb(skb);
1126 return -EINVAL;
1127 }
1128 }
1129
Arnaldo Carvalho de Melo459a98e2007-03-19 15:30:44 -07001130 skb_reset_mac_header(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001131 skb->protocol = pi.proto;
Arnaldo Carvalho de Melo4c13eb62007-04-25 17:40:23 -07001132 skb->dev = tun->dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001133 break;
1134 case TUN_TAP_DEV:
1135 skb->protocol = eth_type_trans(skb, tun->dev);
1136 break;
Joe Perches6403eab2011-06-03 11:51:20 +00001137 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001138
Rusty Russellf43798c2008-07-03 03:48:02 -07001139 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1140 pr_debug("GSO!\n");
1141 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
1142 case VIRTIO_NET_HDR_GSO_TCPV4:
1143 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1144 break;
1145 case VIRTIO_NET_HDR_GSO_TCPV6:
1146 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1147 break;
Sridhar Samudralae36aa252009-07-14 14:21:04 +00001148 case VIRTIO_NET_HDR_GSO_UDP:
1149 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1150 break;
Rusty Russellf43798c2008-07-03 03:48:02 -07001151 default:
1152 tun->dev->stats.rx_frame_errors++;
1153 kfree_skb(skb);
1154 return -EINVAL;
1155 }
1156
1157 if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
1158 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1159
1160 skb_shinfo(skb)->gso_size = gso.gso_size;
1161 if (skb_shinfo(skb)->gso_size == 0) {
1162 tun->dev->stats.rx_frame_errors++;
1163 kfree_skb(skb);
1164 return -EINVAL;
1165 }
1166
1167 /* Header must be checked, and gso_segs computed. */
1168 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1169 skb_shinfo(skb)->gso_segs = 0;
1170 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001171
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001172 /* copy skb_ubuf_info for callback when skb has no error */
1173 if (zerocopy) {
1174 skb_shinfo(skb)->destructor_arg = msg_control;
1175 skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY;
1176 }
1177
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178 netif_rx_ni(skb);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001179
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001180 tun->dev->stats.rx_packets++;
1181 tun->dev->stats.rx_bytes += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182
Jason Wang96442e422012-10-31 19:46:02 +00001183 tun_flow_update(tun, skb, tfile->queue_index);
Michael S. Tsirkin06908992012-07-20 09:23:23 +00001184 return total_len;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001185}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186
Badari Pulavartyee0b3e62006-09-30 23:28:47 -07001187static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
1188 unsigned long count, loff_t pos)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001189{
Herbert Xu33dccbb2009-02-05 21:25:32 -08001190 struct file *file = iocb->ki_filp;
Herbert Xuab46d772009-02-14 20:46:39 -08001191 struct tun_struct *tun = tun_get(file);
Jason Wang54f968d2012-10-31 19:45:57 +00001192 struct tun_file *tfile = file->private_data;
Eric W. Biederman631ab462009-01-20 11:00:40 +00001193 ssize_t result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194
1195 if (!tun)
1196 return -EBADFD;
1197
Joe Perches6b8a66e2011-03-02 07:18:10 +00001198 tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001199
Jason Wang54f968d2012-10-31 19:45:57 +00001200 result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count),
1201 count, file->f_flags & O_NONBLOCK);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001202
1203 tun_put(tun);
1204 return result;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001205}
1206
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207/* Put packet to the user space buffer */
stephen hemminger6f7c1562011-06-08 14:33:08 +00001208static ssize_t tun_put_user(struct tun_struct *tun,
Jason Wang54f968d2012-10-31 19:45:57 +00001209 struct tun_file *tfile,
stephen hemminger6f7c1562011-06-08 14:33:08 +00001210 struct sk_buff *skb,
1211 const struct iovec *iv, int len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212{
1213 struct tun_pi pi = { 0, skb->protocol };
1214 ssize_t total = 0;
1215
1216 if (!(tun->flags & TUN_NO_PI)) {
1217 if ((len -= sizeof(pi)) < 0)
1218 return -EINVAL;
1219
1220 if (len < skb->len) {
 1221 /* Packet will be stripped */
1222 pi.flags |= TUN_PKT_STRIP;
1223 }
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001224
Michael S. Tsirkin43b39dc2009-04-20 01:25:59 +00001225 if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001226 return -EFAULT;
1227 total += sizeof(pi);
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001228 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001229
Rusty Russellf43798c2008-07-03 03:48:02 -07001230 if (tun->flags & TUN_VNET_HDR) {
1231 struct virtio_net_hdr gso = { 0 }; /* no info leak */
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001232 if ((len -= tun->vnet_hdr_sz) < 0)
Rusty Russellf43798c2008-07-03 03:48:02 -07001233 return -EINVAL;
1234
1235 if (skb_is_gso(skb)) {
1236 struct skb_shared_info *sinfo = skb_shinfo(skb);
1237
1238 /* This is a hint as to how much should be linear. */
1239 gso.hdr_len = skb_headlen(skb);
1240 gso.gso_size = sinfo->gso_size;
1241 if (sinfo->gso_type & SKB_GSO_TCPV4)
1242 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1243 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1244 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
Sridhar Samudralae36aa252009-07-14 14:21:04 +00001245 else if (sinfo->gso_type & SKB_GSO_UDP)
1246 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
Michael S. Tsirkinef3db4a2010-07-21 04:32:45 +00001247 else {
Joe Perches6b8a66e2011-03-02 07:18:10 +00001248 pr_err("unexpected GSO type: "
Michael S. Tsirkinef3db4a2010-07-21 04:32:45 +00001249 "0x%x, gso_size %d, hdr_len %d\n",
1250 sinfo->gso_type, gso.gso_size,
1251 gso.hdr_len);
1252 print_hex_dump(KERN_ERR, "tun: ",
1253 DUMP_PREFIX_NONE,
1254 16, 1, skb->head,
1255 min((int)gso.hdr_len, 64), true);
1256 WARN_ON_ONCE(1);
1257 return -EINVAL;
1258 }
Rusty Russellf43798c2008-07-03 03:48:02 -07001259 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
1260 gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
1261 } else
1262 gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;
1263
1264 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1265 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
Michał Mirosław55508d62010-12-14 15:24:08 +00001266 gso.csum_start = skb_checksum_start_offset(skb);
Rusty Russellf43798c2008-07-03 03:48:02 -07001267 gso.csum_offset = skb->csum_offset;
Jason Wang10a8d942011-06-10 00:56:17 +00001268 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1269 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
Rusty Russellf43798c2008-07-03 03:48:02 -07001270 } /* else everything is zero */
1271
Michael S. Tsirkin43b39dc2009-04-20 01:25:59 +00001272 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1273 sizeof(gso))))
Rusty Russellf43798c2008-07-03 03:48:02 -07001274 return -EFAULT;
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001275 total += tun->vnet_hdr_sz;
Rusty Russellf43798c2008-07-03 03:48:02 -07001276 }
1277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 len = min_t(int, skb->len, len);
1279
Michael S. Tsirkin43b39dc2009-04-20 01:25:59 +00001280 skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001281 total += skb->len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
Jeff Garzik09f75cd2007-10-03 17:41:50 -07001283 tun->dev->stats.tx_packets++;
1284 tun->dev->stats.tx_bytes += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285
1286 return total;
1287}
1288
Jason Wang54f968d2012-10-31 19:45:57 +00001289static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile,
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001290 struct kiocb *iocb, const struct iovec *iv,
1291 ssize_t len, int noblock)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001292{
Linus Torvalds1da177e2005-04-16 15:20:36 -07001293 DECLARE_WAITQUEUE(wait, current);
1294 struct sk_buff *skb;
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001295 ssize_t ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296
Joe Perches6b8a66e2011-03-02 07:18:10 +00001297 tun_debug(KERN_INFO, tun, "tun_chr_read\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298
Amos Kong61a5ff12011-06-09 00:27:10 -07001299 if (unlikely(!noblock))
Jason Wang54f968d2012-10-31 19:45:57 +00001300 add_wait_queue(&tfile->wq.wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301 while (len) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001302 current->state = TASK_INTERRUPTIBLE;
1303
1304 /* Read frames from the queue */
Jason Wang54f968d2012-10-31 19:45:57 +00001305 if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) {
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001306 if (noblock) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 ret = -EAGAIN;
1308 break;
1309 }
1310 if (signal_pending(current)) {
1311 ret = -ERESTARTSYS;
1312 break;
1313 }
Eric W. Biedermanc70f1822009-01-20 11:07:17 +00001314 if (tun->dev->reg_state != NETREG_REGISTERED) {
1315 ret = -EIO;
1316 break;
1317 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318
1319 /* Nothing to read, let's sleep */
1320 schedule();
1321 continue;
1322 }
Jason Wangc8d68e62012-10-31 19:46:00 +00001323 netif_wake_subqueue(tun->dev, tfile->queue_index);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001324
Jason Wang54f968d2012-10-31 19:45:57 +00001325 ret = tun_put_user(tun, tfile, skb, iv, len);
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001326 kfree_skb(skb);
1327 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 }
1329
1330 current->state = TASK_RUNNING;
Amos Kong61a5ff12011-06-09 00:27:10 -07001331 if (unlikely(!noblock))
Jason Wang54f968d2012-10-31 19:45:57 +00001332 remove_wait_queue(&tfile->wq.wait, &wait);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001333
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001334 return ret;
1335}
1336
1337static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
1338 unsigned long count, loff_t pos)
1339{
1340 struct file *file = iocb->ki_filp;
1341 struct tun_file *tfile = file->private_data;
1342 struct tun_struct *tun = __tun_get(tfile);
1343 ssize_t len, ret;
1344
1345 if (!tun)
1346 return -EBADFD;
1347 len = iov_length(iv, count);
1348 if (len < 0) {
1349 ret = -EINVAL;
1350 goto out;
1351 }
1352
Jason Wang54f968d2012-10-31 19:45:57 +00001353 ret = tun_do_read(tun, tfile, iocb, iv, len,
1354 file->f_flags & O_NONBLOCK);
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001355 ret = min_t(ssize_t, ret, len);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001356out:
1357 tun_put(tun);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001358 return ret;
1359}
1360
Jason Wang96442e422012-10-31 19:46:02 +00001361static void tun_free_netdev(struct net_device *dev)
1362{
1363 struct tun_struct *tun = netdev_priv(dev);
1364
1365 tun_flow_uninit(tun);
1366 free_netdev(dev);
1367}
1368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369static void tun_setup(struct net_device *dev)
1370{
1371 struct tun_struct *tun = netdev_priv(dev);
1372
Eric W. Biederman0625c882012-02-07 16:48:55 -08001373 tun->owner = INVALID_UID;
1374 tun->group = INVALID_GID;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001375
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 dev->ethtool_ops = &tun_ethtool_ops;
Jason Wang96442e422012-10-31 19:46:02 +00001377 dev->destructor = tun_free_netdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378}
1379
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08001380/* Trivial set of netlink ops to allow deleting tun or tap
1381 * device with netlink.
1382 */
1383static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
1384{
1385 return -EINVAL;
1386}
1387
1388static struct rtnl_link_ops tun_link_ops __read_mostly = {
1389 .kind = DRV_NAME,
1390 .priv_size = sizeof(struct tun_struct),
1391 .setup = tun_setup,
1392 .validate = tun_validate,
1393};
1394
Herbert Xu33dccbb2009-02-05 21:25:32 -08001395static void tun_sock_write_space(struct sock *sk)
1396{
Jason Wang54f968d2012-10-31 19:45:57 +00001397 struct tun_file *tfile;
Eric Dumazet43815482010-04-29 11:01:49 +00001398 wait_queue_head_t *wqueue;
Herbert Xu33dccbb2009-02-05 21:25:32 -08001399
1400 if (!sock_writeable(sk))
1401 return;
1402
Herbert Xu33dccbb2009-02-05 21:25:32 -08001403 if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
1404 return;
1405
Eric Dumazet43815482010-04-29 11:01:49 +00001406 wqueue = sk_sleep(sk);
1407 if (wqueue && waitqueue_active(wqueue))
1408 wake_up_interruptible_sync_poll(wqueue, POLLOUT |
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001409 POLLWRNORM | POLLWRBAND);
Herbert Xuc722c622009-06-03 21:45:55 -07001410
Jason Wang54f968d2012-10-31 19:45:57 +00001411 tfile = container_of(sk, struct tun_file, sk);
1412 kill_fasync(&tfile->fasync, SIGIO, POLL_OUT);
Herbert Xu33dccbb2009-02-05 21:25:32 -08001413}
1414
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001415static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
1416 struct msghdr *m, size_t total_len)
1417{
Jason Wang54f968d2012-10-31 19:45:57 +00001418 int ret;
1419 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1420 struct tun_struct *tun = __tun_get(tfile);
1421
1422 if (!tun)
1423 return -EBADFD;
Jason Wang54f968d2012-10-31 19:45:57 +00001424 ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len,
1425 m->msg_iovlen, m->msg_flags & MSG_DONTWAIT);
1426 tun_put(tun);
1427 return ret;
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001428}
1429
Jason Wang54f968d2012-10-31 19:45:57 +00001430
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001431static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
1432 struct msghdr *m, size_t total_len,
1433 int flags)
1434{
Jason Wang54f968d2012-10-31 19:45:57 +00001435 struct tun_file *tfile = container_of(sock, struct tun_file, socket);
1436 struct tun_struct *tun = __tun_get(tfile);
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001437 int ret;
Jason Wang54f968d2012-10-31 19:45:57 +00001438
1439 if (!tun)
1440 return -EBADFD;
1441
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001442 if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
1443 return -EINVAL;
Jason Wang54f968d2012-10-31 19:45:57 +00001444 ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len,
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001445 flags & MSG_DONTWAIT);
1446 if (ret > total_len) {
1447 m->msg_flags |= MSG_TRUNC;
1448 ret = flags & MSG_TRUNC ? ret : total_len;
1449 }
Jason Wang54f968d2012-10-31 19:45:57 +00001450 tun_put(tun);
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001451 return ret;
1452}
1453
Stanislav Kinsbursky1ab5ecb2012-03-12 02:59:41 +00001454static int tun_release(struct socket *sock)
1455{
1456 if (sock->sk)
1457 sock_put(sock->sk);
1458 return 0;
1459}
1460
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001461/* Ops structure to mimic raw sockets with tun */
1462static const struct proto_ops tun_socket_ops = {
1463 .sendmsg = tun_sendmsg,
1464 .recvmsg = tun_recvmsg,
Stanislav Kinsbursky1ab5ecb2012-03-12 02:59:41 +00001465 .release = tun_release,
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00001466};
1467
Herbert Xu33dccbb2009-02-05 21:25:32 -08001468static struct proto tun_proto = {
1469 .name = "tun",
1470 .owner = THIS_MODULE,
Jason Wang54f968d2012-10-31 19:45:57 +00001471 .obj_size = sizeof(struct tun_file),
Herbert Xu33dccbb2009-02-05 21:25:32 -08001472};
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08001473
David Woodhouse980c9e82009-05-09 22:54:21 -07001474static int tun_flags(struct tun_struct *tun)
1475{
1476 int flags = 0;
1477
1478 if (tun->flags & TUN_TUN_DEV)
1479 flags |= IFF_TUN;
1480 else
1481 flags |= IFF_TAP;
1482
1483 if (tun->flags & TUN_NO_PI)
1484 flags |= IFF_NO_PI;
1485
1486 if (tun->flags & TUN_ONE_QUEUE)
1487 flags |= IFF_ONE_QUEUE;
1488
1489 if (tun->flags & TUN_VNET_HDR)
1490 flags |= IFF_VNET_HDR;
1491
Jason Wangc8d68e62012-10-31 19:46:00 +00001492 if (tun->flags & TUN_TAP_MQ)
1493 flags |= IFF_MULTI_QUEUE;
1494
David Woodhouse980c9e82009-05-09 22:54:21 -07001495 return flags;
1496}
1497
1498static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
1499 char *buf)
1500{
1501 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
1502 return sprintf(buf, "0x%x\n", tun_flags(tun));
1503}
1504
1505static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
1506 char *buf)
1507{
1508 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
Eric W. Biederman0625c882012-02-07 16:48:55 -08001509	return uid_valid(tun->owner) ?
1510 sprintf(buf, "%u\n",
1511 from_kuid_munged(current_user_ns(), tun->owner)):
1512 sprintf(buf, "-1\n");
David Woodhouse980c9e82009-05-09 22:54:21 -07001513}
1514
1515static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
1516 char *buf)
1517{
1518 struct tun_struct *tun = netdev_priv(to_net_dev(dev));
Eric W. Biederman0625c882012-02-07 16:48:55 -08001519 return gid_valid(tun->group) ?
1520 sprintf(buf, "%u\n",
1521 from_kgid_munged(current_user_ns(), tun->group)):
1522 sprintf(buf, "-1\n");
David Woodhouse980c9e82009-05-09 22:54:21 -07001523}
1524
1525static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
1526static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
1527static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
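
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * the three read-only attributes above show up under the netdev's sysfs
 * directory, e.g. /sys/class/net/tap0/tun_flags ("tap0" is only an example
 * name); tun_show_flags() prints the value in the "0x%x" form parsed below.
 */
#if 0	/* example only, never built as part of the kernel */
#include <stdio.h>

static int read_tun_flags(const char *ifname, unsigned int *flags)
{
	char path[128];
	FILE *f;
	int n;

	snprintf(path, sizeof(path), "/sys/class/net/%s/tun_flags", ifname);
	f = fopen(path, "r");
	if (!f)
		return -1;
	n = fscanf(f, "0x%x", flags);
	fclose(f);
	return n == 1 ? 0 : -1;
}
#endif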
1528
Pavel Emelyanovd647a592008-04-16 00:41:16 -07001529static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530{
1531 struct tun_struct *tun;
Jason Wang54f968d2012-10-31 19:45:57 +00001532 struct tun_file *tfile = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533 struct net_device *dev;
1534 int err;
1535
Eric W. Biederman74a3e5a2009-01-20 10:56:20 +00001536 dev = __dev_get_by_name(net, ifr->ifr_name);
1537 if (dev) {
David Woodhousef85ba782009-04-27 03:23:54 -07001538 if (ifr->ifr_flags & IFF_TUN_EXCL)
1539 return -EBUSY;
Eric W. Biederman74a3e5a2009-01-20 10:56:20 +00001540 if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
1541 tun = netdev_priv(dev);
1542 else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
1543 tun = netdev_priv(dev);
1544 else
1545 return -EINVAL;
1546
Jason Wangcde8b152012-10-31 19:46:01 +00001547 if (tun_not_capable(tun))
Paul Moore2b980db2009-08-28 18:12:43 -04001548 return -EPERM;
Jason Wang54f968d2012-10-31 19:45:57 +00001549 err = security_tun_dev_attach(tfile->socket.sk);
Paul Moore2b980db2009-08-28 18:12:43 -04001550 if (err < 0)
1551 return err;
1552
Eric W. Biedermana7385ba2009-01-20 10:57:48 +00001553 err = tun_attach(tun, file);
1554 if (err < 0)
1555 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001556	} else {
1558 char *name;
1559 unsigned long flags = 0;
1560
David Woodhouseca6bb5d2006-06-22 16:07:52 -07001561 if (!capable(CAP_NET_ADMIN))
1562 return -EPERM;
Paul Moore2b980db2009-08-28 18:12:43 -04001563 err = security_tun_dev_create();
1564 if (err < 0)
1565 return err;
David Woodhouseca6bb5d2006-06-22 16:07:52 -07001566
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 /* Set dev type */
1568 if (ifr->ifr_flags & IFF_TUN) {
1569 /* TUN device */
1570 flags |= TUN_TUN_DEV;
1571 name = "tun%d";
1572 } else if (ifr->ifr_flags & IFF_TAP) {
1573 /* TAP device */
1574 flags |= TUN_TAP_DEV;
1575 name = "tap%d";
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001576 } else
Kusanagi Kouichi36989b92009-09-16 21:36:13 +00001577 return -EINVAL;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04001578
Linus Torvalds1da177e2005-04-16 15:20:36 -07001579 if (*ifr->ifr_name)
1580 name = ifr->ifr_name;
1581
Jason Wangc8d68e62012-10-31 19:46:00 +00001582 dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
1583 tun_setup,
1584 MAX_TAP_QUEUES, MAX_TAP_QUEUES);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585 if (!dev)
1586 return -ENOMEM;
1587
Pavel Emelyanovfc54c652008-04-16 00:41:53 -07001588 dev_net_set(dev, net);
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08001589 dev->rtnl_link_ops = &tun_link_ops;
Stephen Hemminger758e43b2008-11-19 22:10:37 -08001590
Linus Torvalds1da177e2005-04-16 15:20:36 -07001591 tun = netdev_priv(dev);
1592 tun->dev = dev;
1593 tun->flags = flags;
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001594 tun->txflt.count = 0;
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001595 tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001596
Jason Wang54f968d2012-10-31 19:45:57 +00001597 tun->filter_attached = false;
1598 tun->sndbuf = tfile->socket.sk->sk_sndbuf;
Herbert Xu33dccbb2009-02-05 21:25:32 -08001599
Jason Wang96442e422012-10-31 19:46:02 +00001600 spin_lock_init(&tun->lock);
1601
Jason Wang54f968d2012-10-31 19:45:57 +00001602 security_tun_dev_post_create(&tfile->sk);
Paul Moore2b980db2009-08-28 18:12:43 -04001603
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 tun_net_init(dev);
1605
Jason Wang96442e422012-10-31 19:46:02 +00001606		err = tun_flow_init(tun);
		if (err < 0)
			goto err_free_dev;
1608
Michał Mirosław88255372011-04-19 06:13:10 +00001609 dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
1610 TUN_USER_FEATURES;
1611 dev->features = dev->hw_features;
1612
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 err = register_netdevice(tun->dev);
1614 if (err < 0)
Jason Wang54f968d2012-10-31 19:45:57 +00001615 goto err_free_dev;
Herbert Xu9c3fea62009-04-18 14:15:52 +00001616
David Woodhouse980c9e82009-05-09 22:54:21 -07001617 if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
1618 device_create_file(&tun->dev->dev, &dev_attr_owner) ||
1619 device_create_file(&tun->dev->dev, &dev_attr_group))
Joe Perches6b8a66e2011-03-02 07:18:10 +00001620 pr_err("Failed to create tun sysfs files\n");
David Woodhouse980c9e82009-05-09 22:54:21 -07001621
Eric W. Biedermana7385ba2009-01-20 10:57:48 +00001622 err = tun_attach(tun, file);
1623 if (err < 0)
Jason Wangc8d68e62012-10-31 19:46:00 +00001624 goto err_free_dev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001625 }
1626
Joe Perches6b8a66e2011-03-02 07:18:10 +00001627 tun_debug(KERN_INFO, tun, "tun_set_iff\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628
1629 if (ifr->ifr_flags & IFF_NO_PI)
1630 tun->flags |= TUN_NO_PI;
Nathaniel Filardoa26af1e2008-02-05 03:05:07 -08001631 else
1632 tun->flags &= ~TUN_NO_PI;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001633
1634 if (ifr->ifr_flags & IFF_ONE_QUEUE)
1635 tun->flags |= TUN_ONE_QUEUE;
Nathaniel Filardoa26af1e2008-02-05 03:05:07 -08001636 else
1637 tun->flags &= ~TUN_ONE_QUEUE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001638
Rusty Russellf43798c2008-07-03 03:48:02 -07001639 if (ifr->ifr_flags & IFF_VNET_HDR)
1640 tun->flags |= TUN_VNET_HDR;
1641 else
1642 tun->flags &= ~TUN_VNET_HDR;
1643
Jason Wangc8d68e62012-10-31 19:46:00 +00001644 if (ifr->ifr_flags & IFF_MULTI_QUEUE)
1645 tun->flags |= TUN_TAP_MQ;
1646 else
1647 tun->flags &= ~TUN_TAP_MQ;
1648
Max Krasnyanskye35259a2008-07-10 16:59:11 -07001649 /* Make sure persistent devices do not get stuck in
1650 * xoff state.
1651 */
1652 if (netif_running(tun->dev))
Jason Wangc8d68e62012-10-31 19:46:00 +00001653 netif_tx_wake_all_queues(tun->dev);
Max Krasnyanskye35259a2008-07-10 16:59:11 -07001654
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655 strcpy(ifr->ifr_name, tun->dev->name);
1656 return 0;
1657
1658 err_free_dev:
1659 free_netdev(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001660 return err;
1661}
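
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * the usual way tun_set_iff() is reached.  Opening /dev/net/tun and issuing
 * TUNSETIFF either attaches to an existing device or, with CAP_NET_ADMIN,
 * creates a new one; leaving ifr_name empty lets the kernel pick "tap%d".
 * Adding IFF_MULTI_QUEUE or IFF_VNET_HDR to ifr_flags is optional.
 */
#if 0	/* example only, never built as part of the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_open(char name[IFNAMSIZ])
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	if (name[0])
		strncpy(ifr.ifr_name, name, IFNAMSIZ);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {
		close(fd);
		return -1;
	}
	memcpy(name, ifr.ifr_name, IFNAMSIZ);	/* name actually chosen */
	return fd;
}
#endif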
1662
Herbert Xu876bfd42009-08-06 14:22:44 +00001663static int tun_get_iff(struct net *net, struct tun_struct *tun,
1664 struct ifreq *ifr)
Mark McLoughline3b99552008-08-15 15:09:56 -07001665{
Joe Perches6b8a66e2011-03-02 07:18:10 +00001666 tun_debug(KERN_INFO, tun, "tun_get_iff\n");
Mark McLoughline3b99552008-08-15 15:09:56 -07001667
1668 strcpy(ifr->ifr_name, tun->dev->name);
1669
David Woodhouse980c9e82009-05-09 22:54:21 -07001670 ifr->ifr_flags = tun_flags(tun);
Mark McLoughline3b99552008-08-15 15:09:56 -07001671
1672 return 0;
1673}
1674
Rusty Russell5228ddc2008-07-03 03:46:16 -07001675/* This is like a cut-down ethtool ops, except done via tun fd so no
1676 * privs required. */
Michał Mirosław88255372011-04-19 06:13:10 +00001677static int set_offload(struct tun_struct *tun, unsigned long arg)
Rusty Russell5228ddc2008-07-03 03:46:16 -07001678{
Michał Mirosławc8f44af2011-11-15 15:29:55 +00001679 netdev_features_t features = 0;
Rusty Russell5228ddc2008-07-03 03:46:16 -07001680
1681 if (arg & TUN_F_CSUM) {
Michał Mirosław88255372011-04-19 06:13:10 +00001682 features |= NETIF_F_HW_CSUM;
Rusty Russell5228ddc2008-07-03 03:46:16 -07001683 arg &= ~TUN_F_CSUM;
1684
1685 if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
1686 if (arg & TUN_F_TSO_ECN) {
1687 features |= NETIF_F_TSO_ECN;
1688 arg &= ~TUN_F_TSO_ECN;
1689 }
1690 if (arg & TUN_F_TSO4)
1691 features |= NETIF_F_TSO;
1692 if (arg & TUN_F_TSO6)
1693 features |= NETIF_F_TSO6;
1694 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1695 }
Sridhar Samudralae36aa252009-07-14 14:21:04 +00001696
1697 if (arg & TUN_F_UFO) {
1698 features |= NETIF_F_UFO;
1699 arg &= ~TUN_F_UFO;
1700 }
Rusty Russell5228ddc2008-07-03 03:46:16 -07001701 }
1702
1703 /* This gives the user a way to test for new features in future by
1704 * trying to set them. */
1705 if (arg)
1706 return -EINVAL;
1707
Michał Mirosław88255372011-04-19 06:13:10 +00001708 tun->set_features = features;
1709 netdev_update_features(tun->dev);
Rusty Russell5228ddc2008-07-03 03:46:16 -07001710
1711 return 0;
1712}
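
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * set_offload() above is driven by the TUNSETOFFLOAD ioctl, whose argument
 * is the TUN_F_* bitmask itself rather than a pointer.  Which offloads
 * actually take effect is still bounded by dev->hw_features.
 */
#if 0	/* example only, never built as part of the kernel */
#include <sys/ioctl.h>
#include <linux/if_tun.h>

static int tap_enable_offloads(int fd)
{
	unsigned long offloads = TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6;

	return ioctl(fd, TUNSETOFFLOAD, offloads);
}
#endif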
1713
Jason Wangc8d68e62012-10-31 19:46:00 +00001714static void tun_detach_filter(struct tun_struct *tun, int n)
1715{
1716 int i;
1717 struct tun_file *tfile;
1718
1719 for (i = 0; i < n; i++) {
1720 tfile = rcu_dereference_protected(tun->tfiles[i],
1721 lockdep_rtnl_is_held());
1722 sk_detach_filter(tfile->socket.sk);
1723 }
1724
1725 tun->filter_attached = false;
1726}
1727
1728static int tun_attach_filter(struct tun_struct *tun)
1729{
1730 int i, ret = 0;
1731 struct tun_file *tfile;
1732
1733 for (i = 0; i < tun->numqueues; i++) {
1734 tfile = rcu_dereference_protected(tun->tfiles[i],
1735 lockdep_rtnl_is_held());
1736 ret = sk_attach_filter(&tun->fprog, tfile->socket.sk);
1737 if (ret) {
1738 tun_detach_filter(tun, i);
1739 return ret;
1740 }
1741 }
1742
1743 tun->filter_attached = true;
1744 return ret;
1745}
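
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * TUNATTACHFILTER copies a classic BPF program into tun->fprog and
 * tun_attach_filter() above installs it on every queue; TUNDETACHFILTER
 * undoes it.  Both are accepted only for tap devices.  The filter below is
 * the trivial "accept everything" program.
 */
#if 0	/* example only, never built as part of the kernel */
#include <sys/ioctl.h>
#include <linux/filter.h>
#include <linux/if_tun.h>

static int tap_attach_accept_all(int fd)
{
	struct sock_filter insns[] = {
		{ BPF_RET | BPF_K, 0, 0, 0xffffffff },	/* accept whole packet */
	};
	struct sock_fprog fprog = {
		.len	= 1,
		.filter	= insns,
	};

	return ioctl(fd, TUNATTACHFILTER, &fprog);
}
#endif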
1746
1747static void tun_set_sndbuf(struct tun_struct *tun)
1748{
1749 struct tun_file *tfile;
1750 int i;
1751
1752 for (i = 0; i < tun->numqueues; i++) {
1753 tfile = rcu_dereference_protected(tun->tfiles[i],
1754 lockdep_rtnl_is_held());
1755 tfile->socket.sk->sk_sndbuf = tun->sndbuf;
1756 }
1757}
1758
Jason Wangcde8b152012-10-31 19:46:01 +00001759static int tun_set_queue(struct file *file, struct ifreq *ifr)
1760{
1761 struct tun_file *tfile = file->private_data;
1762 struct tun_struct *tun;
1763 struct net_device *dev;
1764 int ret = 0;
1765
1766 rtnl_lock();
1767
1768 if (ifr->ifr_flags & IFF_ATTACH_QUEUE) {
1769 dev = __dev_get_by_name(tfile->net, ifr->ifr_name);
1770 if (!dev) {
1771 ret = -EINVAL;
1772 goto unlock;
1773 }
1774
1775 tun = netdev_priv(dev);
1776 if (dev->netdev_ops != &tap_netdev_ops &&
1777 dev->netdev_ops != &tun_netdev_ops)
1778 ret = -EINVAL;
1779 else if (tun_not_capable(tun))
1780 ret = -EPERM;
1781 else
1782 ret = tun_attach(tun, file);
1783 } else if (ifr->ifr_flags & IFF_DETACH_QUEUE)
1784 __tun_detach(tfile, false);
1785 else
1786 ret = -EINVAL;
1787
1788unlock:
1789 rtnl_unlock();
1790 return ret;
1791}
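
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * with a multiqueue device each queue is backed by one fd, and TUNSETQUEUE
 * lets a queue be detached from and re-attached to the device named in
 * ifr_name ("tap0" below is only an example).
 */
#if 0	/* example only, never built as part of the kernel */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int tap_queue_enable(int fd, const char *ifname, int enable)
{
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ);
	ifr.ifr_flags = enable ? IFF_ATTACH_QUEUE : IFF_DETACH_QUEUE;
	return ioctl(fd, TUNSETQUEUE, &ifr);
}

/* e.g. tap_queue_enable(fd, "tap0", 0) pauses the queue behind this fd */
#endif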
1792
Arnd Bergmann50857e22009-11-06 22:52:32 -08001793static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1794 unsigned long arg, int ifreq_len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795{
Eric W. Biederman36b50ba2009-01-20 11:01:48 +00001796 struct tun_file *tfile = file->private_data;
Eric W. Biederman631ab462009-01-20 11:00:40 +00001797 struct tun_struct *tun;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001798 void __user* argp = (void __user*)arg;
1799 struct ifreq ifr;
Eric W. Biederman0625c882012-02-07 16:48:55 -08001800 kuid_t owner;
1801 kgid_t group;
Herbert Xu33dccbb2009-02-05 21:25:32 -08001802 int sndbuf;
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001803 int vnet_hdr_sz;
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001804 int ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001805
Jason Wangcde8b152012-10-31 19:46:01 +00001806 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
Arnd Bergmann50857e22009-11-06 22:52:32 -08001807 if (copy_from_user(&ifr, argp, ifreq_len))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808 return -EFAULT;
David S. Miller8bbb1812012-07-30 14:52:48 -07001809 } else {
Mathias Krausea117dac2012-07-29 19:45:14 +00001810 memset(&ifr, 0, sizeof(ifr));
David S. Miller8bbb1812012-07-30 14:52:48 -07001811 }
Eric W. Biederman631ab462009-01-20 11:00:40 +00001812 if (cmd == TUNGETFEATURES) {
1813 /* Currently this just means: "what IFF flags are valid?".
1814 * This is needed because we never checked for invalid flags on
1815 * TUNSETIFF. */
1816 return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
Jason Wangcde8b152012-10-31 19:46:01 +00001817 IFF_VNET_HDR | IFF_MULTI_QUEUE,
Eric W. Biederman631ab462009-01-20 11:00:40 +00001818 (unsigned int __user*)argp);
Jason Wangcde8b152012-10-31 19:46:01 +00001819 } else if (cmd == TUNSETQUEUE)
1820 return tun_set_queue(file, &ifr);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001821
Jason Wangc8d68e62012-10-31 19:46:00 +00001822 ret = 0;
Herbert Xu876bfd42009-08-06 14:22:44 +00001823 rtnl_lock();
1824
Eric W. Biederman36b50ba2009-01-20 11:01:48 +00001825 tun = __tun_get(tfile);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001826 if (cmd == TUNSETIFF && !tun) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001827 ifr.ifr_name[IFNAMSIZ-1] = '\0';
1828
Herbert Xu876bfd42009-08-06 14:22:44 +00001829 ret = tun_set_iff(tfile->net, file, &ifr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001830
Herbert Xu876bfd42009-08-06 14:22:44 +00001831 if (ret)
1832 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833
Arnd Bergmann50857e22009-11-06 22:52:32 -08001834 if (copy_to_user(argp, &ifr, ifreq_len))
Herbert Xu876bfd42009-08-06 14:22:44 +00001835 ret = -EFAULT;
1836 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001837 }
1838
Herbert Xu876bfd42009-08-06 14:22:44 +00001839 ret = -EBADFD;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001840 if (!tun)
Herbert Xu876bfd42009-08-06 14:22:44 +00001841 goto unlock;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842
Jason Wang1e588332012-10-31 19:45:56 +00001843 tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844
Eric W. Biederman631ab462009-01-20 11:00:40 +00001845 ret = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001846 switch (cmd) {
Mark McLoughline3b99552008-08-15 15:09:56 -07001847 case TUNGETIFF:
Herbert Xu876bfd42009-08-06 14:22:44 +00001848 ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
Mark McLoughline3b99552008-08-15 15:09:56 -07001849 if (ret)
Eric W. Biederman631ab462009-01-20 11:00:40 +00001850 break;
Mark McLoughline3b99552008-08-15 15:09:56 -07001851
Arnd Bergmann50857e22009-11-06 22:52:32 -08001852 if (copy_to_user(argp, &ifr, ifreq_len))
Eric W. Biederman631ab462009-01-20 11:00:40 +00001853 ret = -EFAULT;
Mark McLoughline3b99552008-08-15 15:09:56 -07001854 break;
1855
Linus Torvalds1da177e2005-04-16 15:20:36 -07001856 case TUNSETNOCSUM:
1857 /* Disable/Enable checksum */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001858
Michał Mirosław88255372011-04-19 06:13:10 +00001859 /* [unimplemented] */
1860 tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
Joe Perches6b8a66e2011-03-02 07:18:10 +00001861 arg ? "disabled" : "enabled");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001862 break;
1863
1864 case TUNSETPERSIST:
Jason Wang54f968d2012-10-31 19:45:57 +00001865		/* Disable/Enable persist mode. Keep an extra reference to the
	1866		 * module to prevent the module from being unloaded while persist is set.
1867 */
1868 if (arg) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001869 tun->flags |= TUN_PERSIST;
Jason Wang54f968d2012-10-31 19:45:57 +00001870 __module_get(THIS_MODULE);
1871 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 tun->flags &= ~TUN_PERSIST;
Jason Wang54f968d2012-10-31 19:45:57 +00001873 module_put(THIS_MODULE);
1874 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001875
Joe Perches6b8a66e2011-03-02 07:18:10 +00001876 tun_debug(KERN_INFO, tun, "persist %s\n",
1877 arg ? "enabled" : "disabled");
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 break;
1879
1880 case TUNSETOWNER:
1881 /* Set owner of the device */
Eric W. Biederman0625c882012-02-07 16:48:55 -08001882 owner = make_kuid(current_user_ns(), arg);
1883 if (!uid_valid(owner)) {
1884 ret = -EINVAL;
1885 break;
1886 }
1887 tun->owner = owner;
Jason Wang1e588332012-10-31 19:45:56 +00001888 tun_debug(KERN_INFO, tun, "owner set to %u\n",
Eric W. Biederman0625c882012-02-07 16:48:55 -08001889 from_kuid(&init_user_ns, tun->owner));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890 break;
1891
Guido Guenther8c644622007-07-02 22:50:25 -07001892 case TUNSETGROUP:
1893 /* Set group of the device */
Eric W. Biederman0625c882012-02-07 16:48:55 -08001894 group = make_kgid(current_user_ns(), arg);
1895 if (!gid_valid(group)) {
1896 ret = -EINVAL;
1897 break;
1898 }
1899 tun->group = group;
Jason Wang1e588332012-10-31 19:45:56 +00001900 tun_debug(KERN_INFO, tun, "group set to %u\n",
Eric W. Biederman0625c882012-02-07 16:48:55 -08001901 from_kgid(&init_user_ns, tun->group));
Guido Guenther8c644622007-07-02 22:50:25 -07001902 break;
1903
Mike Kershawff4cc3a2005-09-01 17:40:05 -07001904 case TUNSETLINK:
1905 /* Only allow setting the type when the interface is down */
1906 if (tun->dev->flags & IFF_UP) {
Joe Perches6b8a66e2011-03-02 07:18:10 +00001907 tun_debug(KERN_INFO, tun,
1908 "Linktype set failed because interface is up\n");
David S. Miller48abfe02008-04-23 19:37:58 -07001909 ret = -EBUSY;
Mike Kershawff4cc3a2005-09-01 17:40:05 -07001910 } else {
1911 tun->dev->type = (int) arg;
Joe Perches6b8a66e2011-03-02 07:18:10 +00001912 tun_debug(KERN_INFO, tun, "linktype set to %d\n",
1913 tun->dev->type);
David S. Miller48abfe02008-04-23 19:37:58 -07001914 ret = 0;
Mike Kershawff4cc3a2005-09-01 17:40:05 -07001915 }
Eric W. Biederman631ab462009-01-20 11:00:40 +00001916 break;
Mike Kershawff4cc3a2005-09-01 17:40:05 -07001917
Linus Torvalds1da177e2005-04-16 15:20:36 -07001918#ifdef TUN_DEBUG
1919 case TUNSETDEBUG:
1920 tun->debug = arg;
1921 break;
1922#endif
Rusty Russell5228ddc2008-07-03 03:46:16 -07001923 case TUNSETOFFLOAD:
Michał Mirosław88255372011-04-19 06:13:10 +00001924 ret = set_offload(tun, arg);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001925 break;
Rusty Russell5228ddc2008-07-03 03:46:16 -07001926
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001927 case TUNSETTXFILTER:
1928 /* Can be set only for TAPs */
Eric W. Biederman631ab462009-01-20 11:00:40 +00001929 ret = -EINVAL;
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001930 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
Eric W. Biederman631ab462009-01-20 11:00:40 +00001931 break;
Harvey Harrisonc0e5a8c2008-07-16 12:45:34 -07001932 ret = update_filter(&tun->txflt, (void __user *)arg);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001933 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001934
1935 case SIOCGIFHWADDR:
Uwe Kleine-Königb5950762010-11-01 15:38:34 -04001936 /* Get hw address */
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001937 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1938 ifr.ifr_hwaddr.sa_family = tun->dev->type;
Arnd Bergmann50857e22009-11-06 22:52:32 -08001939 if (copy_to_user(argp, &ifr, ifreq_len))
Eric W. Biederman631ab462009-01-20 11:00:40 +00001940 ret = -EFAULT;
1941 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001942
1943 case SIOCSIFHWADDR:
Max Krasnyanskyf271b2c2008-07-14 22:18:19 -07001944 /* Set hw address */
Joe Perches6b8a66e2011-03-02 07:18:10 +00001945 tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
1946 ifr.ifr_hwaddr.sa_data);
Kim B. Heino40102372008-02-29 12:26:21 -08001947
Kim B. Heino40102372008-02-29 12:26:21 -08001948 ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
Eric W. Biederman631ab462009-01-20 11:00:40 +00001949 break;
Herbert Xu33dccbb2009-02-05 21:25:32 -08001950
1951 case TUNGETSNDBUF:
Jason Wang54f968d2012-10-31 19:45:57 +00001952 sndbuf = tfile->socket.sk->sk_sndbuf;
Herbert Xu33dccbb2009-02-05 21:25:32 -08001953 if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
1954 ret = -EFAULT;
1955 break;
1956
1957 case TUNSETSNDBUF:
1958 if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
1959 ret = -EFAULT;
1960 break;
1961 }
1962
Jason Wangc8d68e62012-10-31 19:46:00 +00001963 tun->sndbuf = sndbuf;
1964 tun_set_sndbuf(tun);
Herbert Xu33dccbb2009-02-05 21:25:32 -08001965 break;
1966
Michael S. Tsirkind9d52b52010-03-17 17:45:01 +02001967 case TUNGETVNETHDRSZ:
1968 vnet_hdr_sz = tun->vnet_hdr_sz;
1969 if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
1970 ret = -EFAULT;
1971 break;
1972
1973 case TUNSETVNETHDRSZ:
1974 if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
1975 ret = -EFAULT;
1976 break;
1977 }
1978 if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
1979 ret = -EINVAL;
1980 break;
1981 }
1982
1983 tun->vnet_hdr_sz = vnet_hdr_sz;
1984 break;
1985
Michael S. Tsirkin99405162010-02-14 01:01:10 +00001986 case TUNATTACHFILTER:
1987 /* Can be set only for TAPs */
1988 ret = -EINVAL;
1989 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
1990 break;
1991 ret = -EFAULT;
Jason Wang54f968d2012-10-31 19:45:57 +00001992 if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog)))
Michael S. Tsirkin99405162010-02-14 01:01:10 +00001993 break;
1994
Jason Wangc8d68e62012-10-31 19:46:00 +00001995 ret = tun_attach_filter(tun);
Michael S. Tsirkin99405162010-02-14 01:01:10 +00001996 break;
1997
1998 case TUNDETACHFILTER:
1999 /* Can be set only for TAPs */
2000 ret = -EINVAL;
2001 if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
2002 break;
Jason Wangc8d68e62012-10-31 19:46:00 +00002003 ret = 0;
2004 tun_detach_filter(tun, tun->numqueues);
Michael S. Tsirkin99405162010-02-14 01:01:10 +00002005 break;
2006
Linus Torvalds1da177e2005-04-16 15:20:36 -07002007 default:
Eric W. Biederman631ab462009-01-20 11:00:40 +00002008 ret = -EINVAL;
2009 break;
Joe Perchesee289b62010-05-17 22:47:34 -07002010 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011
Herbert Xu876bfd42009-08-06 14:22:44 +00002012unlock:
2013 rtnl_unlock();
2014 if (tun)
2015 tun_put(tun);
Eric W. Biederman631ab462009-01-20 11:00:40 +00002016 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002017}
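
/*
 * Illustrative userspace sketch (editor's example, not part of the driver):
 * combining the ioctls handled above the way tunctl/"ip tuntap" do, to
 * leave behind a persistent tap device owned by an unprivileged uid.  The
 * device name and uid value are assumptions of the example.
 */
#if 0	/* example only, never built as part of the kernel */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

static int create_persistent_tap(const char *name, unsigned int uid)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI;
	strncpy(ifr.ifr_name, name, IFNAMSIZ);
	if (ioctl(fd, TUNSETIFF, &ifr) < 0 ||
	    ioctl(fd, TUNSETOWNER, uid) < 0 ||
	    ioctl(fd, TUNSETPERSIST, 1) < 0) {
		close(fd);
		return -1;
	}
	close(fd);	/* the device outlives the fd because of TUN_PERSIST */
	return 0;
}
#endif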
2018
Arnd Bergmann50857e22009-11-06 22:52:32 -08002019static long tun_chr_ioctl(struct file *file,
2020 unsigned int cmd, unsigned long arg)
2021{
2022 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
2023}
2024
2025#ifdef CONFIG_COMPAT
2026static long tun_chr_compat_ioctl(struct file *file,
2027 unsigned int cmd, unsigned long arg)
2028{
2029 switch (cmd) {
2030 case TUNSETIFF:
2031 case TUNGETIFF:
2032 case TUNSETTXFILTER:
2033 case TUNGETSNDBUF:
2034 case TUNSETSNDBUF:
2035 case SIOCGIFHWADDR:
2036 case SIOCSIFHWADDR:
2037 arg = (unsigned long)compat_ptr(arg);
2038 break;
2039 default:
2040 arg = (compat_ulong_t)arg;
2041 break;
2042 }
2043
2044 /*
2045 * compat_ifreq is shorter than ifreq, so we must not access beyond
2046 * the end of that structure. All fields that are used in this
2047 * driver are compatible though, we don't need to convert the
2048 * contents.
2049 */
2050 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
2051}
2052#endif /* CONFIG_COMPAT */
2053
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054static int tun_chr_fasync(int fd, struct file *file, int on)
2055{
Jason Wang54f968d2012-10-31 19:45:57 +00002056 struct tun_file *tfile = file->private_data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 int ret;
2058
Jason Wang54f968d2012-10-31 19:45:57 +00002059	ret = fasync_helper(fd, file, on, &tfile->fasync);
	if (ret < 0)
Jonathan Corbet9d319522008-06-19 15:50:37 -06002060		goto out;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 if (on) {
Eric W. Biederman609d7fa2006-10-02 02:17:15 -07002063 ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064 if (ret)
Jonathan Corbet9d319522008-06-19 15:50:37 -06002065 goto out;
Jason Wang54f968d2012-10-31 19:45:57 +00002066 tfile->flags |= TUN_FASYNC;
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002067 } else
Jason Wang54f968d2012-10-31 19:45:57 +00002068 tfile->flags &= ~TUN_FASYNC;
Jonathan Corbet9d319522008-06-19 15:50:37 -06002069 ret = 0;
2070out:
Jonathan Corbet9d319522008-06-19 15:50:37 -06002071 return ret;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072}
2073
2074static int tun_chr_open(struct inode *inode, struct file * file)
2075{
Eric W. Biederman631ab462009-01-20 11:00:40 +00002076 struct tun_file *tfile;
Thomas Gleixnerdeed49f2009-10-14 01:19:46 -07002077
Joe Perches6b8a66e2011-03-02 07:18:10 +00002078 DBG1(KERN_INFO, "tunX: tun_chr_open\n");
Eric W. Biederman631ab462009-01-20 11:00:40 +00002079
Jason Wang54f968d2012-10-31 19:45:57 +00002080 tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
2081 &tun_proto);
Eric W. Biederman631ab462009-01-20 11:00:40 +00002082 if (!tfile)
2083 return -ENOMEM;
Jason Wang6e914fc2012-10-31 19:45:58 +00002084 rcu_assign_pointer(tfile->tun, NULL);
Eric W. Biederman36b50ba2009-01-20 11:01:48 +00002085 tfile->net = get_net(current->nsproxy->net_ns);
Jason Wang54f968d2012-10-31 19:45:57 +00002086 tfile->flags = 0;
2087
2088 rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
2089 init_waitqueue_head(&tfile->wq.wait);
2090
2091 tfile->socket.file = file;
2092 tfile->socket.ops = &tun_socket_ops;
2093
2094 sock_init_data(&tfile->socket, &tfile->sk);
2095 sk_change_net(&tfile->sk, tfile->net);
2096
2097 tfile->sk.sk_write_space = tun_sock_write_space;
2098 tfile->sk.sk_sndbuf = INT_MAX;
2099
Eric W. Biederman631ab462009-01-20 11:00:40 +00002100 file->private_data = tfile;
Jason Wang54f968d2012-10-31 19:45:57 +00002101 set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);
2102
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103 return 0;
2104}
2105
2106static int tun_chr_close(struct inode *inode, struct file *file)
2107{
Eric W. Biederman631ab462009-01-20 11:00:40 +00002108 struct tun_file *tfile = file->private_data;
Jason Wang54f968d2012-10-31 19:45:57 +00002109 struct net *net = tfile->net;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
Jason Wangc8d68e62012-10-31 19:46:00 +00002111 tun_detach(tfile, true);
Jason Wang54f968d2012-10-31 19:45:57 +00002112 put_net(net);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113
2114 return 0;
2115}
2116
Arjan van de Vend54b1fd2007-02-12 00:55:34 -08002117static const struct file_operations tun_fops = {
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002118 .owner = THIS_MODULE,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 .llseek = no_llseek,
Badari Pulavartyee0b3e62006-09-30 23:28:47 -07002120 .read = do_sync_read,
2121 .aio_read = tun_chr_aio_read,
2122 .write = do_sync_write,
2123 .aio_write = tun_chr_aio_write,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 .poll = tun_chr_poll,
Arnd Bergmann50857e22009-11-06 22:52:32 -08002125 .unlocked_ioctl = tun_chr_ioctl,
2126#ifdef CONFIG_COMPAT
2127 .compat_ioctl = tun_chr_compat_ioctl,
2128#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 .open = tun_chr_open,
2130 .release = tun_chr_close,
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002131 .fasync = tun_chr_fasync
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132};
2133
2134static struct miscdevice tun_miscdev = {
2135 .minor = TUN_MINOR,
2136 .name = "tun",
Kay Sieverse454cea2009-09-18 23:01:12 +02002137 .nodename = "net/tun",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 .fops = &tun_fops,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139};
2140
2141/* ethtool interface */
2142
2143static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2144{
2145 cmd->supported = 0;
2146 cmd->advertising = 0;
David Decotigny70739492011-04-27 18:32:40 +00002147 ethtool_cmd_speed_set(cmd, SPEED_10);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 cmd->duplex = DUPLEX_FULL;
2149 cmd->port = PORT_TP;
2150 cmd->phy_address = 0;
2151 cmd->transceiver = XCVR_INTERNAL;
2152 cmd->autoneg = AUTONEG_DISABLE;
2153 cmd->maxtxpkt = 0;
2154 cmd->maxrxpkt = 0;
2155 return 0;
2156}
2157
2158static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2159{
2160 struct tun_struct *tun = netdev_priv(dev);
2161
Rick Jones33a5ba12011-11-15 14:59:53 +00002162 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2163 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
2165 switch (tun->flags & TUN_TYPE_MASK) {
2166 case TUN_TUN_DEV:
Rick Jones33a5ba12011-11-15 14:59:53 +00002167 strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168 break;
2169 case TUN_TAP_DEV:
Rick Jones33a5ba12011-11-15 14:59:53 +00002170 strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171 break;
2172 }
2173}
2174
2175static u32 tun_get_msglevel(struct net_device *dev)
2176{
2177#ifdef TUN_DEBUG
2178 struct tun_struct *tun = netdev_priv(dev);
2179 return tun->debug;
2180#else
2181 return -EOPNOTSUPP;
2182#endif
2183}
2184
2185static void tun_set_msglevel(struct net_device *dev, u32 value)
2186{
2187#ifdef TUN_DEBUG
2188 struct tun_struct *tun = netdev_priv(dev);
2189 tun->debug = value;
2190#endif
2191}
2192
Jeff Garzik7282d492006-09-13 14:30:00 -04002193static const struct ethtool_ops tun_ethtool_ops = {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 .get_settings = tun_get_settings,
2195 .get_drvinfo = tun_get_drvinfo,
2196 .get_msglevel = tun_get_msglevel,
2197 .set_msglevel = tun_set_msglevel,
Nolan Leakebee31362010-07-27 13:53:43 +00002198 .get_link = ethtool_op_get_link,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199};
2200
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002201
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202static int __init tun_init(void)
2203{
2204 int ret = 0;
2205
Joe Perches6b8a66e2011-03-02 07:18:10 +00002206 pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
2207 pr_info("%s\n", DRV_COPYRIGHT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08002209 ret = rtnl_link_register(&tun_link_ops);
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002210 if (ret) {
Joe Perches6b8a66e2011-03-02 07:18:10 +00002211 pr_err("Can't register link_ops\n");
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08002212 goto err_linkops;
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002213 }
2214
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215 ret = misc_register(&tun_miscdev);
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002216 if (ret) {
Joe Perches6b8a66e2011-03-02 07:18:10 +00002217 pr_err("Can't register misc device %d\n", TUN_MINOR);
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002218 goto err_misc;
2219 }
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08002220 return 0;
Pavel Emelyanov79d17602008-04-16 00:40:46 -07002221err_misc:
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08002222 rtnl_link_unregister(&tun_link_ops);
2223err_linkops:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 return ret;
2225}
2226
2227static void tun_cleanup(void)
2228{
Jeff Garzik6aa20a22006-09-13 13:24:59 -04002229 misc_deregister(&tun_miscdev);
Eric W. Biedermanf019a7a2009-01-21 16:02:16 -08002230 rtnl_link_unregister(&tun_link_ops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231}
2232
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00002233/* Get an underlying socket object from tun file. Returns error unless file is
2234 * attached to a device. The returned object works like a packet socket, it
2235 * can be used for sock_sendmsg/sock_recvmsg. The caller is responsible for
2236 * holding a reference to the file for as long as the socket is in use. */
2237struct socket *tun_get_socket(struct file *file)
2238{
Jason Wang6e914fc2012-10-31 19:45:58 +00002239 struct tun_file *tfile;
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00002240 if (file->f_op != &tun_fops)
2241 return ERR_PTR(-EINVAL);
Jason Wang6e914fc2012-10-31 19:45:58 +00002242 tfile = file->private_data;
2243 if (!tfile)
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00002244 return ERR_PTR(-EBADFD);
Jason Wang54f968d2012-10-31 19:45:57 +00002245 return &tfile->socket;
Michael S. Tsirkin05c28282010-01-14 06:17:09 +00002246}
2247EXPORT_SYMBOL_GPL(tun_get_socket);
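
/*
 * Illustrative in-kernel sketch (editor's example, not code from this file):
 * roughly how a consumer such as vhost-net turns a tun fd passed in from
 * userspace into the socket exported above.  Assumes linux/file.h and
 * linux/err.h; locking and surrounding context are omitted.
 */
#if 0	/* example only, not built here */
static struct socket *example_get_tun_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	/* on success the caller keeps the file reference while it uses sock,
	 * then drops it with fput() when done */
	return sock;
}
#endif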
2248
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249module_init(tun_init);
2250module_exit(tun_cleanup);
2251MODULE_DESCRIPTION(DRV_DESCRIPTION);
2252MODULE_AUTHOR(DRV_COPYRIGHT);
2253MODULE_LICENSE("GPL");
2254MODULE_ALIAS_MISCDEV(TUN_MINOR);
Kay Sievers578454f2010-05-20 18:07:20 +02002255MODULE_ALIAS("devname:net/tun");