Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1 | /* |
| 2 | * TUN - Universal TUN/TAP device driver. |
| 3 | * Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com> |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License as published by |
| 7 | * the Free Software Foundation; either version 2 of the License, or |
| 8 | * (at your option) any later version. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | * $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $ |
| 16 | */ |
| 17 | |
| 18 | /* |
| 19 | * Changes: |
| 20 | * |
Mike Kershaw | ff4cc3a | 2005-09-01 17:40:05 -0700 | [diff] [blame] | 21 | * Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14 |
| 22 | * Add TUNSETLINK ioctl to set the link encapsulation |
| 23 | * |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 24 | * Mark Smith <markzzzsmith@yahoo.com.au> |
Joe Perches | 344dc8e | 2012-07-12 19:33:09 +0000 | [diff] [blame] | 25 | * Use eth_random_addr() for tap MAC address. |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 26 | * |
| 27 | * Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20 |
| 28 | * Fixes in packet dropping, queue length setting and queue wakeup. |
| 29 | * Increased default tx queue length. |
| 30 | * Added ethtool API. |
| 31 | * Minor cleanups |
| 32 | * |
| 33 | * Daniel Podlejski <underley@underley.eu.org> |
| 34 | * Modifications for 2.3.99-pre5 kernel. |
| 35 | */ |
| 36 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 37 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
| 38 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 39 | #define DRV_NAME "tun" |
| 40 | #define DRV_VERSION "1.6" |
| 41 | #define DRV_DESCRIPTION "Universal TUN/TAP device driver" |
| 42 | #define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>" |
| 43 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 44 | #include <linux/module.h> |
| 45 | #include <linux/errno.h> |
| 46 | #include <linux/kernel.h> |
| 47 | #include <linux/major.h> |
| 48 | #include <linux/slab.h> |
| 49 | #include <linux/poll.h> |
| 50 | #include <linux/fcntl.h> |
| 51 | #include <linux/init.h> |
| 52 | #include <linux/skbuff.h> |
| 53 | #include <linux/netdevice.h> |
| 54 | #include <linux/etherdevice.h> |
| 55 | #include <linux/miscdevice.h> |
| 56 | #include <linux/ethtool.h> |
| 57 | #include <linux/rtnetlink.h> |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 58 | #include <linux/compat.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 59 | #include <linux/if.h> |
| 60 | #include <linux/if_arp.h> |
| 61 | #include <linux/if_ether.h> |
| 62 | #include <linux/if_tun.h> |
| 63 | #include <linux/crc32.h> |
Pavel Emelyanov | d647a59 | 2008-04-16 00:41:16 -0700 | [diff] [blame] | 64 | #include <linux/nsproxy.h> |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 65 | #include <linux/virtio_net.h> |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 66 | #include <linux/rcupdate.h> |
Eric W. Biederman | 881d966 | 2007-09-17 11:56:21 -0700 | [diff] [blame] | 67 | #include <net/net_namespace.h> |
Pavel Emelyanov | 79d1760 | 2008-04-16 00:40:46 -0700 | [diff] [blame] | 68 | #include <net/netns/generic.h> |
Eric W. Biederman | f019a7a | 2009-01-21 16:02:16 -0800 | [diff] [blame] | 69 | #include <net/rtnetlink.h> |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 70 | #include <net/sock.h> |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 71 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 72 | #include <asm/uaccess.h> |
| 73 | |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 74 | /* Uncomment to enable debugging */ |
| 75 | /* #define TUN_DEBUG 1 */ |
| 76 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 77 | #ifdef TUN_DEBUG |
| 78 | static int debug; |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 79 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 80 | #define tun_debug(level, tun, fmt, args...) \ |
| 81 | do { \ |
| 82 | if (tun->debug) \ |
| 83 | netdev_printk(level, tun->dev, fmt, ##args); \ |
| 84 | } while (0) |
| 85 | #define DBG1(level, fmt, args...) \ |
| 86 | do { \ |
| 87 | if (debug == 2) \ |
| 88 | printk(level fmt, ##args); \ |
| 89 | } while (0) |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 90 | #else |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 91 | #define tun_debug(level, tun, fmt, args...) \ |
| 92 | do { \ |
| 93 | if (0) \ |
| 94 | netdev_printk(level, tun->dev, fmt, ##args); \ |
| 95 | } while (0) |
| 96 | #define DBG1(level, fmt, args...) \ |
| 97 | do { \ |
| 98 | if (0) \ |
| 99 | printk(level fmt, ##args); \ |
| 100 | } while (0) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 101 | #endif |
| 102 | |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 103 | #define GOODCOPY_LEN 128 |
| 104 | |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 105 | #define FLT_EXACT_COUNT 8 |
| 106 | struct tap_filter { |
| 107 | unsigned int count; /* Number of addrs. Zero means disabled */ |
| 108 | u32 mask[2]; /* Mask of the hashed addrs */ |
| 109 | unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN]; |
| 110 | }; |
| 111 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 112 | /* 1024 is probably a high enough limit: modern hypervisors seem to support on |
| 113 | * the order of 100-200 CPUs, so this leaves us some breathing space if we want |
| 114 | * to match a queue per guest CPU. |
| 115 | */ |
| 116 | #define MAX_TAP_QUEUES 1024 |
| 117 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 118 | #define TUN_FLOW_EXPIRE (3 * HZ) |
| 119 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 120 | /* A tun_file connects an open character device to a tuntap netdevice. It |
| 121 | * also contains all socket-related structures (except sock_fprog and tap_filter) |
| 122 | * to serve as one transmit queue for the tuntap device. The sock_fprog and |
| 123 | * tap_filter are kept in tun_struct since they are used for filtering on the |
| 124 | * netdevice, not on a specific queue (at least I didn't see the requirement for |
| 125 | * this). |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 126 | * |
| 127 | * RCU usage: |
| 128 | * The tun_file and tun_struct are loosely coupled; the pointer from one to the |
| 129 | * other can only be read while rcu_read_lock or rtnl_lock is held. |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 130 | */ |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 131 | struct tun_file { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 132 | struct sock sk; |
| 133 | struct socket socket; |
| 134 | struct socket_wq wq; |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 135 | struct tun_struct __rcu *tun; |
Eric W. Biederman | 36b50ba | 2009-01-20 11:01:48 +0000 | [diff] [blame] | 136 | struct net *net; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 137 | struct fasync_struct *fasync; |
| 138 | /* only used for fasync */ |
| 139 | unsigned int flags; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 140 | u16 queue_index; |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 141 | }; |
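/* Illustrative only (not part of the driver): the reader side implied by the
 * RCU rules above -- __tun_get() further down follows exactly this shape:
 *
 *	rcu_read_lock();
 *	tun = rcu_dereference(tfile->tun);
 *	if (tun)
 *		dev_hold(tun->dev);	take a reference before leaving
 *	rcu_read_unlock();		the read-side critical section
 */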
| 142 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 143 | struct tun_flow_entry { |
| 144 | struct hlist_node hash_link; |
| 145 | struct rcu_head rcu; |
| 146 | struct tun_struct *tun; |
| 147 | |
| 148 | u32 rxhash; |
| 149 | int queue_index; |
| 150 | unsigned long updated; |
| 151 | }; |
| 152 | |
| 153 | #define TUN_NUM_FLOW_ENTRIES 1024 |
| 154 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 155 | /* Since the socket was moved to tun_file, to preserve the behavior of a persistent |
| 156 | * device, the socket filter, sndbuf and vnet header size are restored when a |
| 157 | * file is attached to a persistent device. |
| 158 | */ |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 159 | struct tun_struct { |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 160 | struct tun_file __rcu *tfiles[MAX_TAP_QUEUES]; |
| 161 | unsigned int numqueues; |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 162 | unsigned int flags; |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 163 | kuid_t owner; |
| 164 | kgid_t group; |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 165 | |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 166 | struct net_device *dev; |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 167 | netdev_features_t set_features; |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 168 | #define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ |
| 169 | NETIF_F_TSO6|NETIF_F_UFO) |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 170 | |
| 171 | int vnet_hdr_sz; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 172 | int sndbuf; |
| 173 | struct tap_filter txflt; |
| 174 | struct sock_fprog fprog; |
| 175 | /* protected by rtnl lock */ |
| 176 | bool filter_attached; |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 177 | #ifdef TUN_DEBUG |
| 178 | int debug; |
| 179 | #endif |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 180 | spinlock_t lock; |
| 181 | struct kmem_cache *flow_cache; |
| 182 | struct hlist_head flows[TUN_NUM_FLOW_ENTRIES]; |
| 183 | struct timer_list flow_gc_timer; |
| 184 | unsigned long ageing_time; |
Rusty Russell | 14daa02 | 2008-04-12 18:48:58 -0700 | [diff] [blame] | 185 | }; |
| 186 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 187 | static inline u32 tun_hashfn(u32 rxhash) |
| 188 | { |
| 189 | return rxhash & 0x3ff; |
| 190 | } |
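/* Illustrative only: 0x3ff is TUN_NUM_FLOW_ENTRIES - 1, so tun_hashfn()
 * keeps the low 10 bits of the rxhash to pick one of the 1024 hlist
 * buckets in tun->flows[], e.g.:
 *
 *	tun_hashfn(0x12345678) == 0x278
 */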
| 191 | |
| 192 | static struct tun_flow_entry *tun_flow_find(struct hlist_head *head, u32 rxhash) |
| 193 | { |
| 194 | struct tun_flow_entry *e; |
| 195 | struct hlist_node *n; |
| 196 | |
| 197 | hlist_for_each_entry_rcu(e, n, head, hash_link) { |
| 198 | if (e->rxhash == rxhash) |
| 199 | return e; |
| 200 | } |
| 201 | return NULL; |
| 202 | } |
| 203 | |
| 204 | static struct tun_flow_entry *tun_flow_create(struct tun_struct *tun, |
| 205 | struct hlist_head *head, |
| 206 | u32 rxhash, u16 queue_index) |
| 207 | { |
| 208 | struct tun_flow_entry *e = kmem_cache_alloc(tun->flow_cache, |
| 209 | GFP_ATOMIC); |
| 210 | if (e) { |
| 211 | tun_debug(KERN_INFO, tun, "create flow: hash %u index %u\n", |
| 212 | rxhash, queue_index); |
| 213 | e->updated = jiffies; |
| 214 | e->rxhash = rxhash; |
| 215 | e->queue_index = queue_index; |
| 216 | e->tun = tun; |
| 217 | hlist_add_head_rcu(&e->hash_link, head); |
| 218 | } |
| 219 | return e; |
| 220 | } |
| 221 | |
| 222 | static void tun_flow_free(struct rcu_head *head) |
| 223 | { |
| 224 | struct tun_flow_entry *e |
| 225 | = container_of(head, struct tun_flow_entry, rcu); |
| 226 | kmem_cache_free(e->tun->flow_cache, e); |
| 227 | } |
| 228 | |
| 229 | static void tun_flow_delete(struct tun_struct *tun, struct tun_flow_entry *e) |
| 230 | { |
| 231 | tun_debug(KERN_INFO, tun, "delete flow: hash %u index %u\n", |
| 232 | e->rxhash, e->queue_index); |
| 233 | hlist_del_rcu(&e->hash_link); |
| 234 | call_rcu(&e->rcu, tun_flow_free); |
| 235 | } |
| 236 | |
| 237 | static void tun_flow_flush(struct tun_struct *tun) |
| 238 | { |
| 239 | int i; |
| 240 | |
| 241 | spin_lock_bh(&tun->lock); |
| 242 | for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { |
| 243 | struct tun_flow_entry *e; |
| 244 | struct hlist_node *h, *n; |
| 245 | |
| 246 | hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) |
| 247 | tun_flow_delete(tun, e); |
| 248 | } |
| 249 | spin_unlock_bh(&tun->lock); |
| 250 | } |
| 251 | |
| 252 | static void tun_flow_delete_by_queue(struct tun_struct *tun, u16 queue_index) |
| 253 | { |
| 254 | int i; |
| 255 | |
| 256 | spin_lock_bh(&tun->lock); |
| 257 | for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { |
| 258 | struct tun_flow_entry *e; |
| 259 | struct hlist_node *h, *n; |
| 260 | |
| 261 | hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) { |
| 262 | if (e->queue_index == queue_index) |
| 263 | tun_flow_delete(tun, e); |
| 264 | } |
| 265 | } |
| 266 | spin_unlock_bh(&tun->lock); |
| 267 | } |
| 268 | |
| 269 | static void tun_flow_cleanup(unsigned long data) |
| 270 | { |
| 271 | struct tun_struct *tun = (struct tun_struct *)data; |
| 272 | unsigned long delay = tun->ageing_time; |
| 273 | unsigned long next_timer = jiffies + delay; |
| 274 | unsigned long count = 0; |
| 275 | int i; |
| 276 | |
| 277 | tun_debug(KERN_INFO, tun, "tun_flow_cleanup\n"); |
| 278 | |
| 279 | spin_lock_bh(&tun->lock); |
| 280 | for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) { |
| 281 | struct tun_flow_entry *e; |
| 282 | struct hlist_node *h, *n; |
| 283 | |
| 284 | hlist_for_each_entry_safe(e, h, n, &tun->flows[i], hash_link) { |
| 285 | unsigned long this_timer; |
| 286 | count++; |
| 287 | this_timer = e->updated + delay; |
| 288 | if (time_before_eq(this_timer, jiffies)) |
| 289 | tun_flow_delete(tun, e); |
| 290 | else if (time_before(this_timer, next_timer)) |
| 291 | next_timer = this_timer; |
| 292 | } |
| 293 | } |
| 294 | |
| 295 | if (count) |
| 296 | mod_timer(&tun->flow_gc_timer, round_jiffies_up(next_timer)); |
| 297 | spin_unlock_bh(&tun->lock); |
| 298 | } |
| 299 | |
| 300 | static void tun_flow_update(struct tun_struct *tun, struct sk_buff *skb, |
| 301 | u16 queue_index) |
| 302 | { |
| 303 | struct hlist_head *head; |
| 304 | struct tun_flow_entry *e; |
| 305 | unsigned long delay = tun->ageing_time; |
| 306 | u32 rxhash = skb_get_rxhash(skb); |
| 307 | |
| 308 | if (!rxhash) |
| 309 | return; |
| 310 | else |
| 311 | head = &tun->flows[tun_hashfn(rxhash)]; |
| 312 | |
| 313 | rcu_read_lock(); |
| 314 | |
| 315 | if (tun->numqueues == 1) |
| 316 | goto unlock; |
| 317 | |
| 318 | e = tun_flow_find(head, rxhash); |
| 319 | if (likely(e)) { |
| 320 | /* TODO: keep queueing to old queue until it's empty? */ |
| 321 | e->queue_index = queue_index; |
| 322 | e->updated = jiffies; |
| 323 | } else { |
| 324 | spin_lock_bh(&tun->lock); |
| 325 | if (!tun_flow_find(head, rxhash)) |
| 326 | tun_flow_create(tun, head, rxhash, queue_index); |
| 327 | |
| 328 | if (!timer_pending(&tun->flow_gc_timer)) |
| 329 | mod_timer(&tun->flow_gc_timer, |
| 330 | round_jiffies_up(jiffies + delay)); |
| 331 | spin_unlock_bh(&tun->lock); |
| 332 | } |
| 333 | |
| 334 | unlock: |
| 335 | rcu_read_unlock(); |
| 336 | } |
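/* Illustrative only: tun_flow_update() above uses a classic
 * check/lock/re-check pattern. The lockless tun_flow_find() under
 * rcu_read_lock() covers the hot path, and the second tun_flow_find()
 * under tun->lock ensures two concurrent updaters cannot both insert an
 * entry for the same rxhash. A minimal sketch of the shape, using only
 * placeholder names:
 *
 *	e = lookup(head, key);			fast path, RCU only
 *	if (!e) {
 *		spin_lock_bh(&lock);
 *		if (!lookup(head, key))		re-check under the lock
 *			create(head, key);
 *		spin_unlock_bh(&lock);
 *	}
 */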
| 337 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 338 | /* We try to identify a flow through its rxhash first. The reason that |
| 339 | * we do not check the rxq no. is because some cards (e.g. the 82599) choose |
| 340 | * the rxq based on the txq from which the last packet of the flow was sent. As |
| 341 | * the userspace application moves between processors, we may get a |
| 342 | * different rxq no. here. If we cannot get an rxhash, then we |
| 343 | * hope the rxq no. may help here. |
| 344 | */ |
| 345 | static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb) |
| 346 | { |
| 347 | struct tun_struct *tun = netdev_priv(dev); |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 348 | struct tun_flow_entry *e; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 349 | u32 txq = 0; |
| 350 | u32 numqueues = 0; |
| 351 | |
| 352 | rcu_read_lock(); |
| 353 | numqueues = tun->numqueues; |
| 354 | |
| 355 | txq = skb_get_rxhash(skb); |
| 356 | if (txq) { |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 357 | e = tun_flow_find(&tun->flows[tun_hashfn(txq)], txq); |
| 358 | if (e) |
| 359 | txq = e->queue_index; |
| 360 | else |
| 361 | /* use multiply and shift instead of expensive divide */ |
| 362 | txq = ((u64)txq * numqueues) >> 32; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 363 | } else if (likely(skb_rx_queue_recorded(skb))) { |
| 364 | txq = skb_get_rx_queue(skb); |
| 365 | while (unlikely(txq >= numqueues)) |
| 366 | txq -= numqueues; |
| 367 | } |
| 368 | |
| 369 | rcu_read_unlock(); |
| 370 | return txq; |
| 371 | } |
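/* Illustrative only: the "multiply and shift" above maps a 32-bit hash h
 * uniformly into [0, numqueues) without a divide:
 *
 *	txq = ((u64)h * numqueues) >> 32;
 *
 * e.g. h = 0x80000000, numqueues = 4  ->  (0x200000000 >> 32) == 2.
 */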
| 372 | |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 373 | static inline bool tun_not_capable(struct tun_struct *tun) |
| 374 | { |
| 375 | const struct cred *cred = current_cred(); |
| 376 | |
| 377 | return ((uid_valid(tun->owner) && !uid_eq(cred->euid, tun->owner)) || |
| 378 | (gid_valid(tun->group) && !in_egroup_p(tun->group))) && |
| 379 | !capable(CAP_NET_ADMIN); |
| 380 | } |
| 381 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 382 | static void tun_set_real_num_queues(struct tun_struct *tun) |
| 383 | { |
| 384 | netif_set_real_num_tx_queues(tun->dev, tun->numqueues); |
| 385 | netif_set_real_num_rx_queues(tun->dev, tun->numqueues); |
| 386 | } |
| 387 | |
| 388 | static void __tun_detach(struct tun_file *tfile, bool clean) |
| 389 | { |
| 390 | struct tun_file *ntfile; |
| 391 | struct tun_struct *tun; |
| 392 | struct net_device *dev; |
| 393 | |
| 394 | tun = rcu_dereference_protected(tfile->tun, |
| 395 | lockdep_rtnl_is_held()); |
| 396 | if (tun) { |
| 397 | u16 index = tfile->queue_index; |
| 398 | BUG_ON(index >= tun->numqueues); |
| 399 | dev = tun->dev; |
| 400 | |
| 401 | rcu_assign_pointer(tun->tfiles[index], |
| 402 | tun->tfiles[tun->numqueues - 1]); |
| 403 | rcu_assign_pointer(tfile->tun, NULL); |
| 404 | ntfile = rcu_dereference_protected(tun->tfiles[index], |
| 405 | lockdep_rtnl_is_held()); |
| 406 | ntfile->queue_index = index; |
| 407 | |
| 408 | --tun->numqueues; |
| 409 | sock_put(&tfile->sk); |
| 410 | |
| 411 | synchronize_net(); |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 412 | tun_flow_delete_by_queue(tun, tun->numqueues + 1); |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 413 | /* Drop read queue */ |
| 414 | skb_queue_purge(&tfile->sk.sk_receive_queue); |
| 415 | tun_set_real_num_queues(tun); |
| 416 | |
| 417 | if (tun->numqueues == 0 && !(tun->flags & TUN_PERSIST)) |
| 418 | if (dev->reg_state == NETREG_REGISTERED) |
| 419 | unregister_netdevice(dev); |
| 420 | } |
| 421 | |
| 422 | if (clean) { |
| 423 | BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, |
| 424 | &tfile->socket.flags)); |
| 425 | sk_release_kernel(&tfile->sk); |
| 426 | } |
| 427 | } |
| 428 | |
| 429 | static void tun_detach(struct tun_file *tfile, bool clean) |
| 430 | { |
| 431 | rtnl_lock(); |
| 432 | __tun_detach(tfile, clean); |
| 433 | rtnl_unlock(); |
| 434 | } |
| 435 | |
| 436 | static void tun_detach_all(struct net_device *dev) |
| 437 | { |
| 438 | struct tun_struct *tun = netdev_priv(dev); |
| 439 | struct tun_file *tfile; |
| 440 | int i, n = tun->numqueues; |
| 441 | |
| 442 | for (i = 0; i < n; i++) { |
| 443 | tfile = rcu_dereference_protected(tun->tfiles[i], |
| 444 | lockdep_rtnl_is_held()); |
| 445 | BUG_ON(!tfile); |
| 446 | wake_up_all(&tfile->wq.wait); |
| 447 | rcu_assign_pointer(tfile->tun, NULL); |
| 448 | --tun->numqueues; |
| 449 | } |
| 450 | BUG_ON(tun->numqueues != 0); |
| 451 | |
| 452 | synchronize_net(); |
| 453 | for (i = 0; i < n; i++) { |
| 454 | tfile = rcu_dereference_protected(tun->tfiles[i], |
| 455 | lockdep_rtnl_is_held()); |
| 456 | /* Drop read queue */ |
| 457 | skb_queue_purge(&tfile->sk.sk_receive_queue); |
| 458 | sock_put(&tfile->sk); |
| 459 | } |
| 460 | } |
| 461 | |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 462 | static int tun_attach(struct tun_struct *tun, struct file *file) |
| 463 | { |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 464 | struct tun_file *tfile = file->private_data; |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 465 | int err; |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 466 | |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 467 | err = -EINVAL; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 468 | if (rcu_dereference_protected(tfile->tun, lockdep_rtnl_is_held())) |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 469 | goto out; |
| 470 | |
| 471 | err = -EBUSY; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 472 | if (!(tun->flags & TUN_TAP_MQ) && tun->numqueues == 1) |
| 473 | goto out; |
| 474 | |
| 475 | err = -E2BIG; |
| 476 | if (tun->numqueues == MAX_TAP_QUEUES) |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 477 | goto out; |
| 478 | |
| 479 | err = 0; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 480 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 481 | /* Re-attach the filter to the persistent device */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 482 | if (tun->filter_attached == true) { |
| 483 | err = sk_attach_filter(&tun->fprog, tfile->socket.sk); |
| 484 | if (!err) |
| 485 | goto out; |
| 486 | } |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 487 | tfile->queue_index = tun->numqueues; |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 488 | rcu_assign_pointer(tfile->tun, tun); |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 489 | rcu_assign_pointer(tun->tfiles[tun->numqueues], tfile); |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 490 | sock_hold(&tfile->sk); |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 491 | tun->numqueues++; |
| 492 | |
| 493 | tun_set_real_num_queues(tun); |
| 494 | |
| 495 | if (tun->numqueues == 1) |
| 496 | netif_carrier_on(tun->dev); |
| 497 | |
| 498 | /* device is allowed to go away first, so no need to hold extra |
| 499 | * refcnt. |
| 500 | */ |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 501 | |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 502 | out: |
Eric W. Biederman | 38231b7 | 2009-01-20 11:02:28 +0000 | [diff] [blame] | 503 | return err; |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 504 | } |
| 505 | |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 506 | static struct tun_struct *__tun_get(struct tun_file *tfile) |
| 507 | { |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 508 | struct tun_struct *tun; |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 509 | |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 510 | rcu_read_lock(); |
| 511 | tun = rcu_dereference(tfile->tun); |
| 512 | if (tun) |
| 513 | dev_hold(tun->dev); |
| 514 | rcu_read_unlock(); |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 515 | |
| 516 | return tun; |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 517 | } |
| 518 | |
| 519 | static struct tun_struct *tun_get(struct file *file) |
| 520 | { |
| 521 | return __tun_get(file->private_data); |
| 522 | } |
| 523 | |
| 524 | static void tun_put(struct tun_struct *tun) |
| 525 | { |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 526 | dev_put(tun->dev); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 527 | } |
| 528 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 529 | /* TAP filtering */ |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 530 | static void addr_hash_set(u32 *mask, const u8 *addr) |
| 531 | { |
| 532 | int n = ether_crc(ETH_ALEN, addr) >> 26; |
| 533 | mask[n >> 5] |= (1 << (n & 31)); |
| 534 | } |
| 535 | |
| 536 | static unsigned int addr_hash_test(const u32 *mask, const u8 *addr) |
| 537 | { |
| 538 | int n = ether_crc(ETH_ALEN, addr) >> 26; |
| 539 | return mask[n >> 5] & (1 << (n & 31)); |
| 540 | } |
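/* Illustrative only: mask[] is effectively a 64-bit hash filter. The top
 * six bits of the Ethernet CRC select one of 64 bit positions (n >> 5
 * picks the u32 word, n & 31 the bit within it). Using only the two
 * helpers above:
 *
 *	u32 mask[2] = { 0, 0 };
 *	addr_hash_set(mask, addr);
 *	addr_hash_test(mask, addr);	always non-zero once set, though
 *					distinct addresses may collide
 */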
| 541 | |
| 542 | static int update_filter(struct tap_filter *filter, void __user *arg) |
| 543 | { |
| 544 | struct { u8 u[ETH_ALEN]; } *addr; |
| 545 | struct tun_filter uf; |
| 546 | int err, alen, n, nexact; |
| 547 | |
| 548 | if (copy_from_user(&uf, arg, sizeof(uf))) |
| 549 | return -EFAULT; |
| 550 | |
| 551 | if (!uf.count) { |
| 552 | /* Disabled */ |
| 553 | filter->count = 0; |
| 554 | return 0; |
| 555 | } |
| 556 | |
| 557 | alen = ETH_ALEN * uf.count; |
| 558 | addr = kmalloc(alen, GFP_KERNEL); |
| 559 | if (!addr) |
| 560 | return -ENOMEM; |
| 561 | |
| 562 | if (copy_from_user(addr, arg + sizeof(uf), alen)) { |
| 563 | err = -EFAULT; |
| 564 | goto done; |
| 565 | } |
| 566 | |
| 567 | /* The filter is updated without holding any locks, which is |
| 568 | * perfectly safe. We disable it first and in the worst |
| 569 | * case we'll accept a few undesired packets. */ |
| 570 | filter->count = 0; |
| 571 | wmb(); |
| 572 | |
| 573 | /* Use first set of addresses as an exact filter */ |
| 574 | for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++) |
| 575 | memcpy(filter->addr[n], addr[n].u, ETH_ALEN); |
| 576 | |
| 577 | nexact = n; |
| 578 | |
Alex Williamson | cfbf84f | 2009-02-08 17:49:17 -0800 | [diff] [blame] | 579 | /* Remaining multicast addresses are hashed; |
| 580 | * a unicast address will leave the filter disabled. */ |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 581 | memset(filter->mask, 0, sizeof(filter->mask)); |
Alex Williamson | cfbf84f | 2009-02-08 17:49:17 -0800 | [diff] [blame] | 582 | for (; n < uf.count; n++) { |
| 583 | if (!is_multicast_ether_addr(addr[n].u)) { |
| 584 | err = 0; /* no filter */ |
| 585 | goto done; |
| 586 | } |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 587 | addr_hash_set(filter->mask, addr[n].u); |
Alex Williamson | cfbf84f | 2009-02-08 17:49:17 -0800 | [diff] [blame] | 588 | } |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 589 | |
| 590 | /* For ALLMULTI just set the mask to all ones. |
| 591 | * This overrides the mask populated above. */ |
| 592 | if ((uf.flags & TUN_FLT_ALLMULTI)) |
| 593 | memset(filter->mask, ~0, sizeof(filter->mask)); |
| 594 | |
| 595 | /* Now enable the filter */ |
| 596 | wmb(); |
| 597 | filter->count = nexact; |
| 598 | |
| 599 | /* Return the number of exact filters */ |
| 600 | err = nexact; |
| 601 | |
| 602 | done: |
| 603 | kfree(addr); |
| 604 | return err; |
| 605 | } |
| 606 | |
| 607 | /* Returns: 0 - drop, !=0 - accept */ |
| 608 | static int run_filter(struct tap_filter *filter, const struct sk_buff *skb) |
| 609 | { |
| 610 | /* Cannot use eth_hdr(skb) here because skb_mac_header() is incorrect |
| 611 | * at this point. */ |
| 612 | struct ethhdr *eh = (struct ethhdr *) skb->data; |
| 613 | int i; |
| 614 | |
| 615 | /* Exact match */ |
| 616 | for (i = 0; i < filter->count; i++) |
Joe Perches | 2e42e47 | 2012-05-09 17:17:46 +0000 | [diff] [blame] | 617 | if (ether_addr_equal(eh->h_dest, filter->addr[i])) |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 618 | return 1; |
| 619 | |
| 620 | /* Inexact match (multicast only) */ |
| 621 | if (is_multicast_ether_addr(eh->h_dest)) |
| 622 | return addr_hash_test(filter->mask, eh->h_dest); |
| 623 | |
| 624 | return 0; |
| 625 | } |
| 626 | |
| 627 | /* |
| 628 | * Checks whether the packet is accepted or not. |
| 629 | * Returns: 0 - drop, !=0 - accept |
| 630 | */ |
| 631 | static int check_filter(struct tap_filter *filter, const struct sk_buff *skb) |
| 632 | { |
| 633 | if (!filter->count) |
| 634 | return 1; |
| 635 | |
| 636 | return run_filter(filter, skb); |
| 637 | } |
| 638 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 639 | /* Network device part of the driver */ |
| 640 | |
Jeff Garzik | 7282d49 | 2006-09-13 14:30:00 -0400 | [diff] [blame] | 641 | static const struct ethtool_ops tun_ethtool_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 642 | |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 643 | /* Net device detach from fd. */ |
| 644 | static void tun_net_uninit(struct net_device *dev) |
| 645 | { |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 646 | tun_detach_all(dev); |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 647 | } |
| 648 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 649 | /* Net device open. */ |
| 650 | static int tun_net_open(struct net_device *dev) |
| 651 | { |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 652 | netif_tx_start_all_queues(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 653 | return 0; |
| 654 | } |
| 655 | |
| 656 | /* Net device close. */ |
| 657 | static int tun_net_close(struct net_device *dev) |
| 658 | { |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 659 | netif_tx_stop_all_queues(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 660 | return 0; |
| 661 | } |
| 662 | |
| 663 | /* Net device start xmit */ |
Stephen Hemminger | 424efe9 | 2009-08-31 19:50:51 +0000 | [diff] [blame] | 664 | static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 665 | { |
| 666 | struct tun_struct *tun = netdev_priv(dev); |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 667 | int txq = skb->queue_mapping; |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 668 | struct tun_file *tfile; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 669 | |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 670 | rcu_read_lock(); |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 671 | tfile = rcu_dereference(tun->tfiles[txq]); |
| 672 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 673 | /* Drop packet if interface is not attached */ |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 674 | if (txq >= tun->numqueues) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 675 | goto drop; |
| 676 | |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 677 | tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len); |
| 678 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 679 | BUG_ON(!tfile); |
| 680 | |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 681 | /* Drop if the filter does not like it. |
| 682 | * This is a noop if the filter is disabled. |
| 683 | * Filter can be enabled only for the TAP devices. */ |
| 684 | if (!check_filter(&tun->txflt, skb)) |
| 685 | goto drop; |
| 686 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 687 | if (tfile->socket.sk->sk_filter && |
| 688 | sk_filter(tfile->socket.sk, skb)) |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 689 | goto drop; |
| 690 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 691 | /* Limit the number of packets queued by dividing the txq length by the |
| 692 | * number of queues. |
| 693 | */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 694 | if (skb_queue_len(&tfile->socket.sk->sk_receive_queue) |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 695 | >= dev->tx_queue_len / tun->numqueues) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 696 | if (!(tun->flags & TUN_ONE_QUEUE)) { |
| 697 | /* Normal queueing mode. */ |
| 698 | /* Packet scheduler handles dropping of further packets. */ |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 699 | netif_stop_subqueue(dev, txq); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 700 | |
| 701 | /* We won't see all dropped packets individually, so an overrun |
| 702 | * error is more appropriate. */ |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 703 | dev->stats.tx_fifo_errors++; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 704 | } else { |
| 705 | /* Single queue mode. |
| 706 | * Driver handles dropping of all packets itself. */ |
| 707 | goto drop; |
| 708 | } |
| 709 | } |
| 710 | |
Michael S. Tsirkin | 0110d6f | 2010-04-13 04:59:44 +0000 | [diff] [blame] | 711 | /* Orphan the skb - required as we might hang on to it |
| 712 | * for an indefinite time. */ |
Michael S. Tsirkin | 868eefe | 2012-07-20 09:23:14 +0000 | [diff] [blame] | 713 | if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC))) |
| 714 | goto drop; |
Michael S. Tsirkin | 0110d6f | 2010-04-13 04:59:44 +0000 | [diff] [blame] | 715 | skb_orphan(skb); |
| 716 | |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 717 | /* Enqueue packet */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 718 | skb_queue_tail(&tfile->socket.sk->sk_receive_queue, skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 719 | |
| 720 | /* Notify and wake up reader process */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 721 | if (tfile->flags & TUN_FASYNC) |
| 722 | kill_fasync(&tfile->fasync, SIGIO, POLL_IN); |
| 723 | wake_up_interruptible_poll(&tfile->wq.wait, POLLIN | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 724 | POLLRDNORM | POLLRDBAND); |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 725 | |
| 726 | rcu_read_unlock(); |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 727 | return NETDEV_TX_OK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 728 | |
| 729 | drop: |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 730 | dev->stats.tx_dropped++; |
Michael S. Tsirkin | 149d36f | 2012-11-01 09:16:32 +0000 | [diff] [blame^] | 731 | skb_tx_error(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 732 | kfree_skb(skb); |
Jason Wang | 6e914fc | 2012-10-31 19:45:58 +0000 | [diff] [blame] | 733 | rcu_read_unlock(); |
Patrick McHardy | 6ed1065 | 2009-06-23 06:03:08 +0000 | [diff] [blame] | 734 | return NETDEV_TX_OK; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 735 | } |
| 736 | |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 737 | static void tun_net_mclist(struct net_device *dev) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 738 | { |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 739 | /* |
| 740 | * This callback is supposed to deal with the mc filter in the |
| 741 | * _rx_ path and has nothing to do with the _tx_ path. |
| 742 | * In the rx path we always accept everything userspace gives us. |
| 743 | */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 744 | } |
| 745 | |
Ed Swierk | 4885a50 | 2007-09-16 12:21:38 -0700 | [diff] [blame] | 746 | #define MIN_MTU 68 |
| 747 | #define MAX_MTU 65535 |
| 748 | |
| 749 | static int |
| 750 | tun_net_change_mtu(struct net_device *dev, int new_mtu) |
| 751 | { |
| 752 | if (new_mtu < MIN_MTU || new_mtu + dev->hard_header_len > MAX_MTU) |
| 753 | return -EINVAL; |
| 754 | dev->mtu = new_mtu; |
| 755 | return 0; |
| 756 | } |
| 757 | |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 758 | static netdev_features_t tun_net_fix_features(struct net_device *dev, |
| 759 | netdev_features_t features) |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 760 | { |
| 761 | struct tun_struct *tun = netdev_priv(dev); |
| 762 | |
| 763 | return (features & tun->set_features) | (features & ~TUN_USER_FEATURES); |
| 764 | } |
Neil Horman | bebd097 | 2011-06-15 05:25:01 +0000 | [diff] [blame] | 765 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 766 | static void tun_poll_controller(struct net_device *dev) |
| 767 | { |
| 768 | /* |
| 769 | * Tun only receives frames when: |
| 770 | * 1) the char device endpoint gets data from user space |
| 771 | * 2) the tun socket gets a sendmsg call from user space |
| 772 | * Since both of those are synchronous operations, we are guaranteed |
| 773 | * never to have pending data when we poll for it, |
| 774 | * so there's nothing to do here but return. |
| 775 | * We need this though so netpoll recognizes us as an interface that |
| 776 | * supports polling, which enables bridge devices in virt setups to |
| 777 | * still use netconsole |
| 778 | */ |
| 779 | return; |
| 780 | } |
| 781 | #endif |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 782 | static const struct net_device_ops tun_netdev_ops = { |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 783 | .ndo_uninit = tun_net_uninit, |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 784 | .ndo_open = tun_net_open, |
| 785 | .ndo_stop = tun_net_close, |
Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 786 | .ndo_start_xmit = tun_net_xmit, |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 787 | .ndo_change_mtu = tun_net_change_mtu, |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 788 | .ndo_fix_features = tun_net_fix_features, |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 789 | .ndo_select_queue = tun_select_queue, |
Neil Horman | bebd097 | 2011-06-15 05:25:01 +0000 | [diff] [blame] | 790 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 791 | .ndo_poll_controller = tun_poll_controller, |
| 792 | #endif |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 793 | }; |
| 794 | |
| 795 | static const struct net_device_ops tap_netdev_ops = { |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 796 | .ndo_uninit = tun_net_uninit, |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 797 | .ndo_open = tun_net_open, |
| 798 | .ndo_stop = tun_net_close, |
Stephen Hemminger | 0082982 | 2008-11-20 20:14:53 -0800 | [diff] [blame] | 799 | .ndo_start_xmit = tun_net_xmit, |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 800 | .ndo_change_mtu = tun_net_change_mtu, |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 801 | .ndo_fix_features = tun_net_fix_features, |
Jiri Pirko | afc4b13 | 2011-08-16 06:29:01 +0000 | [diff] [blame] | 802 | .ndo_set_rx_mode = tun_net_mclist, |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 803 | .ndo_set_mac_address = eth_mac_addr, |
| 804 | .ndo_validate_addr = eth_validate_addr, |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 805 | .ndo_select_queue = tun_select_queue, |
Neil Horman | bebd097 | 2011-06-15 05:25:01 +0000 | [diff] [blame] | 806 | #ifdef CONFIG_NET_POLL_CONTROLLER |
| 807 | .ndo_poll_controller = tun_poll_controller, |
| 808 | #endif |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 809 | }; |
| 810 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 811 | static int tun_flow_init(struct tun_struct *tun) |
| 812 | { |
| 813 | int i; |
| 814 | |
| 815 | tun->flow_cache = kmem_cache_create("tun_flow_cache", |
| 816 | sizeof(struct tun_flow_entry), 0, 0, |
| 817 | NULL); |
| 818 | if (!tun->flow_cache) |
| 819 | return -ENOMEM; |
| 820 | |
| 821 | for (i = 0; i < TUN_NUM_FLOW_ENTRIES; i++) |
| 822 | INIT_HLIST_HEAD(&tun->flows[i]); |
| 823 | |
| 824 | tun->ageing_time = TUN_FLOW_EXPIRE; |
| 825 | setup_timer(&tun->flow_gc_timer, tun_flow_cleanup, (unsigned long)tun); |
| 826 | mod_timer(&tun->flow_gc_timer, |
| 827 | round_jiffies_up(jiffies + tun->ageing_time)); |
| 828 | |
| 829 | return 0; |
| 830 | } |
| 831 | |
| 832 | static void tun_flow_uninit(struct tun_struct *tun) |
| 833 | { |
| 834 | del_timer_sync(&tun->flow_gc_timer); |
| 835 | tun_flow_flush(tun); |
| 836 | |
| 837 | /* Wait for completion of call_rcu()'s */ |
| 838 | rcu_barrier(); |
| 839 | kmem_cache_destroy(tun->flow_cache); |
| 840 | } |
| 841 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 842 | /* Initialize net device. */ |
| 843 | static void tun_net_init(struct net_device *dev) |
| 844 | { |
| 845 | struct tun_struct *tun = netdev_priv(dev); |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 846 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 847 | switch (tun->flags & TUN_TYPE_MASK) { |
| 848 | case TUN_TUN_DEV: |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 849 | dev->netdev_ops = &tun_netdev_ops; |
| 850 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 851 | /* Point-to-Point TUN Device */ |
| 852 | dev->hard_header_len = 0; |
| 853 | dev->addr_len = 0; |
| 854 | dev->mtu = 1500; |
| 855 | |
| 856 | /* Zero header length */ |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 857 | dev->type = ARPHRD_NONE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 858 | dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST; |
| 859 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
| 860 | break; |
| 861 | |
| 862 | case TUN_TAP_DEV: |
Kusanagi Kouichi | 7a0a960 | 2008-12-29 18:23:28 -0800 | [diff] [blame] | 863 | dev->netdev_ops = &tap_netdev_ops; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 864 | /* Ethernet TAP Device */ |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 865 | ether_setup(dev); |
Neil Horman | 550fd08 | 2011-07-26 06:05:38 +0000 | [diff] [blame] | 866 | dev->priv_flags &= ~IFF_TX_SKB_SHARING; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 | |
Danny Kukawka | f2cedb6 | 2012-02-15 06:45:39 +0000 | [diff] [blame] | 868 | eth_hw_addr_random(dev); |
Brian Braunstein | 36226a8 | 2007-04-26 01:00:55 -0700 | [diff] [blame] | 869 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 870 | dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */ |
| 871 | break; |
| 872 | } |
| 873 | } |
| 874 | |
| 875 | /* Character device part */ |
| 876 | |
| 877 | /* Poll */ |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 878 | static unsigned int tun_chr_poll(struct file *file, poll_table *wait) |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 879 | { |
Eric W. Biederman | b2430de | 2009-01-20 11:03:21 +0000 | [diff] [blame] | 880 | struct tun_file *tfile = file->private_data; |
| 881 | struct tun_struct *tun = __tun_get(tfile); |
Mariusz Kozlowski | 3c8a9c6 | 2009-07-05 19:48:35 +0000 | [diff] [blame] | 882 | struct sock *sk; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 883 | unsigned int mask = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 884 | |
| 885 | if (!tun) |
Eric W. Biederman | eac9e90 | 2009-01-20 10:59:05 +0000 | [diff] [blame] | 886 | return POLLERR; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 887 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 888 | sk = tfile->socket.sk; |
Mariusz Kozlowski | 3c8a9c6 | 2009-07-05 19:48:35 +0000 | [diff] [blame] | 889 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 890 | tun_debug(KERN_INFO, tun, "tun_chr_poll\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 891 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 892 | poll_wait(file, &tfile->wq.wait, wait); |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 893 | |
Michael S. Tsirkin | 89f56d1 | 2009-08-30 07:04:42 +0000 | [diff] [blame] | 894 | if (!skb_queue_empty(&sk->sk_receive_queue)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 895 | mask |= POLLIN | POLLRDNORM; |
| 896 | |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 897 | if (sock_writeable(sk) || |
| 898 | (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) && |
| 899 | sock_writeable(sk))) |
| 900 | mask |= POLLOUT | POLLWRNORM; |
| 901 | |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 902 | if (tun->dev->reg_state != NETREG_REGISTERED) |
| 903 | mask = POLLERR; |
| 904 | |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 905 | tun_put(tun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 906 | return mask; |
| 907 | } |
| 908 | |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 909 | /* prepad is the amount to reserve at the front. len is the length after that. |
| 910 | * linear is a hint as to how much to copy (usually headers). */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 911 | static struct sk_buff *tun_alloc_skb(struct tun_file *tfile, |
stephen hemminger | 6f7c156 | 2011-06-08 14:33:08 +0000 | [diff] [blame] | 912 | size_t prepad, size_t len, |
| 913 | size_t linear, int noblock) |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 914 | { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 915 | struct sock *sk = tfile->socket.sk; |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 916 | struct sk_buff *skb; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 917 | int err; |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 918 | |
| 919 | /* Under a page? Don't bother with paged skb. */ |
Herbert Xu | 0eca93b | 2009-04-14 02:09:43 -0700 | [diff] [blame] | 920 | if (prepad + len < PAGE_SIZE || !linear) |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 921 | linear = len; |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 922 | |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 923 | skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock, |
| 924 | &err); |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 925 | if (!skb) |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 926 | return ERR_PTR(err); |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 927 | |
| 928 | skb_reserve(skb, prepad); |
| 929 | skb_put(skb, linear); |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 930 | skb->data_len = len - linear; |
| 931 | skb->len += len - linear; |
Rusty Russell | f42157c | 2008-08-15 15:15:10 -0700 | [diff] [blame] | 932 | |
| 933 | return skb; |
| 934 | } |
| 935 | |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 936 | /* set skb frags from an iovec; this could move to core network code for reuse */ |
| 937 | static int zerocopy_sg_from_iovec(struct sk_buff *skb, const struct iovec *from, |
| 938 | int offset, size_t count) |
| 939 | { |
| 940 | int len = iov_length(from, count) - offset; |
| 941 | int copy = skb_headlen(skb); |
| 942 | int size, offset1 = 0; |
| 943 | int i = 0; |
| 944 | |
| 945 | /* Skip over from offset */ |
| 946 | while (count && (offset >= from->iov_len)) { |
| 947 | offset -= from->iov_len; |
| 948 | ++from; |
| 949 | --count; |
| 950 | } |
| 951 | |
| 952 | /* copy up to skb headlen */ |
| 953 | while (count && (copy > 0)) { |
| 954 | size = min_t(unsigned int, copy, from->iov_len - offset); |
| 955 | if (copy_from_user(skb->data + offset1, from->iov_base + offset, |
| 956 | size)) |
| 957 | return -EFAULT; |
| 958 | if (copy > size) { |
| 959 | ++from; |
| 960 | --count; |
| 961 | offset = 0; |
| 962 | } else |
| 963 | offset += size; |
| 964 | copy -= size; |
| 965 | offset1 += size; |
| 966 | } |
| 967 | |
| 968 | if (len == offset1) |
| 969 | return 0; |
| 970 | |
| 971 | while (count--) { |
| 972 | struct page *page[MAX_SKB_FRAGS]; |
| 973 | int num_pages; |
| 974 | unsigned long base; |
| 975 | unsigned long truesize; |
| 976 | |
| 977 | len = from->iov_len - offset; |
| 978 | if (!len) { |
| 979 | offset = 0; |
| 980 | ++from; |
| 981 | continue; |
| 982 | } |
| 983 | base = (unsigned long)from->iov_base + offset; |
| 984 | size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT; |
| 985 | if (i + size > MAX_SKB_FRAGS) |
| 986 | return -EMSGSIZE; |
| 987 | num_pages = get_user_pages_fast(base, size, 0, &page[i]); |
| 988 | if (num_pages != size) { |
| 989 | for (i = 0; i < num_pages; i++) |
| 990 | put_page(page[i]); |
| 991 | return -EFAULT; |
| 992 | } |
| 993 | truesize = size * PAGE_SIZE; |
| 994 | skb->data_len += len; |
| 995 | skb->len += len; |
| 996 | skb->truesize += truesize; |
| 997 | atomic_add(truesize, &skb->sk->sk_wmem_alloc); |
| 998 | while (len) { |
| 999 | int off = base & ~PAGE_MASK; |
| 1000 | int size = min_t(int, len, PAGE_SIZE - off); |
| 1001 | __skb_fill_page_desc(skb, i, page[i], off, size); |
| 1002 | skb_shinfo(skb)->nr_frags++; |
| 1003 | /* increase sk_wmem_alloc */ |
| 1004 | base += size; |
| 1005 | len -= size; |
| 1006 | i++; |
| 1007 | } |
| 1008 | offset = 0; |
| 1009 | ++from; |
| 1010 | } |
| 1011 | return 0; |
| 1012 | } |
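/* Illustrative only: in the loop above,
 *
 *	size = ((base & ~PAGE_MASK) + len + ~PAGE_MASK) >> PAGE_SHIFT;
 *
 * counts the pages spanned by [base, base + len): the offset within the
 * first page plus len, rounded up to a whole number of pages. E.g. with
 * 4K pages, base = 0x1ff0 and len = 0x20 spans two pages, and the formula
 * gives (0xff0 + 0x20 + 0xfff) >> 12 == 2.
 */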
| 1013 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | /* Get packet from user space buffer */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1015 | static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile, |
| 1016 | void *msg_control, const struct iovec *iv, |
| 1017 | size_t total_len, size_t count, int noblock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1018 | { |
Harvey Harrison | 09640e6 | 2009-02-01 00:45:17 -0800 | [diff] [blame] | 1019 | struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) }; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1020 | struct sk_buff *skb; |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1021 | size_t len = total_len, align = NET_SKB_PAD; |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1022 | struct virtio_net_hdr gso = { 0 }; |
Michael S. Tsirkin | 6f26c9a | 2009-04-20 01:26:11 +0000 | [diff] [blame] | 1023 | int offset = 0; |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1024 | int copylen; |
| 1025 | bool zerocopy = false; |
| 1026 | int err; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1027 | |
| 1028 | if (!(tun->flags & TUN_NO_PI)) { |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1029 | if ((len -= sizeof(pi)) > total_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1030 | return -EINVAL; |
| 1031 | |
Michael S. Tsirkin | 6f26c9a | 2009-04-20 01:26:11 +0000 | [diff] [blame] | 1032 | if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1033 | return -EFAULT; |
Michael S. Tsirkin | 6f26c9a | 2009-04-20 01:26:11 +0000 | [diff] [blame] | 1034 | offset += sizeof(pi); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1035 | } |
| 1036 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1037 | if (tun->flags & TUN_VNET_HDR) { |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1038 | if ((len -= tun->vnet_hdr_sz) > total_len) |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1039 | return -EINVAL; |
| 1040 | |
Michael S. Tsirkin | 6f26c9a | 2009-04-20 01:26:11 +0000 | [diff] [blame] | 1041 | if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1042 | return -EFAULT; |
| 1043 | |
Herbert Xu | 4909122 | 2009-06-08 00:20:01 -0700 | [diff] [blame] | 1044 | if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) && |
| 1045 | gso.csum_start + gso.csum_offset + 2 > gso.hdr_len) |
| 1046 | gso.hdr_len = gso.csum_start + gso.csum_offset + 2; |
| 1047 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1048 | if (gso.hdr_len > len) |
| 1049 | return -EINVAL; |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1050 | offset += tun->vnet_hdr_sz; |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1051 | } |
| 1052 | |
Rusty Russell | e01bf1c | 2008-04-12 18:49:30 -0700 | [diff] [blame] | 1053 | if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) { |
stephen hemminger | a504b86 | 2011-06-08 14:33:07 +0000 | [diff] [blame] | 1054 | align += NET_IP_ALIGN; |
Herbert Xu | 0eca93b | 2009-04-14 02:09:43 -0700 | [diff] [blame] | 1055 | if (unlikely(len < ETH_HLEN || |
| 1056 | (gso.hdr_len && gso.hdr_len < ETH_HLEN))) |
Rusty Russell | e01bf1c | 2008-04-12 18:49:30 -0700 | [diff] [blame] | 1057 | return -EINVAL; |
| 1058 | } |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1059 | |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1060 | if (msg_control) |
| 1061 | zerocopy = true; |
| 1062 | |
| 1063 | if (zerocopy) { |
| 1064 | /* Userspace may produce vectors with count greater than |
| 1065 | * MAX_SKB_FRAGS, so we need to linearize parts of the skb |
| 1066 | * to let the rest of the data fit in the frags. |
| 1067 | */ |
| 1068 | if (count > MAX_SKB_FRAGS) { |
| 1069 | copylen = iov_length(iv, count - MAX_SKB_FRAGS); |
| 1070 | if (copylen < offset) |
| 1071 | copylen = 0; |
| 1072 | else |
| 1073 | copylen -= offset; |
| 1074 | } else |
| 1075 | copylen = 0; |
| 1076 | /* There are 256 bytes to be copied into the skb, so there is enough |
| 1077 | * room for the skb head to be expanded in case it is used. |
| 1078 | * The rest of the buffer is mapped from userspace. |
| 1079 | */ |
| 1080 | if (copylen < gso.hdr_len) |
| 1081 | copylen = gso.hdr_len; |
| 1082 | if (!copylen) |
| 1083 | copylen = GOODCOPY_LEN; |
| 1084 | } else |
| 1085 | copylen = len; |
| 1086 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1087 | skb = tun_alloc_skb(tfile, align, copylen, gso.hdr_len, noblock); |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1088 | if (IS_ERR(skb)) { |
| 1089 | if (PTR_ERR(skb) != -EAGAIN) |
| 1090 | tun->dev->stats.rx_dropped++; |
| 1091 | return PTR_ERR(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1092 | } |
| 1093 | |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1094 | if (zerocopy) |
| 1095 | err = zerocopy_sg_from_iovec(skb, iv, offset, count); |
| 1096 | else |
| 1097 | err = skb_copy_datagram_from_iovec(skb, 0, iv, offset, len); |
| 1098 | |
| 1099 | if (err) { |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 1100 | tun->dev->stats.rx_dropped++; |
Dave Jones | 8f22757 | 2006-03-11 18:49:13 -0800 | [diff] [blame] | 1101 | kfree_skb(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1102 | return -EFAULT; |
Dave Jones | 8f22757 | 2006-03-11 18:49:13 -0800 | [diff] [blame] | 1103 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1104 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1105 | if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { |
| 1106 | if (!skb_partial_csum_set(skb, gso.csum_start, |
| 1107 | gso.csum_offset)) { |
| 1108 | tun->dev->stats.rx_frame_errors++; |
| 1109 | kfree_skb(skb); |
| 1110 | return -EINVAL; |
| 1111 | } |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1112 | } |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1113 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1114 | switch (tun->flags & TUN_TYPE_MASK) { |
| 1115 | case TUN_TUN_DEV: |
Ang Way Chuang | f09f7ee | 2008-06-17 21:10:33 -0700 | [diff] [blame] | 1116 | if (tun->flags & TUN_NO_PI) { |
| 1117 | switch (skb->data[0] & 0xf0) { |
| 1118 | case 0x40: |
| 1119 | pi.proto = htons(ETH_P_IP); |
| 1120 | break; |
| 1121 | case 0x60: |
| 1122 | pi.proto = htons(ETH_P_IPV6); |
| 1123 | break; |
| 1124 | default: |
| 1125 | tun->dev->stats.rx_dropped++; |
| 1126 | kfree_skb(skb); |
| 1127 | return -EINVAL; |
| 1128 | } |
| 1129 | } |
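| | /* Example: a raw IPv4 packet written on an IFF_NO_PI tun fd |
| | * begins with its version/IHL octet, e.g. 0x45, so the 0x40 |
| | * test above yields ETH_P_IP; a leading 0x6N octet likewise |
| | * yields ETH_P_IPV6. |
| | */ |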
| 1130 | |
Arnaldo Carvalho de Melo | 459a98e | 2007-03-19 15:30:44 -0700 | [diff] [blame] | 1131 | skb_reset_mac_header(skb); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1132 | skb->protocol = pi.proto; |
Arnaldo Carvalho de Melo | 4c13eb6 | 2007-04-25 17:40:23 -0700 | [diff] [blame] | 1133 | skb->dev = tun->dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1134 | break; |
| 1135 | case TUN_TAP_DEV: |
| 1136 | skb->protocol = eth_type_trans(skb, tun->dev); |
| 1137 | break; |
Joe Perches | 6403eab | 2011-06-03 11:51:20 +0000 | [diff] [blame] | 1138 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1139 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1140 | if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
| 1141 | pr_debug("GSO!\n"); |
| 1142 | switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { |
| 1143 | case VIRTIO_NET_HDR_GSO_TCPV4: |
| 1144 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; |
| 1145 | break; |
| 1146 | case VIRTIO_NET_HDR_GSO_TCPV6: |
| 1147 | skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; |
| 1148 | break; |
Sridhar Samudrala | e36aa25 | 2009-07-14 14:21:04 +0000 | [diff] [blame] | 1149 | case VIRTIO_NET_HDR_GSO_UDP: |
| 1150 | skb_shinfo(skb)->gso_type = SKB_GSO_UDP; |
| 1151 | break; |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1152 | default: |
| 1153 | tun->dev->stats.rx_frame_errors++; |
| 1154 | kfree_skb(skb); |
| 1155 | return -EINVAL; |
| 1156 | } |
| 1157 | |
| 1158 | if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN) |
| 1159 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; |
| 1160 | |
| 1161 | skb_shinfo(skb)->gso_size = gso.gso_size; |
| 1162 | if (skb_shinfo(skb)->gso_size == 0) { |
| 1163 | tun->dev->stats.rx_frame_errors++; |
| 1164 | kfree_skb(skb); |
| 1165 | return -EINVAL; |
| 1166 | } |
| 1167 | |
| 1168 | /* Header must be checked, and gso_segs computed. */ |
| 1169 | skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY; |
| 1170 | skb_shinfo(skb)->gso_segs = 0; |
| 1171 | } |
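| | /* Illustrative sketch (not driver code): a user-space writer on an |
| | * IFF_VNET_HDR fd hands the kernel a TSOv4 super-packet by |
| | * prepending a header along these lines; the numeric values are |
| | * assumptions for a plain Ethernet/IPv4/TCP frame, not requirements: |
| | * |
| | *	struct virtio_net_hdr vnet = { |
| | *		.flags		= VIRTIO_NET_HDR_F_NEEDS_CSUM, |
| | *		.gso_type	= VIRTIO_NET_HDR_GSO_TCPV4, |
| | *		.hdr_len	= 54,	// eth + ip + tcp headers |
| | *		.gso_size	= 1448,	// payload bytes per segment |
| | *		.csum_start	= 34,	// offset of the tcp header |
| | *		.csum_offset	= 16,	// checksum field within tcp |
| | *	}; |
| | * |
| | * followed by write(fd, buf, sizeof(vnet) + frame_len) with the |
| | * header first in buf. |
| | */ |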
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1172 | |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1173 | /* copy skb_ubuf_info for the zerocopy completion callback now that the skb carries no error */ |
| 1174 | if (zerocopy) { |
| 1175 | skb_shinfo(skb)->destructor_arg = msg_control; |
| 1176 | skb_shinfo(skb)->tx_flags |= SKBTX_DEV_ZEROCOPY; |
| 1177 | } |
| 1178 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1179 | netif_rx_ni(skb); |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1180 | |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 1181 | tun->dev->stats.rx_packets++; |
| 1182 | tun->dev->stats.rx_bytes += len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1183 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 1184 | tun_flow_update(tun, skb, tfile->queue_index); |
Michael S. Tsirkin | 0690899 | 2012-07-20 09:23:23 +0000 | [diff] [blame] | 1185 | return total_len; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1186 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1187 | |
Badari Pulavarty | ee0b3e6 | 2006-09-30 23:28:47 -0700 | [diff] [blame] | 1188 | static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv, |
| 1189 | unsigned long count, loff_t pos) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1190 | { |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1191 | struct file *file = iocb->ki_filp; |
Herbert Xu | ab46d77 | 2009-02-14 20:46:39 -0800 | [diff] [blame] | 1192 | struct tun_struct *tun = tun_get(file); |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1193 | struct tun_file *tfile = file->private_data; |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1194 | ssize_t result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1195 | |
| 1196 | if (!tun) |
| 1197 | return -EBADFD; |
| 1198 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1199 | tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1200 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1201 | result = tun_get_user(tun, tfile, NULL, iv, iov_length(iv, count), |
| 1202 | count, file->f_flags & O_NONBLOCK); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1203 | |
| 1204 | tun_put(tun); |
| 1205 | return result; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1206 | } |
| 1207 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1208 | /* Put a packet into the user space buffer */ |
stephen hemminger | 6f7c156 | 2011-06-08 14:33:08 +0000 | [diff] [blame] | 1209 | static ssize_t tun_put_user(struct tun_struct *tun, |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1210 | struct tun_file *tfile, |
stephen hemminger | 6f7c156 | 2011-06-08 14:33:08 +0000 | [diff] [blame] | 1211 | struct sk_buff *skb, |
| 1212 | const struct iovec *iv, int len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1213 | { |
| 1214 | struct tun_pi pi = { 0, skb->protocol }; |
| 1215 | ssize_t total = 0; |
| 1216 | |
| 1217 | if (!(tun->flags & TUN_NO_PI)) { |
| 1218 | if ((len -= sizeof(pi)) < 0) |
| 1219 | return -EINVAL; |
| 1220 | |
| 1221 | if (len < skb->len) { |
| 1222 | /* Packet will be stripped */ |
| 1223 | pi.flags |= TUN_PKT_STRIP; |
| 1224 | } |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1225 | |
Michael S. Tsirkin | 43b39dc | 2009-04-20 01:25:59 +0000 | [diff] [blame] | 1226 | if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi))) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1227 | return -EFAULT; |
| 1228 | total += sizeof(pi); |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1229 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1230 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1231 | if (tun->flags & TUN_VNET_HDR) { |
| 1232 | struct virtio_net_hdr gso = { 0 }; /* no info leak */ |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1233 | if ((len -= tun->vnet_hdr_sz) < 0) |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1234 | return -EINVAL; |
| 1235 | |
| 1236 | if (skb_is_gso(skb)) { |
| 1237 | struct skb_shared_info *sinfo = skb_shinfo(skb); |
| 1238 | |
| 1239 | /* This is a hint as to how much should be linear. */ |
| 1240 | gso.hdr_len = skb_headlen(skb); |
| 1241 | gso.gso_size = sinfo->gso_size; |
| 1242 | if (sinfo->gso_type & SKB_GSO_TCPV4) |
| 1243 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; |
| 1244 | else if (sinfo->gso_type & SKB_GSO_TCPV6) |
| 1245 | gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; |
Sridhar Samudrala | e36aa25 | 2009-07-14 14:21:04 +0000 | [diff] [blame] | 1246 | else if (sinfo->gso_type & SKB_GSO_UDP) |
| 1247 | gso.gso_type = VIRTIO_NET_HDR_GSO_UDP; |
Michael S. Tsirkin | ef3db4a | 2010-07-21 04:32:45 +0000 | [diff] [blame] | 1248 | else { |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1249 | pr_err("unexpected GSO type: " |
Michael S. Tsirkin | ef3db4a | 2010-07-21 04:32:45 +0000 | [diff] [blame] | 1250 | "0x%x, gso_size %d, hdr_len %d\n", |
| 1251 | sinfo->gso_type, gso.gso_size, |
| 1252 | gso.hdr_len); |
| 1253 | print_hex_dump(KERN_ERR, "tun: ", |
| 1254 | DUMP_PREFIX_NONE, |
| 1255 | 16, 1, skb->head, |
| 1256 | min((int)gso.hdr_len, 64), true); |
| 1257 | WARN_ON_ONCE(1); |
| 1258 | return -EINVAL; |
| 1259 | } |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1260 | if (sinfo->gso_type & SKB_GSO_TCP_ECN) |
| 1261 | gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN; |
| 1262 | } else |
| 1263 | gso.gso_type = VIRTIO_NET_HDR_GSO_NONE; |
| 1264 | |
| 1265 | if (skb->ip_summed == CHECKSUM_PARTIAL) { |
| 1266 | gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; |
Michał Mirosław | 55508d6 | 2010-12-14 15:24:08 +0000 | [diff] [blame] | 1267 | gso.csum_start = skb_checksum_start_offset(skb); |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1268 | gso.csum_offset = skb->csum_offset; |
Jason Wang | 10a8d94 | 2011-06-10 00:56:17 +0000 | [diff] [blame] | 1269 | } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { |
| 1270 | gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1271 | } /* else everything is zero */ |
| 1272 | |
Michael S. Tsirkin | 43b39dc | 2009-04-20 01:25:59 +0000 | [diff] [blame] | 1273 | if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, |
| 1274 | sizeof(gso)))) |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1275 | return -EFAULT; |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1276 | total += tun->vnet_hdr_sz; |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1277 | } |
| 1278 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1279 | len = min_t(int, skb->len, len); |
| 1280 | |
Michael S. Tsirkin | 43b39dc | 2009-04-20 01:25:59 +0000 | [diff] [blame] | 1281 | skb_copy_datagram_const_iovec(skb, 0, iv, total, len); |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1282 | total += skb->len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1283 | |
Jeff Garzik | 09f75cd | 2007-10-03 17:41:50 -0700 | [diff] [blame] | 1284 | tun->dev->stats.tx_packets++; |
| 1285 | tun->dev->stats.tx_bytes += len; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1286 | |
| 1287 | return total; |
| 1288 | } |
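| | /* Shape of a successful read() on the fd, as assembled above (a |
| | * sketch; which pieces appear depends on the negotiated flags): |
| | * |
| | *	[ struct tun_pi         ]  unless IFF_NO_PI |
| | *	[ struct virtio_net_hdr ]  padded to tun->vnet_hdr_sz, if IFF_VNET_HDR |
| | *	[ packet data           ]  min(skb->len, remaining buffer) |
| | * |
| | * When the buffer is too short for the whole packet, TUN_PKT_STRIP |
| | * is set in tun_pi.flags and the tail is silently dropped. |
| | */ |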
| 1289 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1290 | static ssize_t tun_do_read(struct tun_struct *tun, struct tun_file *tfile, |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1291 | struct kiocb *iocb, const struct iovec *iv, |
| 1292 | ssize_t len, int noblock) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1293 | { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1294 | DECLARE_WAITQUEUE(wait, current); |
| 1295 | struct sk_buff *skb; |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1296 | ssize_t ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1297 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1298 | tun_debug(KERN_INFO, tun, "tun_chr_read\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1299 | |
Amos Kong | 61a5ff1 | 2011-06-09 00:27:10 -0700 | [diff] [blame] | 1300 | if (unlikely(!noblock)) |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1301 | add_wait_queue(&tfile->wq.wait, &wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1302 | while (len) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1303 | current->state = TASK_INTERRUPTIBLE; |
| 1304 | |
| 1305 | /* Read frames from the queue */ |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1306 | if (!(skb = skb_dequeue(&tfile->socket.sk->sk_receive_queue))) { |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1307 | if (noblock) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1308 | ret = -EAGAIN; |
| 1309 | break; |
| 1310 | } |
| 1311 | if (signal_pending(current)) { |
| 1312 | ret = -ERESTARTSYS; |
| 1313 | break; |
| 1314 | } |
Eric W. Biederman | c70f182 | 2009-01-20 11:07:17 +0000 | [diff] [blame] | 1315 | if (tun->dev->reg_state != NETREG_REGISTERED) { |
| 1316 | ret = -EIO; |
| 1317 | break; |
| 1318 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1319 | |
| 1320 | /* Nothing to read, let's sleep */ |
| 1321 | schedule(); |
| 1322 | continue; |
| 1323 | } |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1324 | netif_wake_subqueue(tun->dev, tfile->queue_index); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1325 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1326 | ret = tun_put_user(tun, tfile, skb, iv, len); |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1327 | kfree_skb(skb); |
| 1328 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1329 | } |
| 1330 | |
| 1331 | current->state = TASK_RUNNING; |
Amos Kong | 61a5ff1 | 2011-06-09 00:27:10 -0700 | [diff] [blame] | 1332 | if (unlikely(!noblock)) |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1333 | remove_wait_queue(&tfile->wq.wait, &wait); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1334 | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1335 | return ret; |
| 1336 | } |
| 1337 | |
| 1338 | static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv, |
| 1339 | unsigned long count, loff_t pos) |
| 1340 | { |
| 1341 | struct file *file = iocb->ki_filp; |
| 1342 | struct tun_file *tfile = file->private_data; |
| 1343 | struct tun_struct *tun = __tun_get(tfile); |
| 1344 | ssize_t len, ret; |
| 1345 | |
| 1346 | if (!tun) |
| 1347 | return -EBADFD; |
| 1348 | len = iov_length(iv, count); |
| 1349 | if (len < 0) { |
| 1350 | ret = -EINVAL; |
| 1351 | goto out; |
| 1352 | } |
| 1353 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1354 | ret = tun_do_read(tun, tfile, iocb, iv, len, |
| 1355 | file->f_flags & O_NONBLOCK); |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1356 | ret = min_t(ssize_t, ret, len); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1357 | out: |
| 1358 | tun_put(tun); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1359 | return ret; |
| 1360 | } |
| 1361 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 1362 | static void tun_free_netdev(struct net_device *dev) |
| 1363 | { |
| 1364 | struct tun_struct *tun = netdev_priv(dev); |
| 1365 | |
| 1366 | tun_flow_uninit(tun); |
| 1367 | free_netdev(dev); |
| 1368 | } |
| 1369 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1370 | static void tun_setup(struct net_device *dev) |
| 1371 | { |
| 1372 | struct tun_struct *tun = netdev_priv(dev); |
| 1373 | |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1374 | tun->owner = INVALID_UID; |
| 1375 | tun->group = INVALID_GID; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1376 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1377 | dev->ethtool_ops = &tun_ethtool_ops; |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 1378 | dev->destructor = tun_free_netdev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1379 | } |
| 1380 | |
Eric W. Biederman | f019a7a | 2009-01-21 16:02:16 -0800 | [diff] [blame] | 1381 | /* Trivial set of netlink ops to allow deleting a tun or tap |
| 1382 | * device with netlink. |
| 1383 | */ |
| 1384 | static int tun_validate(struct nlattr *tb[], struct nlattr *data[]) |
| 1385 | { |
| 1386 | return -EINVAL; |
| 1387 | } |
| 1388 | |
| 1389 | static struct rtnl_link_ops tun_link_ops __read_mostly = { |
| 1390 | .kind = DRV_NAME, |
| 1391 | .priv_size = sizeof(struct tun_struct), |
| 1392 | .setup = tun_setup, |
| 1393 | .validate = tun_validate, |
| 1394 | }; |
| 1395 | |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1396 | static void tun_sock_write_space(struct sock *sk) |
| 1397 | { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1398 | struct tun_file *tfile; |
Eric Dumazet | 4381548 | 2010-04-29 11:01:49 +0000 | [diff] [blame] | 1399 | wait_queue_head_t *wqueue; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1400 | |
| 1401 | if (!sock_writeable(sk)) |
| 1402 | return; |
| 1403 | |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1404 | if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags)) |
| 1405 | return; |
| 1406 | |
Eric Dumazet | 4381548 | 2010-04-29 11:01:49 +0000 | [diff] [blame] | 1407 | wqueue = sk_sleep(sk); |
| 1408 | if (wqueue && waitqueue_active(wqueue)) |
| 1409 | wake_up_interruptible_sync_poll(wqueue, POLLOUT | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1410 | POLLWRNORM | POLLWRBAND); |
Herbert Xu | c722c62 | 2009-06-03 21:45:55 -0700 | [diff] [blame] | 1411 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1412 | tfile = container_of(sk, struct tun_file, sk); |
| 1413 | kill_fasync(&tfile->fasync, SIGIO, POLL_OUT); |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1414 | } |
| 1415 | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1416 | static int tun_sendmsg(struct kiocb *iocb, struct socket *sock, |
| 1417 | struct msghdr *m, size_t total_len) |
| 1418 | { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1419 | int ret; |
| 1420 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); |
| 1421 | struct tun_struct *tun = __tun_get(tfile); |
| 1422 | |
| 1423 | if (!tun) |
| 1424 | return -EBADFD; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1425 | ret = tun_get_user(tun, tfile, m->msg_control, m->msg_iov, total_len, |
| 1426 | m->msg_iovlen, m->msg_flags & MSG_DONTWAIT); |
| 1427 | tun_put(tun); |
| 1428 | return ret; |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1429 | } |
| 1430 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1431 | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1432 | static int tun_recvmsg(struct kiocb *iocb, struct socket *sock, |
| 1433 | struct msghdr *m, size_t total_len, |
| 1434 | int flags) |
| 1435 | { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1436 | struct tun_file *tfile = container_of(sock, struct tun_file, socket); |
| 1437 | struct tun_struct *tun = __tun_get(tfile); |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1438 | int ret; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1439 | |
| 1440 | if (!tun) |
| 1441 | return -EBADFD; |
| 1442 | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1443 | if (flags & ~(MSG_DONTWAIT|MSG_TRUNC)) |
| 1444 | return -EINVAL; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1445 | ret = tun_do_read(tun, tfile, iocb, m->msg_iov, total_len, |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1446 | flags & MSG_DONTWAIT); |
| 1447 | if (ret > total_len) { |
| 1448 | m->msg_flags |= MSG_TRUNC; |
| 1449 | ret = flags & MSG_TRUNC ? ret : total_len; |
| 1450 | } |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1451 | tun_put(tun); |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1452 | return ret; |
| 1453 | } |
| 1454 | |
Stanislav Kinsbursky | 1ab5ecb | 2012-03-12 02:59:41 +0000 | [diff] [blame] | 1455 | static int tun_release(struct socket *sock) |
| 1456 | { |
| 1457 | if (sock->sk) |
| 1458 | sock_put(sock->sk); |
| 1459 | return 0; |
| 1460 | } |
| 1461 | |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1462 | /* Ops structure to mimic raw sockets with tun */ |
| 1463 | static const struct proto_ops tun_socket_ops = { |
| 1464 | .sendmsg = tun_sendmsg, |
| 1465 | .recvmsg = tun_recvmsg, |
Stanislav Kinsbursky | 1ab5ecb | 2012-03-12 02:59:41 +0000 | [diff] [blame] | 1466 | .release = tun_release, |
Michael S. Tsirkin | 05c2828 | 2010-01-14 06:17:09 +0000 | [diff] [blame] | 1467 | }; |
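| | /* In-kernel users (e.g. vhost-net, via tun_get_socket()) drive the |
| | * queue through these ops instead of the character device. |
| | */ |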
| 1468 | |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1469 | static struct proto tun_proto = { |
| 1470 | .name = "tun", |
| 1471 | .owner = THIS_MODULE, |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1472 | .obj_size = sizeof(struct tun_file), |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1473 | }; |
Eric W. Biederman | f019a7a | 2009-01-21 16:02:16 -0800 | [diff] [blame] | 1474 | |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1475 | static int tun_flags(struct tun_struct *tun) |
| 1476 | { |
| 1477 | int flags = 0; |
| 1478 | |
| 1479 | if (tun->flags & TUN_TUN_DEV) |
| 1480 | flags |= IFF_TUN; |
| 1481 | else |
| 1482 | flags |= IFF_TAP; |
| 1483 | |
| 1484 | if (tun->flags & TUN_NO_PI) |
| 1485 | flags |= IFF_NO_PI; |
| 1486 | |
| 1487 | if (tun->flags & TUN_ONE_QUEUE) |
| 1488 | flags |= IFF_ONE_QUEUE; |
| 1489 | |
| 1490 | if (tun->flags & TUN_VNET_HDR) |
| 1491 | flags |= IFF_VNET_HDR; |
| 1492 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1493 | if (tun->flags & TUN_TAP_MQ) |
| 1494 | flags |= IFF_MULTI_QUEUE; |
| 1495 | |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1496 | return flags; |
| 1497 | } |
| 1498 | |
| 1499 | static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr, |
| 1500 | char *buf) |
| 1501 | { |
| 1502 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
| 1503 | return sprintf(buf, "0x%x\n", tun_flags(tun)); |
| 1504 | } |
| 1505 | |
| 1506 | static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr, |
| 1507 | char *buf) |
| 1508 | { |
| 1509 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1510 | return uid_valid(tun->owner)? |
| 1511 | sprintf(buf, "%u\n", |
| 1512 | from_kuid_munged(current_user_ns(), tun->owner)): |
| 1513 | sprintf(buf, "-1\n"); |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1514 | } |
| 1515 | |
| 1516 | static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr, |
| 1517 | char *buf) |
| 1518 | { |
| 1519 | struct tun_struct *tun = netdev_priv(to_net_dev(dev)); |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1520 | return gid_valid(tun->group) ? |
| 1521 | sprintf(buf, "%u\n", |
| 1522 | from_kgid_munged(current_user_ns(), tun->group)): |
| 1523 | sprintf(buf, "-1\n"); |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1524 | } |
| 1525 | |
| 1526 | static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL); |
| 1527 | static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL); |
| 1528 | static DEVICE_ATTR(group, 0444, tun_show_group, NULL); |
| 1529 | |
Pavel Emelyanov | d647a59 | 2008-04-16 00:41:16 -0700 | [diff] [blame] | 1530 | static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1531 | { |
| 1532 | struct tun_struct *tun; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1533 | struct tun_file *tfile = file->private_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1534 | struct net_device *dev; |
| 1535 | int err; |
| 1536 | |
Eric W. Biederman | 74a3e5a | 2009-01-20 10:56:20 +0000 | [diff] [blame] | 1537 | dev = __dev_get_by_name(net, ifr->ifr_name); |
| 1538 | if (dev) { |
David Woodhouse | f85ba78 | 2009-04-27 03:23:54 -0700 | [diff] [blame] | 1539 | if (ifr->ifr_flags & IFF_TUN_EXCL) |
| 1540 | return -EBUSY; |
Eric W. Biederman | 74a3e5a | 2009-01-20 10:56:20 +0000 | [diff] [blame] | 1541 | if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops) |
| 1542 | tun = netdev_priv(dev); |
| 1543 | else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops) |
| 1544 | tun = netdev_priv(dev); |
| 1545 | else |
| 1546 | return -EINVAL; |
| 1547 | |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 1548 | if (tun_not_capable(tun)) |
Paul Moore | 2b980db | 2009-08-28 18:12:43 -0400 | [diff] [blame] | 1549 | return -EPERM; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1550 | err = security_tun_dev_attach(tfile->socket.sk); |
Paul Moore | 2b980db | 2009-08-28 18:12:43 -0400 | [diff] [blame] | 1551 | if (err < 0) |
| 1552 | return err; |
| 1553 | |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 1554 | err = tun_attach(tun, file); |
| 1555 | if (err < 0) |
| 1556 | return err; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1557 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1558 | else { |
| 1559 | char *name; |
| 1560 | unsigned long flags = 0; |
| 1561 | |
David Woodhouse | ca6bb5d | 2006-06-22 16:07:52 -0700 | [diff] [blame] | 1562 | if (!capable(CAP_NET_ADMIN)) |
| 1563 | return -EPERM; |
Paul Moore | 2b980db | 2009-08-28 18:12:43 -0400 | [diff] [blame] | 1564 | err = security_tun_dev_create(); |
| 1565 | if (err < 0) |
| 1566 | return err; |
David Woodhouse | ca6bb5d | 2006-06-22 16:07:52 -0700 | [diff] [blame] | 1567 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1568 | /* Set dev type */ |
| 1569 | if (ifr->ifr_flags & IFF_TUN) { |
| 1570 | /* TUN device */ |
| 1571 | flags |= TUN_TUN_DEV; |
| 1572 | name = "tun%d"; |
| 1573 | } else if (ifr->ifr_flags & IFF_TAP) { |
| 1574 | /* TAP device */ |
| 1575 | flags |= TUN_TAP_DEV; |
| 1576 | name = "tap%d"; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1577 | } else |
Kusanagi Kouichi | 36989b9 | 2009-09-16 21:36:13 +0000 | [diff] [blame] | 1578 | return -EINVAL; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 1579 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1580 | if (*ifr->ifr_name) |
| 1581 | name = ifr->ifr_name; |
| 1582 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1583 | dev = alloc_netdev_mqs(sizeof(struct tun_struct), name, |
| 1584 | tun_setup, |
| 1585 | MAX_TAP_QUEUES, MAX_TAP_QUEUES); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1586 | if (!dev) |
| 1587 | return -ENOMEM; |
| 1588 | |
Pavel Emelyanov | fc54c65 | 2008-04-16 00:41:53 -0700 | [diff] [blame] | 1589 | dev_net_set(dev, net); |
Eric W. Biederman | f019a7a | 2009-01-21 16:02:16 -0800 | [diff] [blame] | 1590 | dev->rtnl_link_ops = &tun_link_ops; |
Stephen Hemminger | 758e43b | 2008-11-19 22:10:37 -0800 | [diff] [blame] | 1591 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1592 | tun = netdev_priv(dev); |
| 1593 | tun->dev = dev; |
| 1594 | tun->flags = flags; |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1595 | tun->txflt.count = 0; |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1596 | tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1597 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1598 | tun->filter_attached = false; |
| 1599 | tun->sndbuf = tfile->socket.sk->sk_sndbuf; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1600 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 1601 | spin_lock_init(&tun->lock); |
| 1602 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1603 | security_tun_dev_post_create(&tfile->sk); |
Paul Moore | 2b980db | 2009-08-28 18:12:43 -0400 | [diff] [blame] | 1604 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1605 | tun_net_init(dev); |
| 1606 | |
Jason Wang | 96442e42 | 2012-10-31 19:46:02 +0000 | [diff] [blame] | 1607 | if (tun_flow_init(tun)) |
| 1608 | goto err_free_dev; |
| 1609 | |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1610 | dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | |
| 1611 | TUN_USER_FEATURES; |
| 1612 | dev->features = dev->hw_features; |
| 1613 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1614 | err = register_netdevice(tun->dev); |
| 1615 | if (err < 0) |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1616 | goto err_free_dev; |
Herbert Xu | 9c3fea6 | 2009-04-18 14:15:52 +0000 | [diff] [blame] | 1617 | |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1618 | if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) || |
| 1619 | device_create_file(&tun->dev->dev, &dev_attr_owner) || |
| 1620 | device_create_file(&tun->dev->dev, &dev_attr_group)) |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1621 | pr_err("Failed to create tun sysfs files\n"); |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1622 | |
Eric W. Biederman | a7385ba | 2009-01-20 10:57:48 +0000 | [diff] [blame] | 1623 | err = tun_attach(tun, file); |
| 1624 | if (err < 0) |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1625 | goto err_free_dev; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1626 | } |
| 1627 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1628 | tun_debug(KERN_INFO, tun, "tun_set_iff\n"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1629 | |
| 1630 | if (ifr->ifr_flags & IFF_NO_PI) |
| 1631 | tun->flags |= TUN_NO_PI; |
Nathaniel Filardo | a26af1e | 2008-02-05 03:05:07 -0800 | [diff] [blame] | 1632 | else |
| 1633 | tun->flags &= ~TUN_NO_PI; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1634 | |
| 1635 | if (ifr->ifr_flags & IFF_ONE_QUEUE) |
| 1636 | tun->flags |= TUN_ONE_QUEUE; |
Nathaniel Filardo | a26af1e | 2008-02-05 03:05:07 -0800 | [diff] [blame] | 1637 | else |
| 1638 | tun->flags &= ~TUN_ONE_QUEUE; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1639 | |
Rusty Russell | f43798c | 2008-07-03 03:48:02 -0700 | [diff] [blame] | 1640 | if (ifr->ifr_flags & IFF_VNET_HDR) |
| 1641 | tun->flags |= TUN_VNET_HDR; |
| 1642 | else |
| 1643 | tun->flags &= ~TUN_VNET_HDR; |
| 1644 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1645 | if (ifr->ifr_flags & IFF_MULTI_QUEUE) |
| 1646 | tun->flags |= TUN_TAP_MQ; |
| 1647 | else |
| 1648 | tun->flags &= ~TUN_TAP_MQ; |
| 1649 | |
Max Krasnyansky | e35259a | 2008-07-10 16:59:11 -0700 | [diff] [blame] | 1650 | /* Make sure persistent devices do not get stuck in |
| 1651 | * xoff state. |
| 1652 | */ |
| 1653 | if (netif_running(tun->dev)) |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1654 | netif_tx_wake_all_queues(tun->dev); |
Max Krasnyansky | e35259a | 2008-07-10 16:59:11 -0700 | [diff] [blame] | 1655 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1656 | strcpy(ifr->ifr_name, tun->dev->name); |
| 1657 | return 0; |
| 1658 | |
| 1659 | err_free_dev: |
| 1660 | free_netdev(dev); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1661 | return err; |
| 1662 | } |
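| | /* For reference, the canonical user-space counterpart of the path |
| | * above, in the spirit of Documentation/networking/tuntap.txt (a |
| | * sketch with error handling trimmed): |
| | * |
| | *	#include <fcntl.h> |
| | *	#include <string.h> |
| | *	#include <unistd.h> |
| | *	#include <sys/ioctl.h> |
| | *	#include <linux/if.h> |
| | *	#include <linux/if_tun.h> |
| | * |
| | *	int tun_alloc(char *dev)	// dev: name, or "" for tap%d |
| | *	{ |
| | *		struct ifreq ifr; |
| | *		int fd; |
| | * |
| | *		if ((fd = open("/dev/net/tun", O_RDWR)) < 0) |
| | *			return -1; |
| | *		memset(&ifr, 0, sizeof(ifr)); |
| | *		ifr.ifr_flags = IFF_TAP | IFF_NO_PI; |
| | *		if (*dev) |
| | *			strncpy(ifr.ifr_name, dev, IFNAMSIZ); |
| | *		if (ioctl(fd, TUNSETIFF, (void *) &ifr) < 0) { |
| | *			close(fd); |
| | *			return -1; |
| | *		} |
| | *		strcpy(dev, ifr.ifr_name); |
| | *		return fd; |
| | *	} |
| | */ |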
| 1663 | |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1664 | static int tun_get_iff(struct net *net, struct tun_struct *tun, |
| 1665 | struct ifreq *ifr) |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1666 | { |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1667 | tun_debug(KERN_INFO, tun, "tun_get_iff\n"); |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1668 | |
| 1669 | strcpy(ifr->ifr_name, tun->dev->name); |
| 1670 | |
David Woodhouse | 980c9e8 | 2009-05-09 22:54:21 -0700 | [diff] [blame] | 1671 | ifr->ifr_flags = tun_flags(tun); |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1672 | |
| 1673 | return 0; |
| 1674 | } |
| 1675 | |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1676 | /* This is like a cut-down ethtool ops, except done via the tun fd so |
| 1677 | * no privs are required. */ |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1678 | static int set_offload(struct tun_struct *tun, unsigned long arg) |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1679 | { |
Michał Mirosław | c8f44af | 2011-11-15 15:29:55 +0000 | [diff] [blame] | 1680 | netdev_features_t features = 0; |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1681 | |
| 1682 | if (arg & TUN_F_CSUM) { |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1683 | features |= NETIF_F_HW_CSUM; |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1684 | arg &= ~TUN_F_CSUM; |
| 1685 | |
| 1686 | if (arg & (TUN_F_TSO4|TUN_F_TSO6)) { |
| 1687 | if (arg & TUN_F_TSO_ECN) { |
| 1688 | features |= NETIF_F_TSO_ECN; |
| 1689 | arg &= ~TUN_F_TSO_ECN; |
| 1690 | } |
| 1691 | if (arg & TUN_F_TSO4) |
| 1692 | features |= NETIF_F_TSO; |
| 1693 | if (arg & TUN_F_TSO6) |
| 1694 | features |= NETIF_F_TSO6; |
| 1695 | arg &= ~(TUN_F_TSO4|TUN_F_TSO6); |
| 1696 | } |
Sridhar Samudrala | e36aa25 | 2009-07-14 14:21:04 +0000 | [diff] [blame] | 1697 | |
| 1698 | if (arg & TUN_F_UFO) { |
| 1699 | features |= NETIF_F_UFO; |
| 1700 | arg &= ~TUN_F_UFO; |
| 1701 | } |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1702 | } |
| 1703 | |
| 1704 | /* This gives the user a way to test for new features in the future |
| 1705 | * by trying to set them. */ |
| 1706 | if (arg) |
| 1707 | return -EINVAL; |
| 1708 | |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1709 | tun->set_features = features; |
| 1710 | netdev_update_features(tun->dev); |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1711 | |
| 1712 | return 0; |
| 1713 | } |
| 1714 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1715 | static void tun_detach_filter(struct tun_struct *tun, int n) |
| 1716 | { |
| 1717 | int i; |
| 1718 | struct tun_file *tfile; |
| 1719 | |
| 1720 | for (i = 0; i < n; i++) { |
| 1721 | tfile = rcu_dereference_protected(tun->tfiles[i], |
| 1722 | lockdep_rtnl_is_held()); |
| 1723 | sk_detach_filter(tfile->socket.sk); |
| 1724 | } |
| 1725 | |
| 1726 | tun->filter_attached = false; |
| 1727 | } |
| 1728 | |
| 1729 | static int tun_attach_filter(struct tun_struct *tun) |
| 1730 | { |
| 1731 | int i, ret = 0; |
| 1732 | struct tun_file *tfile; |
| 1733 | |
| 1734 | for (i = 0; i < tun->numqueues; i++) { |
| 1735 | tfile = rcu_dereference_protected(tun->tfiles[i], |
| 1736 | lockdep_rtnl_is_held()); |
| 1737 | ret = sk_attach_filter(&tun->fprog, tfile->socket.sk); |
| 1738 | if (ret) { |
| 1739 | tun_detach_filter(tun, i); |
| 1740 | return ret; |
| 1741 | } |
| 1742 | } |
| 1743 | |
| 1744 | tun->filter_attached = true; |
| 1745 | return ret; |
| 1746 | } |
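| | /* A minimal user-space sketch of the attach path (assumptions: fd |
| | * is an attached tap fd; the one-instruction classic BPF program |
| | * accepts every packet up to 64 KiB): |
| | * |
| | *	#include <sys/ioctl.h> |
| | *	#include <linux/filter.h> |
| | *	#include <linux/if_tun.h> |
| | * |
| | *	struct sock_filter accept_all[] = { |
| | *		{ 0x06, 0, 0, 0x00010000 },	// BPF_RET | BPF_K, 65536 |
| | *	}; |
| | *	struct sock_fprog fprog = { |
| | *		.len	= 1, |
| | *		.filter	= accept_all, |
| | *	}; |
| | *	ioctl(fd, TUNATTACHFILTER, &fprog); |
| | * |
| | * TUNDETACHFILTER takes no argument and undoes the attach. |
| | */ |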
| 1747 | |
| 1748 | static void tun_set_sndbuf(struct tun_struct *tun) |
| 1749 | { |
| 1750 | struct tun_file *tfile; |
| 1751 | int i; |
| 1752 | |
| 1753 | for (i = 0; i < tun->numqueues; i++) { |
| 1754 | tfile = rcu_dereference_protected(tun->tfiles[i], |
| 1755 | lockdep_rtnl_is_held()); |
| 1756 | tfile->socket.sk->sk_sndbuf = tun->sndbuf; |
| 1757 | } |
| 1758 | } |
| 1759 | |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 1760 | static int tun_set_queue(struct file *file, struct ifreq *ifr) |
| 1761 | { |
| 1762 | struct tun_file *tfile = file->private_data; |
| 1763 | struct tun_struct *tun; |
| 1764 | struct net_device *dev; |
| 1765 | int ret = 0; |
| 1766 | |
| 1767 | rtnl_lock(); |
| 1768 | |
| 1769 | if (ifr->ifr_flags & IFF_ATTACH_QUEUE) { |
| 1770 | dev = __dev_get_by_name(tfile->net, ifr->ifr_name); |
| 1771 | if (!dev) { |
| 1772 | ret = -EINVAL; |
| 1773 | goto unlock; |
| 1774 | } |
| 1775 | |
| 1776 | tun = netdev_priv(dev); |
| 1777 | if (dev->netdev_ops != &tap_netdev_ops && |
| 1778 | dev->netdev_ops != &tun_netdev_ops) |
| 1779 | ret = -EINVAL; |
| 1780 | else if (tun_not_capable(tun)) |
| 1781 | ret = -EPERM; |
| 1782 | else |
| 1783 | ret = tun_attach(tun, file); |
| 1784 | } else if (ifr->ifr_flags & IFF_DETACH_QUEUE) |
| 1785 | __tun_detach(tfile, false); |
| 1786 | else |
| 1787 | ret = -EINVAL; |
| 1788 | |
| 1789 | unlock: |
| 1790 | rtnl_unlock(); |
| 1791 | return ret; |
| 1792 | } |
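| | /* User-space sketch of queue control (assumes fd was attached to a |
| | * multiqueue device created with IFF_MULTI_QUEUE): |
| | * |
| | *	struct ifreq ifr; |
| | * |
| | *	memset(&ifr, 0, sizeof(ifr)); |
| | *	strncpy(ifr.ifr_name, "tap0", IFNAMSIZ);	// hypothetical name |
| | *	ifr.ifr_flags = IFF_DETACH_QUEUE;	// stop using this queue |
| | *	ioctl(fd, TUNSETQUEUE, &ifr); |
| | *	ifr.ifr_flags = IFF_ATTACH_QUEUE;	// rejoin later |
| | *	ioctl(fd, TUNSETQUEUE, &ifr); |
| | */ |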
| 1793 | |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 1794 | static long __tun_chr_ioctl(struct file *file, unsigned int cmd, |
| 1795 | unsigned long arg, int ifreq_len) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1796 | { |
Eric W. Biederman | 36b50ba | 2009-01-20 11:01:48 +0000 | [diff] [blame] | 1797 | struct tun_file *tfile = file->private_data; |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1798 | struct tun_struct *tun; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1799 | void __user* argp = (void __user*)arg; |
| 1800 | struct ifreq ifr; |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1801 | kuid_t owner; |
| 1802 | kgid_t group; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1803 | int sndbuf; |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1804 | int vnet_hdr_sz; |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1805 | int ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1806 | |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 1807 | if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) { |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 1808 | if (copy_from_user(&ifr, argp, ifreq_len)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1809 | return -EFAULT; |
David S. Miller | 8bbb181 | 2012-07-30 14:52:48 -0700 | [diff] [blame] | 1810 | } else { |
Mathias Krause | a117dac | 2012-07-29 19:45:14 +0000 | [diff] [blame] | 1811 | memset(&ifr, 0, sizeof(ifr)); |
David S. Miller | 8bbb181 | 2012-07-30 14:52:48 -0700 | [diff] [blame] | 1812 | } |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1813 | if (cmd == TUNGETFEATURES) { |
| 1814 | /* Currently this just means: "what IFF flags are valid?". |
| 1815 | * This is needed because we never checked for invalid flags on |
| 1816 | * TUNSETIFF. */ |
| 1817 | return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE | |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 1818 | IFF_VNET_HDR | IFF_MULTI_QUEUE, |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1819 | (unsigned int __user*)argp); |
Jason Wang | cde8b15 | 2012-10-31 19:46:01 +0000 | [diff] [blame] | 1820 | } else if (cmd == TUNSETQUEUE) |
| 1821 | return tun_set_queue(file, &ifr); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1822 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1823 | ret = 0; |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1824 | rtnl_lock(); |
| 1825 | |
Eric W. Biederman | 36b50ba | 2009-01-20 11:01:48 +0000 | [diff] [blame] | 1826 | tun = __tun_get(tfile); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1827 | if (cmd == TUNSETIFF && !tun) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1828 | ifr.ifr_name[IFNAMSIZ-1] = '\0'; |
| 1829 | |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1830 | ret = tun_set_iff(tfile->net, file, &ifr); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1831 | |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1832 | if (ret) |
| 1833 | goto unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1834 | |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 1835 | if (copy_to_user(argp, &ifr, ifreq_len)) |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1836 | ret = -EFAULT; |
| 1837 | goto unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1838 | } |
| 1839 | |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1840 | ret = -EBADFD; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1841 | if (!tun) |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1842 | goto unlock; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | |
Jason Wang | 1e58833 | 2012-10-31 19:45:56 +0000 | [diff] [blame] | 1844 | tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %u\n", cmd); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1845 | |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1846 | ret = 0; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1847 | switch (cmd) { |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1848 | case TUNGETIFF: |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 1849 | ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr); |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1850 | if (ret) |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1851 | break; |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1852 | |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 1853 | if (copy_to_user(argp, &ifr, ifreq_len)) |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1854 | ret = -EFAULT; |
Mark McLoughlin | e3b9955 | 2008-08-15 15:09:56 -0700 | [diff] [blame] | 1855 | break; |
| 1856 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1857 | case TUNSETNOCSUM: |
| 1858 | /* Disable/Enable checksum */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1859 | |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1860 | /* [unimplemented] */ |
| 1861 | tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n", |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1862 | arg ? "disabled" : "enabled"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1863 | break; |
| 1864 | |
| 1865 | case TUNSETPERSIST: |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1866 | /* Disable/Enable persist mode. Keep an extra reference to the |
| 1867 | * module to prevent it from being unloaded while persist is set. |
| 1868 | */ |
| 1869 | if (arg) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1870 | tun->flags |= TUN_PERSIST; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1871 | __module_get(THIS_MODULE); |
| 1872 | } else { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1873 | tun->flags &= ~TUN_PERSIST; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1874 | module_put(THIS_MODULE); |
| 1875 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1876 | |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1877 | tun_debug(KERN_INFO, tun, "persist %s\n", |
| 1878 | arg ? "enabled" : "disabled"); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1879 | break; |
| 1880 | |
| 1881 | case TUNSETOWNER: |
| 1882 | /* Set owner of the device */ |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1883 | owner = make_kuid(current_user_ns(), arg); |
| 1884 | if (!uid_valid(owner)) { |
| 1885 | ret = -EINVAL; |
| 1886 | break; |
| 1887 | } |
| 1888 | tun->owner = owner; |
Jason Wang | 1e58833 | 2012-10-31 19:45:56 +0000 | [diff] [blame] | 1889 | tun_debug(KERN_INFO, tun, "owner set to %u\n", |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1890 | from_kuid(&init_user_ns, tun->owner)); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1891 | break; |
| 1892 | |
Guido Guenther | 8c64462 | 2007-07-02 22:50:25 -0700 | [diff] [blame] | 1893 | case TUNSETGROUP: |
| 1894 | /* Set group of the device */ |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1895 | group = make_kgid(current_user_ns(), arg); |
| 1896 | if (!gid_valid(group)) { |
| 1897 | ret = -EINVAL; |
| 1898 | break; |
| 1899 | } |
| 1900 | tun->group = group; |
Jason Wang | 1e58833 | 2012-10-31 19:45:56 +0000 | [diff] [blame] | 1901 | tun_debug(KERN_INFO, tun, "group set to %u\n", |
Eric W. Biederman | 0625c88 | 2012-02-07 16:48:55 -0800 | [diff] [blame] | 1902 | from_kgid(&init_user_ns, tun->group)); |
Guido Guenther | 8c64462 | 2007-07-02 22:50:25 -0700 | [diff] [blame] | 1903 | break; |
| 1904 | |
Mike Kershaw | ff4cc3a | 2005-09-01 17:40:05 -0700 | [diff] [blame] | 1905 | case TUNSETLINK: |
| 1906 | /* Only allow setting the type when the interface is down */ |
| 1907 | if (tun->dev->flags & IFF_UP) { |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1908 | tun_debug(KERN_INFO, tun, |
| 1909 | "Linktype set failed because interface is up\n"); |
David S. Miller | 48abfe0 | 2008-04-23 19:37:58 -0700 | [diff] [blame] | 1910 | ret = -EBUSY; |
Mike Kershaw | ff4cc3a | 2005-09-01 17:40:05 -0700 | [diff] [blame] | 1911 | } else { |
| 1912 | tun->dev->type = (int) arg; |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1913 | tun_debug(KERN_INFO, tun, "linktype set to %d\n", |
| 1914 | tun->dev->type); |
David S. Miller | 48abfe0 | 2008-04-23 19:37:58 -0700 | [diff] [blame] | 1915 | ret = 0; |
Mike Kershaw | ff4cc3a | 2005-09-01 17:40:05 -0700 | [diff] [blame] | 1916 | } |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1917 | break; |
Mike Kershaw | ff4cc3a | 2005-09-01 17:40:05 -0700 | [diff] [blame] | 1918 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1919 | #ifdef TUN_DEBUG |
| 1920 | case TUNSETDEBUG: |
| 1921 | tun->debug = arg; |
| 1922 | break; |
| 1923 | #endif |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1924 | case TUNSETOFFLOAD: |
Michał Mirosław | 8825537 | 2011-04-19 06:13:10 +0000 | [diff] [blame] | 1925 | ret = set_offload(tun, arg); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1926 | break; |
Rusty Russell | 5228ddc | 2008-07-03 03:46:16 -0700 | [diff] [blame] | 1927 | |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1928 | case TUNSETTXFILTER: |
| 1929 | /* Can be set only for TAPs */ |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1930 | ret = -EINVAL; |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1931 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1932 | break; |
Harvey Harrison | c0e5a8c | 2008-07-16 12:45:34 -0700 | [diff] [blame] | 1933 | ret = update_filter(&tun->txflt, (void __user *)arg); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1934 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1935 | |
| 1936 | case SIOCGIFHWADDR: |
Uwe Kleine-König | b595076 | 2010-11-01 15:38:34 -0400 | [diff] [blame] | 1937 | /* Get hw address */ |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1938 | memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); |
| 1939 | ifr.ifr_hwaddr.sa_family = tun->dev->type; |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 1940 | if (copy_to_user(argp, &ifr, ifreq_len)) |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1941 | ret = -EFAULT; |
| 1942 | break; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1943 | |
| 1944 | case SIOCSIFHWADDR: |
Max Krasnyansky | f271b2c | 2008-07-14 22:18:19 -0700 | [diff] [blame] | 1945 | /* Set hw address */ |
Joe Perches | 6b8a66e | 2011-03-02 07:18:10 +0000 | [diff] [blame] | 1946 | tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n", |
| 1947 | ifr.ifr_hwaddr.sa_data); |
Kim B. Heino | 4010237 | 2008-02-29 12:26:21 -0800 | [diff] [blame] | 1948 | |
Kim B. Heino | 4010237 | 2008-02-29 12:26:21 -0800 | [diff] [blame] | 1949 | ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 1950 | break; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1951 | |
| 1952 | case TUNGETSNDBUF: |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1953 | sndbuf = tfile->socket.sk->sk_sndbuf; |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1954 | if (copy_to_user(argp, &sndbuf, sizeof(sndbuf))) |
| 1955 | ret = -EFAULT; |
| 1956 | break; |
| 1957 | |
| 1958 | case TUNSETSNDBUF: |
| 1959 | if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) { |
| 1960 | ret = -EFAULT; |
| 1961 | break; |
| 1962 | } |
| 1963 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1964 | tun->sndbuf = sndbuf; |
| 1965 | tun_set_sndbuf(tun); |
Herbert Xu | 33dccbb | 2009-02-05 21:25:32 -0800 | [diff] [blame] | 1966 | break; |
| 1967 | |
Michael S. Tsirkin | d9d52b5 | 2010-03-17 17:45:01 +0200 | [diff] [blame] | 1968 | case TUNGETVNETHDRSZ: |
| 1969 | vnet_hdr_sz = tun->vnet_hdr_sz; |
| 1970 | if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz))) |
| 1971 | ret = -EFAULT; |
| 1972 | break; |
| 1973 | |
| 1974 | case TUNSETVNETHDRSZ: |
| 1975 | if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) { |
| 1976 | ret = -EFAULT; |
| 1977 | break; |
| 1978 | } |
| 1979 | if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) { |
| 1980 | ret = -EINVAL; |
| 1981 | break; |
| 1982 | } |
| 1983 | |
| 1984 | tun->vnet_hdr_sz = vnet_hdr_sz; |
| 1985 | break; |
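| | /* Example: a user that wants the 12-byte mergeable-rx-buffer |
| | * variant of the virtio-net header (an assumption about the |
| | * caller, not a driver requirement) would do: |
| | * |
| | *	int sz = 12;	// sizeof(struct virtio_net_hdr_mrg_rxbuf) |
| | *	ioctl(fd, TUNSETVNETHDRSZ, &sz); |
| | */ |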
| 1986 | |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 1987 | case TUNATTACHFILTER: |
| 1988 | /* Can be set only for TAPs */ |
| 1989 | ret = -EINVAL; |
| 1990 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
| 1991 | break; |
| 1992 | ret = -EFAULT; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 1993 | if (copy_from_user(&tun->fprog, argp, sizeof(tun->fprog))) |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 1994 | break; |
| 1995 | |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 1996 | ret = tun_attach_filter(tun); |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 1997 | break; |
| 1998 | |
| 1999 | case TUNDETACHFILTER: |
| 2000 | /* Can be set only for TAPs */ |
| 2001 | ret = -EINVAL; |
| 2002 | if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV) |
| 2003 | break; |
Jason Wang | c8d68e6 | 2012-10-31 19:46:00 +0000 | [diff] [blame] | 2004 | ret = 0; |
| 2005 | tun_detach_filter(tun, tun->numqueues); |
Michael S. Tsirkin | 9940516 | 2010-02-14 01:01:10 +0000 | [diff] [blame] | 2006 | break; |
| 2007 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2008 | default: |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 2009 | ret = -EINVAL; |
| 2010 | break; |
Joe Perches | ee289b6 | 2010-05-17 22:47:34 -0700 | [diff] [blame] | 2011 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2012 | |
Herbert Xu | 876bfd4 | 2009-08-06 14:22:44 +0000 | [diff] [blame] | 2013 | unlock: |
| 2014 | rtnl_unlock(); |
| 2015 | if (tun) |
| 2016 | tun_put(tun); |
Eric W. Biederman | 631ab46 | 2009-01-20 11:00:40 +0000 | [diff] [blame] | 2017 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2018 | } |
| 2019 | |
Arnd Bergmann | 50857e2 | 2009-11-06 22:52:32 -0800 | [diff] [blame] | 2020 | static long tun_chr_ioctl(struct file *file, |
| 2021 | unsigned int cmd, unsigned long arg) |
| 2022 | { |
| 2023 | return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq)); |
| 2024 | } |
| 2025 | |
| 2026 | #ifdef CONFIG_COMPAT |
| 2027 | static long tun_chr_compat_ioctl(struct file *file, |
| 2028 | unsigned int cmd, unsigned long arg) |
| 2029 | { |
| 2030 | switch (cmd) { |
| 2031 | case TUNSETIFF: |
| 2032 | case TUNGETIFF: |
| 2033 | case TUNSETTXFILTER: |
| 2034 | case TUNGETSNDBUF: |
| 2035 | case TUNSETSNDBUF: |
| 2036 | case SIOCGIFHWADDR: |
| 2037 | case SIOCSIFHWADDR: |
| 2038 | arg = (unsigned long)compat_ptr(arg); |
| 2039 | break; |
| 2040 | default: |
| 2041 | arg = (compat_ulong_t)arg; |
| 2042 | break; |
| 2043 | } |
| 2044 | |
| 2045 | /* |
| 2046 | * compat_ifreq is shorter than ifreq, so we must not access beyond |
| 2047 | * the end of that structure. All fields that are used in this |
| 2048 | * driver are compatible though, we don't need to convert the |
| 2049 | * contents. |
| 2050 | */ |
| 2051 | return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq)); |
| 2052 | } |
| 2053 | #endif /* CONFIG_COMPAT */ |
| 2054 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2055 | static int tun_chr_fasync(int fd, struct file *file, int on) |
| 2056 | { |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 2057 | struct tun_file *tfile = file->private_data; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2058 | int ret; |
| 2059 | |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 2060 | if ((ret = fasync_helper(fd, file, on, &tfile->fasync)) < 0) |
Jonathan Corbet | 9d31952 | 2008-06-19 15:50:37 -0600 | [diff] [blame] | 2061 | goto out; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 2062 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2063 | if (on) { |
Eric W. Biederman | 609d7fa | 2006-10-02 02:17:15 -0700 | [diff] [blame] | 2064 | ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2065 | if (ret) |
Jonathan Corbet | 9d31952 | 2008-06-19 15:50:37 -0600 | [diff] [blame] | 2066 | goto out; |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 2067 | tfile->flags |= TUN_FASYNC; |
Jeff Garzik | 6aa20a2 | 2006-09-13 13:24:59 -0400 | [diff] [blame] | 2068 | } else |
Jason Wang | 54f968d | 2012-10-31 19:45:57 +0000 | [diff] [blame] | 2069 | tfile->flags &= ~TUN_FASYNC; |
Jonathan Corbet | 9d31952 | 2008-06-19 15:50:37 -0600 | [diff] [blame] | 2070 | ret = 0; |
| 2071 | out: |
Jonathan Corbet | 9d31952 | 2008-06-19 15:50:37 -0600 | [diff] [blame] | 2072 | return ret; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2073 | } |

static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = (struct tun_file *)sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL,
					    &tun_proto);
	if (!tfile)
		return -ENOMEM;
	rcu_assign_pointer(tfile->tun, NULL);
	tfile->net = get_net(current->nsproxy->net_ns);
	tfile->flags = 0;

	rcu_assign_pointer(tfile->socket.wq, &tfile->wq);
	init_waitqueue_head(&tfile->wq.wait);

	tfile->socket.file = file;
	tfile->socket.ops = &tun_socket_ops;

	sock_init_data(&tfile->socket, &tfile->sk);
	sk_change_net(&tfile->sk, tfile->net);

	tfile->sk.sk_write_space = tun_sock_write_space;
	tfile->sk.sk_sndbuf = INT_MAX;

	file->private_data = tfile;
	set_bit(SOCK_EXTERNALLY_ALLOCATED, &tfile->socket.flags);

	return 0;
}

static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct net *net = tfile->net;

	tun_detach(tfile, true);
	put_net(net);

	return 0;
}

static const struct file_operations tun_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = do_sync_read,
	.aio_read = tun_chr_aio_read,
	.write = do_sync_write,
	.aio_write = tun_chr_aio_write,
	.poll = tun_chr_poll,
	.unlocked_ioctl = tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open = tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync,
};

static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
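/*
 * Note: misc devices share character major 10, so with the nodename
 * above udev creates the node as /dev/net/tun (char major 10, minor
 * TUN_MINOR).
 */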

/* ethtool interface */

static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = 0;
	cmd->advertising = 0;
	ethtool_cmd_speed_set(cmd, SPEED_10);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_TP;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_DISABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;
	return 0;
}
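/*
 * The settings above are synthetic: a tun/tap device has no PHY, so we
 * report a fixed 10 Mbit, full-duplex, twisted-pair link purely to give
 * "ethtool <dev>" something sensible to print.
 */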

static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
		break;
	case TUN_TAP_DEV:
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
		break;
	}
}

static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}

static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);

	tun->debug = value;
#endif
}

static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings = tun_get_settings,
	.get_drvinfo = tun_get_drvinfo,
	.get_msglevel = tun_get_msglevel,
	.set_msglevel = tun_set_msglevel,
	.get_link = ethtool_op_get_link,
};

static int __init tun_init(void)
{
	int ret = 0;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		goto err_linkops;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		goto err_misc;
	}
	return 0;
err_misc:
	rtnl_link_unregister(&tun_link_ops);
err_linkops:
	return ret;
}

static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}
/* Get the underlying socket object from a tun file.  Returns an error
 * pointer unless the file is attached to a device.  The returned object
 * works like a packet socket: it can be used for sock_sendmsg() and
 * sock_recvmsg().  The caller is responsible for holding a reference to
 * the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_file *tfile;

	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tfile = file->private_data;
	if (!tfile)
		return ERR_PTR(-EBADFD);
	return &tfile->socket;
}
EXPORT_SYMBOL_GPL(tun_get_socket);
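/*
 * Illustrative sketch (assumed in-kernel consumer such as vhost-net;
 * the msghdr setup is elided, and "tun_file" here is a hypothetical
 * struct file pointer held by the caller):
 *
 *	struct socket *sock = tun_get_socket(tun_file);
 *
 *	if (IS_ERR(sock))
 *		return PTR_ERR(sock);
 *	sock_sendmsg(sock, &msg, total_len);
 *
 * The caller must keep its file reference for as long as it uses sock,
 * since the socket's lifetime is tied to the tun file.
 */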

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
MODULE_ALIAS("devname:net/tun");