/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Experimental Zero Copy TX");
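/* Zerocopy TX is off by default; the 0444 permission makes the parameter
 * read-only at runtime, so it can only be set at module load time, e.g.
 * with "modprobe vhost_net experimental_zcopytx=1". */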

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers allowed to be pending in outstanding
 * zerocopy DMAs */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN	3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN	2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS	1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN	0

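/* FAILED (3) and DONE (2) are deliberately the two largest values, so a
 * single comparison below covers both terminal states, while
 * IN_PROGRESS (1) and CLEAR (0) stay under the threshold. */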
#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)

enum {
	VHOST_NET_VQ_RX = 0,
	VHOST_NET_VQ_TX = 1,
	VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
	VHOST_NET_POLL_DISABLED = 0,
	VHOST_NET_POLL_STARTED = 1,
	VHOST_NET_POLL_STOPPED = 2,
};

struct vhost_net {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
	struct vhost_poll poll[VHOST_NET_VQ_MAX];
	/* Tells us whether we are polling a socket for TX.
	 * We only do this when the socket buffer fills up.
	 * Protected by tx vq lock. */
	enum vhost_net_poll_state tx_poll_state;
	/* Number of TX packets recently submitted.
	 * Protected by tx vq lock. */
	unsigned tx_packets;
	/* Number of times zerocopy TX recently failed.
	 * Protected by tx vq lock. */
	unsigned tx_zcopy_err;
	/* Flush in progress. Protected by tx vq lock. */
	bool tx_flush;
};

static void vhost_net_tx_packet(struct vhost_net *net)
{
	++net->tx_packets;
	if (net->tx_packets < 1024)
		return;
	net->tx_packets = 0;
	net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
	++net->tx_zcopy_err;
}

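/* Decide whether the next TX packet may use zerocopy: refuse while a
 * flush is in progress, or when the recent failure rate exceeds roughly
 * one failed DMA per 64 packets (both counters are reset every 1024
 * packets by vhost_net_tx_packet() above). */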
static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
	/* TX flush waits for outstanding DMAs to be done.
	 * Don't start new DMAs.
	 */
	return !net->tx_flush &&
		net->tx_packets / 64 >= net->tx_zcopy_err;
}

static bool vhost_sock_zcopy(struct socket *sock)
{
	return unlikely(experimental_zcopytx) &&
		sock_flag(sock->sk, SOCK_ZEROCOPY);
}

/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
			  size_t len, int iov_count)
{
	int seg = 0;
	size_t size;

	while (len && seg < iov_count) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		from->iov_len -= size;
		from->iov_base += size;
		len -= size;
		++from;
		++to;
		++seg;
	}
	return seg;
}

/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
			   size_t len, int iovcount)
{
	int seg = 0;
	size_t size;

	while (len && seg < iovcount) {
		size = min(from->iov_len, len);
		to->iov_base = from->iov_base;
		to->iov_len = size;
		len -= size;
		++from;
		++to;
		++seg;
	}
}
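
/* Note: move_iovec_hdr() consumes the header bytes from @from (advancing
 * iov_base/iov_len), while copy_iovec_hdr() leaves @from untouched; the
 * copy variant is used on RX when the socket supplies the vnet header,
 * since recvmsg() may modify msg_iov and the header must still be found
 * afterwards.
 *
 * The tx_poll_* helpers below arm POLLOUT polling on the backend socket
 * only while transmit is blocked (send buffer full, or sendmsg failing
 * with -EAGAIN/-ENOBUFS), and disarm it once there is room again. */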

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
	if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
		return;
	vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
	net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static void tx_poll_start(struct vhost_net *net, struct socket *sock)
{
	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
		return;
	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
	net->tx_poll_state = VHOST_NET_POLL_STARTED;
}

/* The lower device may complete DMAs out of order. upend_idx tracks the
 * tail of the in-flight region of the heads array, done_idx tracks its
 * head. Once the lower device has finished a contiguous run of DMAs
 * starting at done_idx, we signal the used entries to the KVM guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
				      struct vhost_virtqueue *vq)
{
	int i;
	int j = 0;

	for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
		if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
			vhost_net_tx_err(net);
		if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
			vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
			vhost_add_used_and_signal(vq->dev, vq,
						  vq->heads[i].id, 0);
			++j;
		} else
			break;
	}
	if (j)
		vq->done_idx = i;
	return j;
}

static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
	struct vhost_ubuf_ref *ubufs = ubuf->ctx;
	struct vhost_virtqueue *vq = ubufs->vq;
	int cnt = atomic_read(&ubufs->kref.refcount);

	/*
	 * Trigger the polling thread if the guest stopped submitting new
	 * buffers: in this case, the refcount after decrement will
	 * eventually reach 1, so here it is 2.
	 * We also trigger polling periodically after each 16 packets
	 * (the value 16 here is more or less arbitrary, it's tuned to
	 * trigger less than 10% of times).
	 */
	if (cnt <= 2 || !(cnt % 16))
		vhost_poll_queue(&vq->poll);
	/* Set len to mark this descriptor's buffers as DMA done */
	vq->heads[ubuf->desc].len = success ?
		VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
	vhost_ubuf_put(ubufs);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
	unsigned out, in, s;
	int head;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL,
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	size_t len, total_len = 0;
	int err, wmem;
	size_t hdr_size;
	struct socket *sock;
	struct vhost_ubuf_ref *uninitialized_var(ubufs);
	bool zcopy, zcopy_used;

	/* TODO: check that we are running from vhost_worker? */
	sock = rcu_dereference_check(vq->private_data, 1);
	if (!sock)
		return;

	wmem = atomic_read(&sock->sk->sk_wmem_alloc);
	if (wmem >= sock->sk->sk_sndbuf) {
		mutex_lock(&vq->mutex);
		tx_poll_start(net, sock);
		mutex_unlock(&vq->mutex);
		return;
	}

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);

	if (wmem < sock->sk->sk_sndbuf / 2)
		tx_poll_stop(net);
	hdr_size = vq->vhost_hlen;
	zcopy = vq->ubufs;

	for (;;) {
		/* Release completed zerocopy DMA buffers first */
		if (zcopy)
			vhost_zerocopy_signal_used(net, vq);

		head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
					 ARRAY_SIZE(vq->iov),
					 &out, &in,
					 NULL, NULL);
		/* On error, stop handling until the next kick. */
		if (unlikely(head < 0))
			break;
		/* Nothing new?  Wait for eventfd to tell us they refilled. */
		if (head == vq->num) {
			int num_pends;

			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
			if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			/* If more outstanding DMAs, queue the work.
			 * Handle upend_idx wrap around
			 */
			num_pends = likely(vq->upend_idx >= vq->done_idx) ?
				(vq->upend_idx - vq->done_idx) :
				(vq->upend_idx + UIO_MAXIOV - vq->done_idx);
			if (unlikely(num_pends > VHOST_MAX_PEND)) {
				tx_poll_start(net, sock);
				set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
				break;
			}
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			break;
		}
		if (in) {
			vq_err(vq, "Unexpected descriptor format for TX: "
			       "out %d, in %d\n", out, in);
			break;
		}
		/* Skip header. TODO: support TSO. */
		s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
		msg.msg_iovlen = out;
		len = iov_length(vq->iov, out);
		/* Sanity check */
		if (!len) {
			vq_err(vq, "Unexpected header len for TX: "
			       "%zd expected %zd\n",
			       iov_length(vq->hdr, s), hdr_size);
			break;
		}
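		/* Take the zerocopy path when the packet is large enough to
		 * be worth it, or when zerocopy completions are already in
		 * flight, so used entries keep being signalled in order. */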
		zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
				       vq->upend_idx != vq->done_idx);

		/* use msg_control to pass vhost zerocopy ubuf info to skb */
		if (zcopy_used) {
			vq->heads[vq->upend_idx].id = head;
			if (!vhost_net_tx_select_zcopy(net) ||
			    len < VHOST_GOODCOPY_LEN) {
				/* copy doesn't need to wait for DMA done */
				vq->heads[vq->upend_idx].len =
					VHOST_DMA_DONE_LEN;
				msg.msg_control = NULL;
				msg.msg_controllen = 0;
				ubufs = NULL;
			} else {
				struct ubuf_info *ubuf = &vq->ubuf_info[head];

				vq->heads[vq->upend_idx].len =
					VHOST_DMA_IN_PROGRESS;
				ubuf->callback = vhost_zerocopy_callback;
				ubuf->ctx = vq->ubufs;
				ubuf->desc = vq->upend_idx;
				msg.msg_control = ubuf;
				msg.msg_controllen = sizeof(ubuf);
				ubufs = vq->ubufs;
				kref_get(&ubufs->kref);
			}
			vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
		}
		/* TODO: Check specific error and bomb out unless ENOBUFS? */
		err = sock->ops->sendmsg(NULL, sock, &msg, len);
		if (unlikely(err < 0)) {
			if (zcopy_used) {
				if (ubufs)
					vhost_ubuf_put(ubufs);
				vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
					UIO_MAXIOV;
			}
			vhost_discard_vq_desc(vq, 1);
			if (err == -EAGAIN || err == -ENOBUFS)
				tx_poll_start(net, sock);
			break;
		}
		if (err != len)
			pr_debug("Truncated TX packet: "
				 " len %d != %zd\n", err, len);
		if (!zcopy_used)
			vhost_add_used_and_signal(&net->dev, vq, head, 0);
		else
			vhost_zerocopy_signal_used(net, vq);
		total_len += len;
		vhost_net_tx_packet(net);
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
	struct sk_buff *head;
	int len = 0;
	unsigned long flags;

	spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
	head = skb_peek(&sk->sk_receive_queue);
	if (likely(head)) {
		len = head->len;
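		/* The VLAN tag still sits in skb metadata here but will be
		 * inserted into the packet data before the guest sees it,
		 * so account for those extra bytes. */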
		if (vlan_tx_tag_present(head))
			len += VLAN_HLEN;
	}

	spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
	return len;
}

/* This is a multi-buffer version of vhost_get_vq_desc(), that works if
 *	vq has read descriptors only.
 * @vq		- the relevant virtqueue
 * @datalen	- data length we'll be reading
 * @iovcount	- returned count of io vectors we fill
 * @log		- vhost log
 * @log_num	- log offset
 * @quota	- headcount quota, 1 for big buffer
 *	returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
		       struct vring_used_elem *heads,
		       int datalen,
		       unsigned *iovcount,
		       struct vhost_log *log,
		       unsigned *log_num,
		       unsigned int quota)
{
	unsigned int out, in;
	int seg = 0;
	int headcount = 0;
	unsigned d;
	int r, nlogs = 0;

	while (datalen > 0 && headcount < quota) {
		if (unlikely(seg >= UIO_MAXIOV)) {
			r = -ENOBUFS;
			goto err;
		}
		d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
				      ARRAY_SIZE(vq->iov) - seg, &out,
				      &in, log, log_num);
		if (d == vq->num) {
			r = 0;
			goto err;
		}
		if (unlikely(out || in <= 0)) {
			vq_err(vq, "unexpected descriptor format for RX: "
			       "out %d, in %d\n", out, in);
			r = -EINVAL;
			goto err;
		}
		if (unlikely(log)) {
			nlogs += *log_num;
			log += *log_num;
		}
		heads[headcount].id = d;
		heads[headcount].len = iov_length(vq->iov + seg, in);
		datalen -= heads[headcount].len;
		++headcount;
		seg += in;
	}
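	/* Once the loop has covered the whole packet, datalen is <= 0; this
	 * trims the last buffer's used length back down so the total
	 * reported to the guest matches the true packet length. */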
	heads[headcount - 1].len += datalen;
	*iovcount = seg;
	if (unlikely(log))
		*log_num = nlogs;
	return headcount;
err:
	vhost_discard_vq_desc(vq, headcount);
	return r;
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
	unsigned uninitialized_var(in), log;
	struct vhost_log *vq_log;
	struct msghdr msg = {
		.msg_name = NULL,
		.msg_namelen = 0,
		.msg_control = NULL, /* FIXME: get and handle RX aux data. */
		.msg_controllen = 0,
		.msg_iov = vq->iov,
		.msg_flags = MSG_DONTWAIT,
	};
	struct virtio_net_hdr_mrg_rxbuf hdr = {
		.hdr.flags = 0,
		.hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
	};
	size_t total_len = 0;
	int err, mergeable;
	s16 headcount;
	size_t vhost_hlen, sock_hlen;
	size_t vhost_len, sock_len;
	/* TODO: check that we are running from vhost_worker? */
	struct socket *sock = rcu_dereference_check(vq->private_data, 1);

	if (!sock)
		return;

	mutex_lock(&vq->mutex);
	vhost_disable_notify(&net->dev, vq);
	vhost_hlen = vq->vhost_hlen;
	sock_hlen = vq->sock_hlen;

	vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
		vq->log : NULL;
	mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

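	/* With VIRTIO_NET_F_MRG_RXBUF negotiated, one packet may span
	 * several buffer heads, and the number of heads consumed is
	 * reported to the guest in hdr.num_buffers below; without it, each
	 * packet must fit in a single (big) buffer, hence a quota of 1. */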
	while ((sock_len = peek_head_len(sock->sk))) {
		sock_len += sock_hlen;
		vhost_len = sock_len + vhost_hlen;
		headcount = get_rx_bufs(vq, vq->heads, vhost_len,
					&in, vq_log, &log,
					likely(mergeable) ? UIO_MAXIOV : 1);
		/* On error, stop handling until the next kick. */
		if (unlikely(headcount < 0))
			break;
		/* OK, now we need to know about added descriptors. */
		if (!headcount) {
			if (unlikely(vhost_enable_notify(&net->dev, vq))) {
				/* They have slipped one in as we were
				 * doing that: check again. */
				vhost_disable_notify(&net->dev, vq);
				continue;
			}
			/* Nothing new?  Wait for eventfd to tell us
			 * they refilled. */
			break;
		}
		/* We don't need to be notified again. */
		if (unlikely(vhost_hlen))
			/* Skip header. TODO: support TSO. */
			move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
		else
			/* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
			 * needed because recvmsg can modify msg_iov. */
			copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
		msg.msg_iovlen = in;
		err = sock->ops->recvmsg(NULL, sock, &msg,
					 sock_len, MSG_DONTWAIT | MSG_TRUNC);
		/* Userspace might have consumed the packet meanwhile:
		 * it's not supposed to do this usually, but might be hard
		 * to prevent. Discard data we got (if any) and keep going. */
		if (unlikely(err != sock_len)) {
			pr_debug("Discarded rx packet: "
				 " len %d, expected %zd\n", err, sock_len);
			vhost_discard_vq_desc(vq, headcount);
			continue;
		}
		if (unlikely(vhost_hlen) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
				      vhost_hlen)) {
			vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
			       vq->iov->iov_base);
			break;
		}
		/* TODO: Should check and handle checksum. */
		if (likely(mergeable) &&
		    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
				      offsetof(typeof(hdr), num_buffers),
				      sizeof hdr.num_buffers)) {
			vq_err(vq, "Failed num_buffers write");
			vhost_discard_vq_desc(vq, headcount);
			break;
		}
		vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
					    headcount);
		if (unlikely(vq_log))
			vhost_log_write(vq, vq_log, log, vhost_len);
		total_len += vhost_len;
		if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
			vhost_poll_queue(&vq->poll);
			break;
		}
	}

	mutex_unlock(&vq->mutex);
}

static void handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

	handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_TX].work);
	handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
	struct vhost_net *net = container_of(work, struct vhost_net,
					     poll[VHOST_NET_VQ_RX].work);
	handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
	struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
	struct vhost_dev *dev;
	int r;

	if (!n)
		return -ENOMEM;

	dev = &n->dev;
	n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
	n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
	r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
	if (r < 0) {
		kfree(n);
		return r;
	}

	vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
	vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
	n->tx_poll_state = VHOST_NET_POLL_DISABLED;

	f->private_data = n;

	return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
				 struct vhost_virtqueue *vq)
{
	if (!vq->private_data)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		tx_poll_stop(n);
		n->tx_poll_state = VHOST_NET_POLL_DISABLED;
	} else
		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static void vhost_net_enable_vq(struct vhost_net *n,
				struct vhost_virtqueue *vq)
{
	struct socket *sock;

	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	if (!sock)
		return;
	if (vq == n->vqs + VHOST_NET_VQ_TX) {
		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
		tx_poll_start(n, sock);
	} else
		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
					struct vhost_virtqueue *vq)
{
	struct socket *sock;

	mutex_lock(&vq->mutex);
	sock = rcu_dereference_protected(vq->private_data,
					 lockdep_is_held(&vq->mutex));
	vhost_net_disable_vq(n, vq);
	rcu_assign_pointer(vq->private_data, NULL);
	mutex_unlock(&vq->mutex);
	return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
			   struct socket **rx_sock)
{
	*tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
	*rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
	vhost_poll_flush(n->poll + index);
	vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
	vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
	vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
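	/* For zerocopy TX, also drain in-flight DMAs: setting tx_flush
	 * under the vq mutex stops new zerocopy submissions, the ubuf wait
	 * below waits out the outstanding ones, and the kref is then
	 * re-initialized so the ubufs structure can be reused. */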
	if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		n->tx_flush = true;
		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		/* Wait for all lower device DMAs to finish. */
		vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
		mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
		n->tx_flush = false;
		kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
		mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
	}
}

static int vhost_net_release(struct inode *inode, struct file *f)
{
	struct vhost_net *n = f->private_data;
	struct socket *tx_sock;
	struct socket *rx_sock;

	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	vhost_dev_stop(&n->dev);
	vhost_dev_cleanup(&n->dev, false);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	/* We do an extra flush before freeing memory,
	 * since jobs can re-queue themselves. */
	vhost_net_flush(n);
	kfree(n);
	return 0;
}

static struct socket *get_raw_socket(int fd)
{
	struct {
		struct sockaddr_ll sa;
		char  buf[MAX_ADDR_LEN];
	} uaddr;
	int uaddr_len = sizeof uaddr, r;
	struct socket *sock = sockfd_lookup(fd, &r);

	if (!sock)
		return ERR_PTR(-ENOTSOCK);

	/* Parameter checking */
	if (sock->sk->sk_type != SOCK_RAW) {
		r = -ESOCKTNOSUPPORT;
		goto err;
	}

	r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
			       &uaddr_len, 0);
	if (r)
		goto err;

	if (uaddr.sa.sll_family != AF_PACKET) {
		r = -EPFNOSUPPORT;
		goto err;
	}
	return sock;
err:
	fput(sock->file);
	return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
	struct file *file = fget(fd);
	struct socket *sock;

	if (!file)
		return ERR_PTR(-EBADF);
	sock = tun_get_socket(file);
	if (!IS_ERR(sock))
		return sock;
	sock = macvtap_get_socket(file);
	if (IS_ERR(sock))
		fput(file);
	return sock;
}

static struct socket *get_socket(int fd)
{
	struct socket *sock;

	/* special case to disable backend */
	if (fd == -1)
		return NULL;
	sock = get_raw_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	sock = get_tap_socket(fd);
	if (!IS_ERR(sock))
		return sock;
	return ERR_PTR(-ENOTSOCK);
}

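/* Attach the socket backing @fd to virtqueue @index (fd == -1 detaches).
 * Reached via the VHOST_NET_SET_BACKEND ioctl: after checking ownership
 * and ring setup, the socket pointer is swapped under the vq mutex
 * (readers use RCU), and fresh zerocopy ubuf state is set up when the
 * new socket supports SOCK_ZEROCOPY. */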
static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
	struct socket *sock, *oldsock;
	struct vhost_virtqueue *vq;
	struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
	int r;

	mutex_lock(&n->dev.mutex);
	r = vhost_dev_check_owner(&n->dev);
	if (r)
		goto err;

	if (index >= VHOST_NET_VQ_MAX) {
		r = -ENOBUFS;
		goto err;
	}
	vq = n->vqs + index;
	mutex_lock(&vq->mutex);

	/* Verify that ring has been setup correctly. */
	if (!vhost_vq_access_ok(vq)) {
		r = -EFAULT;
		goto err_vq;
	}
	sock = get_socket(fd);
	if (IS_ERR(sock)) {
		r = PTR_ERR(sock);
		goto err_vq;
	}

	/* start polling new socket */
	oldsock = rcu_dereference_protected(vq->private_data,
					    lockdep_is_held(&vq->mutex));
	if (sock != oldsock) {
		ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
		if (IS_ERR(ubufs)) {
			r = PTR_ERR(ubufs);
			goto err_ubufs;
		}
		oldubufs = vq->ubufs;
		vq->ubufs = ubufs;
		vhost_net_disable_vq(n, vq);
		rcu_assign_pointer(vq->private_data, sock);
		vhost_net_enable_vq(n, vq);

		r = vhost_init_used(vq);
		if (r)
			goto err_vq;

		n->tx_packets = 0;
		n->tx_zcopy_err = 0;
		n->tx_flush = false;
	}

	mutex_unlock(&vq->mutex);

	if (oldubufs) {
		vhost_ubuf_put_and_wait(oldubufs);
		mutex_lock(&vq->mutex);
		vhost_zerocopy_signal_used(n, vq);
		mutex_unlock(&vq->mutex);
	}

	if (oldsock) {
		vhost_net_flush_vq(n, index);
		fput(oldsock->file);
	}

	mutex_unlock(&n->dev.mutex);
	return 0;

err_ubufs:
	fput(sock->file);
err_vq:
	mutex_unlock(&vq->mutex);
err:
	mutex_unlock(&n->dev.mutex);
	return r;
}

static long vhost_net_reset_owner(struct vhost_net *n)
{
	struct socket *tx_sock = NULL;
	struct socket *rx_sock = NULL;
	long err;

	mutex_lock(&n->dev.mutex);
	err = vhost_dev_check_owner(&n->dev);
	if (err)
		goto done;
	vhost_net_stop(n, &tx_sock, &rx_sock);
	vhost_net_flush(n);
	err = vhost_dev_reset_owner(&n->dev);
done:
	mutex_unlock(&n->dev.mutex);
	if (tx_sock)
		fput(tx_sock->file);
	if (rx_sock)
		fput(rx_sock->file);
	return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
	size_t vhost_hlen, sock_hlen, hdr_len;
	int i;

	hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
			sizeof(struct virtio_net_hdr_mrg_rxbuf) :
			sizeof(struct virtio_net_hdr);
	if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
		/* vhost provides vnet_hdr */
		vhost_hlen = hdr_len;
		sock_hlen = 0;
	} else {
		/* socket provides vnet_hdr */
		vhost_hlen = 0;
		sock_hlen = hdr_len;
	}
	mutex_lock(&n->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&n->dev)) {
		mutex_unlock(&n->dev.mutex);
		return -EFAULT;
	}
	n->dev.acked_features = features;
	smp_wmb();
	for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
		mutex_lock(&n->vqs[i].mutex);
		n->vqs[i].vhost_hlen = vhost_hlen;
		n->vqs[i].sock_hlen = sock_hlen;
		mutex_unlock(&n->vqs[i].mutex);
	}
	vhost_net_flush(n);
	mutex_unlock(&n->dev.mutex);
	return 0;
}

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
			    unsigned long arg)
{
	struct vhost_net *n = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 __user *featurep = argp;
	struct vhost_vring_file backend;
	u64 features;
	int r;

	switch (ioctl) {
	case VHOST_NET_SET_BACKEND:
		if (copy_from_user(&backend, argp, sizeof backend))
			return -EFAULT;
		return vhost_net_set_backend(n, backend.index, backend.fd);
	case VHOST_GET_FEATURES:
		features = VHOST_NET_FEATURES;
		if (copy_to_user(featurep, &features, sizeof features))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, featurep, sizeof features))
			return -EFAULT;
		if (features & ~VHOST_NET_FEATURES)
			return -EOPNOTSUPP;
		return vhost_net_set_features(n, features);
	case VHOST_RESET_OWNER:
		return vhost_net_reset_owner(n);
	default:
		mutex_lock(&n->dev.mutex);
		r = vhost_dev_ioctl(&n->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&n->dev, ioctl, argp);
		else
			vhost_net_flush(n);
		mutex_unlock(&n->dev.mutex);
		return r;
	}
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
				   unsigned long arg)
{
	return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
	.owner          = THIS_MODULE,
	.release        = vhost_net_release,
	.unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl   = vhost_net_compat_ioctl,
#endif
	.open           = vhost_net_open,
	.llseek		= noop_llseek,
};

static struct miscdevice vhost_net_misc = {
	.minor = VHOST_NET_MINOR,
	.name = "vhost-net",
	.fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
	if (experimental_zcopytx)
		vhost_enable_zcopy(VHOST_NET_VQ_TX);
	return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
	misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");