/* Copyright (C) 2009 Red Hat, Inc.
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * virtio-net server in host kernel.
 */

#include <linux/compat.h>
#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/virtio_net.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/file.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/if_packet.h>
#include <linux/if_arp.h>
#include <linux/if_tun.h>
#include <linux/if_macvlan.h>
#include <linux/if_vlan.h>

#include <net/sock.h>

#include "vhost.h"

static int experimental_zcopytx = 1;
module_param(experimental_zcopytx, int, 0444);
MODULE_PARM_DESC(experimental_zcopytx, "Enable Zero Copy TX;"
                 " 1 - Enable; 0 - Disable");

/* Max number of bytes transferred before requeueing the job.
 * Using this limit prevents one virtqueue from starving others. */
#define VHOST_NET_WEIGHT 0x80000

/* Max number of TX used buffers pending zerocopy completion */
#define VHOST_MAX_PEND 128
#define VHOST_GOODCOPY_LEN 256
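
/*
 * Rationale, as used by handle_tx() below (the numbers themselves are
 * tuning choices, not from any spec): packets shorter than
 * VHOST_GOODCOPY_LEN are copied even on a zerocopy-capable queue, since
 * copying a few hundred bytes is cheaper than pinning guest pages and
 * waiting for a DMA completion, while VHOST_MAX_PEND caps how many
 * zerocopy completions may be outstanding before TX backs off.
 */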

/*
 * For transmit, used buffer len is unused; we override it to track buffer
 * status internally; used for zerocopy tx only.
 */
/* Lower device DMA failed */
#define VHOST_DMA_FAILED_LEN 3
/* Lower device DMA done */
#define VHOST_DMA_DONE_LEN 2
/* Lower device DMA in progress */
#define VHOST_DMA_IN_PROGRESS 1
/* Buffer unused */
#define VHOST_DMA_CLEAR_LEN 0

#define VHOST_DMA_IS_DONE(len) ((len) >= VHOST_DMA_DONE_LEN)
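
/*
 * Note that VHOST_DMA_IS_DONE() covers both DONE and FAILED: a failed DMA
 * still completes the buffer as far as the guest is concerned; it is merely
 * counted as a tx_zcopy_err when the entry is reaped in
 * vhost_zerocopy_signal_used().
 */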

enum {
        VHOST_NET_VQ_RX = 0,
        VHOST_NET_VQ_TX = 1,
        VHOST_NET_VQ_MAX = 2,
};

enum vhost_net_poll_state {
        VHOST_NET_POLL_DISABLED = 0,
        VHOST_NET_POLL_STARTED = 1,
        VHOST_NET_POLL_STOPPED = 2,
};

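/*
 * The TX poll states above, as driven by tx_poll_start()/tx_poll_stop():
 * DISABLED while no backend is attached, STARTED while we are polling the
 * socket for writability (its send buffer filled up), and STOPPED once
 * there is room again and virtqueue kicks alone drive transmission.
 */
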
struct vhost_net {
        struct vhost_dev dev;
        struct vhost_virtqueue vqs[VHOST_NET_VQ_MAX];
        struct vhost_poll poll[VHOST_NET_VQ_MAX];
        /* Tells us whether we are polling a socket for TX.
         * We only do this when socket buffer fills up.
         * Protected by tx vq lock. */
        enum vhost_net_poll_state tx_poll_state;
        /* Number of TX packets recently submitted.
         * Protected by tx vq lock. */
        unsigned tx_packets;
        /* Number of times zerocopy TX recently failed.
         * Protected by tx vq lock. */
        unsigned tx_zcopy_err;
        /* Flush in progress. Protected by tx vq lock. */
        bool tx_flush;
};

static void vhost_net_tx_packet(struct vhost_net *net)
{
        ++net->tx_packets;
        if (net->tx_packets < 1024)
                return;
        net->tx_packets = 0;
        net->tx_zcopy_err = 0;
}

static void vhost_net_tx_err(struct vhost_net *net)
{
        ++net->tx_zcopy_err;
}

static bool vhost_net_tx_select_zcopy(struct vhost_net *net)
{
        /* TX flush waits for outstanding DMAs to be done.
         * Don't start new DMAs.
         */
        return !net->tx_flush &&
                net->tx_packets / 64 >= net->tx_zcopy_err;
}

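/*
 * In other words, vhost_net_tx_select_zcopy() keeps zerocopy selected as
 * long as fewer than roughly 1/64 of the packets in the current window have
 * failed; vhost_net_tx_packet() resets both counters every 1024 packets so
 * that old errors age out.
 */
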
static bool vhost_sock_zcopy(struct socket *sock)
{
        return unlikely(experimental_zcopytx) &&
                sock_flag(sock->sk, SOCK_ZEROCOPY);
}

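/*
 * SOCK_ZEROCOPY is the lower device's opt-in: only sockets that can deliver
 * ubuf completion callbacks (e.g. tun/macvtap) set it, so vhost_sock_zcopy()
 * never selects zerocopy for a backend that cannot signal DMA completion.
 */
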
/* Pop first len bytes from iovec. Return number of segments used. */
static int move_iovec_hdr(struct iovec *from, struct iovec *to,
                          size_t len, int iov_count)
{
        int seg = 0;
        size_t size;

        while (len && seg < iov_count) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                from->iov_len -= size;
                from->iov_base += size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
        return seg;
}
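
/*
 * Example: with from = {{base0, 4}, {base1, 8}} and len = 10,
 * move_iovec_hdr() fills to = {{base0, 4}, {base1, 6}}, advances from to
 * {{base0 + 4, 0}, {base1 + 6, 2}} and returns 2: the header bytes have
 * been "popped" off the front of the original iovec.
 */
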
/* Copy iovec entries for len bytes from iovec. */
static void copy_iovec_hdr(const struct iovec *from, struct iovec *to,
                           size_t len, int iovcount)
{
        int seg = 0;
        size_t size;

        while (len && seg < iovcount) {
                size = min(from->iov_len, len);
                to->iov_base = from->iov_base;
                to->iov_len = size;
                len -= size;
                ++from;
                ++to;
                ++seg;
        }
}
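
/*
 * Unlike move_iovec_hdr(), this leaves the source iovec untouched:
 * handle_rx() uses it when the socket supplies the vnet header inline, so
 * it only needs a stable copy of the head entries (recvmsg may modify
 * msg_iov), not to strip them.
 */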

/* Caller must have TX VQ lock */
static void tx_poll_stop(struct vhost_net *net)
{
        if (likely(net->tx_poll_state != VHOST_NET_POLL_STARTED))
                return;
        vhost_poll_stop(net->poll + VHOST_NET_VQ_TX);
        net->tx_poll_state = VHOST_NET_POLL_STOPPED;
}

/* Caller must have TX VQ lock */
static int tx_poll_start(struct vhost_net *net, struct socket *sock)
{
        int ret;

        if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
                return 0;
        ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
        if (!ret)
                net->tx_poll_state = VHOST_NET_POLL_STARTED;
        return ret;
}

/* The lower device driver may complete DMAs out of order. upend_idx tracks
 * the tail of the zerocopy entries we have handed out; done_idx tracks the
 * head. Once the lower device has completed DMAs contiguously from
 * done_idx, we signal the used idx to the KVM guest.
 */
static int vhost_zerocopy_signal_used(struct vhost_net *net,
                                      struct vhost_virtqueue *vq)
{
        int i;
        int j = 0;

        for (i = vq->done_idx; i != vq->upend_idx; i = (i + 1) % UIO_MAXIOV) {
                if (vq->heads[i].len == VHOST_DMA_FAILED_LEN)
                        vhost_net_tx_err(net);
                if (VHOST_DMA_IS_DONE(vq->heads[i].len)) {
                        vq->heads[i].len = VHOST_DMA_CLEAR_LEN;
                        vhost_add_used_and_signal(vq->dev, vq,
                                                  vq->heads[i].id, 0);
                        ++j;
                } else
                        break;
        }
        if (j)
                vq->done_idx = i;
        return j;
}

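/*
 * Example of the in-order reaping vhost_zerocopy_signal_used() implements:
 * with done_idx = 5 and upend_idx = 8, if entry 6 completes before entry 5
 * nothing is reaped; once entry 5 completes, entries 5 and 6 are both
 * returned to the guest in one pass and done_idx advances to 7.
 */
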
static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
        struct vhost_ubuf_ref *ubufs = ubuf->ctx;
        struct vhost_virtqueue *vq = ubufs->vq;
        int cnt = atomic_read(&ubufs->kref.refcount);

        /*
         * Trigger polling thread if guest stopped submitting new buffers:
         * in this case, the refcount after decrement will eventually reach 1
         * so here it is 2.
         * We also trigger polling periodically after each 16 packets
         * (the value 16 here is more or less arbitrary, it's tuned to trigger
         * less than 10% of times).
         */
        if (cnt <= 2 || !(cnt % 16))
                vhost_poll_queue(&vq->poll);
        /* Set len to mark this descriptor's buffers as done with DMA */
        vq->heads[ubuf->desc].len = success ?
                VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
        vhost_ubuf_put(ubufs);
}

/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
        unsigned out, in, s;
        int head;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL,
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        size_t len, total_len = 0;
        int err, wmem;
        size_t hdr_size;
        struct socket *sock;
        struct vhost_ubuf_ref *uninitialized_var(ubufs);
        bool zcopy, zcopy_used;

        /* TODO: check that we are running from vhost_worker? */
        sock = rcu_dereference_check(vq->private_data, 1);
        if (!sock)
                return;

        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
        if (wmem >= sock->sk->sk_sndbuf) {
                mutex_lock(&vq->mutex);
                tx_poll_start(net, sock);
                mutex_unlock(&vq->mutex);
                return;
        }

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);

        if (wmem < sock->sk->sk_sndbuf / 2)
                tx_poll_stop(net);
        hdr_size = vq->vhost_hlen;
        zcopy = vq->ubufs;

        for (;;) {
                /* Release buffers whose DMAs have completed first */
                if (zcopy)
                        vhost_zerocopy_signal_used(net, vq);

                head = vhost_get_vq_desc(&net->dev, vq, vq->iov,
                                         ARRAY_SIZE(vq->iov),
                                         &out, &in,
                                         NULL, NULL);
                /* On error, stop handling until the next kick. */
                if (unlikely(head < 0))
                        break;
                /* Nothing new? Wait for eventfd to tell us they refilled. */
                if (head == vq->num) {
                        int num_pends;

                        wmem = atomic_read(&sock->sk->sk_wmem_alloc);
                        if (wmem >= sock->sk->sk_sndbuf * 3 / 4) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        /* If too many DMAs are outstanding, stop transmitting
                         * and poll; the zerocopy callback requeues us as
                         * completions arrive. The num_pends computation
                         * handles upend_idx wraparound.
                         */
                        num_pends = likely(vq->upend_idx >= vq->done_idx) ?
                                    (vq->upend_idx - vq->done_idx) :
                                    (vq->upend_idx + UIO_MAXIOV - vq->done_idx);
                        if (unlikely(num_pends > VHOST_MAX_PEND)) {
                                tx_poll_start(net, sock);
                                set_bit(SOCK_ASYNC_NOSPACE, &sock->flags);
                                break;
                        }
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        break;
                }
                if (in) {
                        vq_err(vq, "Unexpected descriptor format for TX: "
                               "out %d, in %d\n", out, in);
                        break;
                }
                /* Skip header. TODO: support TSO. */
                s = move_iovec_hdr(vq->iov, vq->hdr, hdr_size, out);
                msg.msg_iovlen = out;
                len = iov_length(vq->iov, out);
                /* Sanity check */
                if (!len) {
                        vq_err(vq, "Unexpected header len for TX: "
                               "%zd expected %zd\n",
                               iov_length(vq->hdr, s), hdr_size);
                        break;
                }
                zcopy_used = zcopy && (len >= VHOST_GOODCOPY_LEN ||
                                       vq->upend_idx != vq->done_idx);

                /* use msg_control to pass vhost zerocopy ubuf info to skb */
                if (zcopy_used) {
                        vq->heads[vq->upend_idx].id = head;
                        if (!vhost_net_tx_select_zcopy(net) ||
                            len < VHOST_GOODCOPY_LEN) {
                                /* A copy doesn't need to wait for DMA done */
                                vq->heads[vq->upend_idx].len =
                                        VHOST_DMA_DONE_LEN;
                                msg.msg_control = NULL;
                                msg.msg_controllen = 0;
                                ubufs = NULL;
                        } else {
                                struct ubuf_info *ubuf;
                                ubuf = vq->ubuf_info + vq->upend_idx;

                                vq->heads[vq->upend_idx].len =
                                        VHOST_DMA_IN_PROGRESS;
                                ubuf->callback = vhost_zerocopy_callback;
                                ubuf->ctx = vq->ubufs;
                                ubuf->desc = vq->upend_idx;
                                msg.msg_control = ubuf;
                                msg.msg_controllen = sizeof(ubuf);
                                ubufs = vq->ubufs;
                                kref_get(&ubufs->kref);
                        }
                        vq->upend_idx = (vq->upend_idx + 1) % UIO_MAXIOV;
                }
                /* TODO: Check specific error and bomb out unless ENOBUFS? */
                err = sock->ops->sendmsg(NULL, sock, &msg, len);
                if (unlikely(err < 0)) {
                        if (zcopy_used) {
                                if (ubufs)
                                        vhost_ubuf_put(ubufs);
                                vq->upend_idx = ((unsigned)vq->upend_idx - 1) %
                                        UIO_MAXIOV;
                        }
                        vhost_discard_vq_desc(vq, 1);
                        if (err == -EAGAIN || err == -ENOBUFS)
                                tx_poll_start(net, sock);
                        break;
                }
                if (err != len)
                        pr_debug("Truncated TX packet: "
                                 " len %d != %zd\n", err, len);
                if (!zcopy_used)
                        vhost_add_used_and_signal(&net->dev, vq, head, 0);
                else
                        vhost_zerocopy_signal_used(net, vq);
                total_len += len;
                vhost_net_tx_packet(net);
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

static int peek_head_len(struct sock *sk)
{
        struct sk_buff *head;
        int len = 0;
        unsigned long flags;

        spin_lock_irqsave(&sk->sk_receive_queue.lock, flags);
        head = skb_peek(&sk->sk_receive_queue);
        if (likely(head)) {
                len = head->len;
                if (vlan_tx_tag_present(head))
                        len += VLAN_HLEN;
        }

        spin_unlock_irqrestore(&sk->sk_receive_queue.lock, flags);
        return len;
}

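/*
 * The VLAN_HLEN adjustment in peek_head_len() accounts for a VLAN tag
 * carried out-of-band in the skb (vlan_tx_tag_present) that will be
 * inserted into the packet on delivery; without it, rx buffers sized off
 * head->len would come up 4 bytes short.
 */
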
/* This is a multi-buffer version of vhost_get_vq_desc, that works if
 * vq has read descriptors only.
 * @vq - the relevant virtqueue
 * @datalen - data length we'll be reading
 * @iovcount - returned count of io vectors we fill
 * @log - vhost log
 * @log_num - log offset
 * @quota - headcount quota, 1 for big buffer
 * returns number of buffer heads allocated, negative on error
 */
static int get_rx_bufs(struct vhost_virtqueue *vq,
                       struct vring_used_elem *heads,
                       int datalen,
                       unsigned *iovcount,
                       struct vhost_log *log,
                       unsigned *log_num,
                       unsigned int quota)
{
        unsigned int out, in;
        int seg = 0;
        int headcount = 0;
        unsigned d;
        int r, nlogs = 0;

        while (datalen > 0 && headcount < quota) {
                if (unlikely(seg >= UIO_MAXIOV)) {
                        r = -ENOBUFS;
                        goto err;
                }
                d = vhost_get_vq_desc(vq->dev, vq, vq->iov + seg,
                                      ARRAY_SIZE(vq->iov) - seg, &out,
                                      &in, log, log_num);
                if (d == vq->num) {
                        r = 0;
                        goto err;
                }
                if (unlikely(out || in <= 0)) {
                        vq_err(vq, "unexpected descriptor format for RX: "
                               "out %d, in %d\n", out, in);
                        r = -EINVAL;
                        goto err;
                }
                if (unlikely(log)) {
                        nlogs += *log_num;
                        log += *log_num;
                }
                heads[headcount].id = d;
                heads[headcount].len = iov_length(vq->iov + seg, in);
                datalen -= heads[headcount].len;
                ++headcount;
                seg += in;
        }
        heads[headcount - 1].len += datalen;
        *iovcount = seg;
        if (unlikely(log))
                *log_num = nlogs;
        return headcount;
err:
        vhost_discard_vq_desc(vq, headcount);
        return r;
}

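/*
 * Note that get_rx_bufs() exits its loop with datalen <= 0, so the final
 * "heads[headcount - 1].len += datalen" trims the last head's recorded
 * length down to the bytes actually consumed rather than the full buffer
 * size the guest posted.
 */
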
/* Expects to be always run from workqueue - which acts as
 * read-side critical section for our kind of RCU. */
static void handle_rx(struct vhost_net *net)
{
        struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
        unsigned uninitialized_var(in), log;
        struct vhost_log *vq_log;
        struct msghdr msg = {
                .msg_name = NULL,
                .msg_namelen = 0,
                .msg_control = NULL, /* FIXME: get and handle RX aux data. */
                .msg_controllen = 0,
                .msg_iov = vq->iov,
                .msg_flags = MSG_DONTWAIT,
        };
        struct virtio_net_hdr_mrg_rxbuf hdr = {
                .hdr.flags = 0,
                .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE
        };
        size_t total_len = 0;
        int err, mergeable;
        s16 headcount;
        size_t vhost_hlen, sock_hlen;
        size_t vhost_len, sock_len;
        /* TODO: check that we are running from vhost_worker? */
        struct socket *sock = rcu_dereference_check(vq->private_data, 1);

        if (!sock)
                return;

        mutex_lock(&vq->mutex);
        vhost_disable_notify(&net->dev, vq);
        vhost_hlen = vq->vhost_hlen;
        sock_hlen = vq->sock_hlen;

        vq_log = unlikely(vhost_has_feature(&net->dev, VHOST_F_LOG_ALL)) ?
                vq->log : NULL;
        mergeable = vhost_has_feature(&net->dev, VIRTIO_NET_F_MRG_RXBUF);

        while ((sock_len = peek_head_len(sock->sk))) {
                sock_len += sock_hlen;
                vhost_len = sock_len + vhost_hlen;
                headcount = get_rx_bufs(vq, vq->heads, vhost_len,
                                        &in, vq_log, &log,
                                        likely(mergeable) ? UIO_MAXIOV : 1);
                /* On error, stop handling until the next kick. */
                if (unlikely(headcount < 0))
                        break;
                /* OK, now we need to know about added descriptors. */
                if (!headcount) {
                        if (unlikely(vhost_enable_notify(&net->dev, vq))) {
                                /* They have slipped one in as we were
                                 * doing that: check again. */
                                vhost_disable_notify(&net->dev, vq);
                                continue;
                        }
                        /* Nothing new? Wait for eventfd to tell us
                         * they refilled. */
                        break;
                }
                /* We don't need to be notified again. */
                if (unlikely(vhost_hlen))
                        /* Skip header. TODO: support TSO. */
                        move_iovec_hdr(vq->iov, vq->hdr, vhost_hlen, in);
                else
                        /* Copy the header for use in VIRTIO_NET_F_MRG_RXBUF:
                         * needed because recvmsg can modify msg_iov. */
                        copy_iovec_hdr(vq->iov, vq->hdr, sock_hlen, in);
                msg.msg_iovlen = in;
                err = sock->ops->recvmsg(NULL, sock, &msg,
                                         sock_len, MSG_DONTWAIT | MSG_TRUNC);
                /* Userspace might have consumed the packet meanwhile:
                 * it's not supposed to do this usually, but might be hard
                 * to prevent. Discard data we got (if any) and keep going. */
                if (unlikely(err != sock_len)) {
                        pr_debug("Discarded rx packet: "
                                 " len %d, expected %zd\n", err, sock_len);
                        vhost_discard_vq_desc(vq, headcount);
                        continue;
                }
                if (unlikely(vhost_hlen) &&
                    memcpy_toiovecend(vq->hdr, (unsigned char *)&hdr, 0,
                                      vhost_hlen)) {
                        vq_err(vq, "Unable to write vnet_hdr at addr %p\n",
                               vq->iov->iov_base);
                        break;
                }
                /* TODO: Should check and handle checksum. */
                if (likely(mergeable) &&
                    memcpy_toiovecend(vq->hdr, (unsigned char *)&headcount,
                                      offsetof(typeof(hdr), num_buffers),
                                      sizeof hdr.num_buffers)) {
                        vq_err(vq, "Failed num_buffers write");
                        vhost_discard_vq_desc(vq, headcount);
                        break;
                }
                vhost_add_used_and_signal_n(&net->dev, vq, vq->heads,
                                            headcount);
                if (unlikely(vq_log))
                        vhost_log_write(vq, vq_log, log, vhost_len);
                total_len += vhost_len;
                if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
                        vhost_poll_queue(&vq->poll);
                        break;
                }
        }

        mutex_unlock(&vq->mutex);
}

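/*
 * A subtlety in handle_rx()'s mergeable path: headcount is deliberately s16
 * so that copying it into hdr.num_buffers via memcpy_toiovecend() writes a
 * properly sized 16-bit value. Legacy virtio uses the guest's native byte
 * order, which vhost at this stage assumes matches the host's.
 */
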
static void handle_tx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_tx(net);
}

static void handle_rx_kick(struct vhost_work *work)
{
        struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
                                                  poll.work);
        struct vhost_net *net = container_of(vq->dev, struct vhost_net, dev);

        handle_rx(net);
}

static void handle_tx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_TX].work);
        handle_tx(net);
}

static void handle_rx_net(struct vhost_work *work)
{
        struct vhost_net *net = container_of(work, struct vhost_net,
                                             poll[VHOST_NET_VQ_RX].work);
        handle_rx(net);
}

static int vhost_net_open(struct inode *inode, struct file *f)
{
        struct vhost_net *n = kmalloc(sizeof *n, GFP_KERNEL);
        struct vhost_dev *dev;
        int r;

        if (!n)
                return -ENOMEM;

        dev = &n->dev;
        n->vqs[VHOST_NET_VQ_TX].handle_kick = handle_tx_kick;
        n->vqs[VHOST_NET_VQ_RX].handle_kick = handle_rx_kick;
        r = vhost_dev_init(dev, n->vqs, VHOST_NET_VQ_MAX);
        if (r < 0) {
                kfree(n);
                return r;
        }

        vhost_poll_init(n->poll + VHOST_NET_VQ_TX, handle_tx_net, POLLOUT, dev);
        vhost_poll_init(n->poll + VHOST_NET_VQ_RX, handle_rx_net, POLLIN, dev);
        n->tx_poll_state = VHOST_NET_POLL_DISABLED;

        f->private_data = n;

        return 0;
}

static void vhost_net_disable_vq(struct vhost_net *n,
                                 struct vhost_virtqueue *vq)
{
        if (!vq->private_data)
                return;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                tx_poll_stop(n);
                n->tx_poll_state = VHOST_NET_POLL_DISABLED;
        } else
                vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
}

static int vhost_net_enable_vq(struct vhost_net *n,
                               struct vhost_virtqueue *vq)
{
        struct socket *sock;
        int ret;

        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        if (!sock)
                return 0;
        if (vq == n->vqs + VHOST_NET_VQ_TX) {
                n->tx_poll_state = VHOST_NET_POLL_STOPPED;
                ret = tx_poll_start(n, sock);
        } else
                ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);

        return ret;
}

static struct socket *vhost_net_stop_vq(struct vhost_net *n,
                                        struct vhost_virtqueue *vq)
{
        struct socket *sock;

        mutex_lock(&vq->mutex);
        sock = rcu_dereference_protected(vq->private_data,
                                         lockdep_is_held(&vq->mutex));
        vhost_net_disable_vq(n, vq);
        rcu_assign_pointer(vq->private_data, NULL);
        mutex_unlock(&vq->mutex);
        return sock;
}

static void vhost_net_stop(struct vhost_net *n, struct socket **tx_sock,
                           struct socket **rx_sock)
{
        *tx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_TX);
        *rx_sock = vhost_net_stop_vq(n, n->vqs + VHOST_NET_VQ_RX);
}

static void vhost_net_flush_vq(struct vhost_net *n, int index)
{
        vhost_poll_flush(n->poll + index);
        vhost_poll_flush(&n->dev.vqs[index].poll);
}

static void vhost_net_flush(struct vhost_net *n)
{
        vhost_net_flush_vq(n, VHOST_NET_VQ_TX);
        vhost_net_flush_vq(n, VHOST_NET_VQ_RX);
        if (n->dev.vqs[VHOST_NET_VQ_TX].ubufs) {
                mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                n->tx_flush = true;
                mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                /* Wait for all lower device DMAs done. */
                vhost_ubuf_put_and_wait(n->dev.vqs[VHOST_NET_VQ_TX].ubufs);
                mutex_lock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
                n->tx_flush = false;
                kref_init(&n->dev.vqs[VHOST_NET_VQ_TX].ubufs->kref);
                mutex_unlock(&n->dev.vqs[VHOST_NET_VQ_TX].mutex);
        }
}

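/*
 * Flush protocol for the zerocopy path: tx_flush is raised under the TX vq
 * mutex so vhost_net_tx_select_zcopy() stops starting new DMAs, then
 * vhost_ubuf_put_and_wait() drops our reference and sleeps until every
 * in-flight ubuf callback has fired, and finally the kref is re-armed to 1
 * for the next round of transmits.
 */
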
static int vhost_net_release(struct inode *inode, struct file *f)
{
        struct vhost_net *n = f->private_data;
        struct socket *tx_sock;
        struct socket *rx_sock;

        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        vhost_dev_stop(&n->dev);
        vhost_dev_cleanup(&n->dev, false);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        /* We do an extra flush before freeing memory,
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n);
        return 0;
}

static struct socket *get_raw_socket(int fd)
{
        struct {
                struct sockaddr_ll sa;
                char buf[MAX_ADDR_LEN];
        } uaddr;
        int uaddr_len = sizeof uaddr, r;
        struct socket *sock = sockfd_lookup(fd, &r);

        if (!sock)
                return ERR_PTR(-ENOTSOCK);

        /* Parameter checking */
        if (sock->sk->sk_type != SOCK_RAW) {
                r = -ESOCKTNOSUPPORT;
                goto err;
        }

        r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa,
                               &uaddr_len, 0);
        if (r)
                goto err;

        if (uaddr.sa.sll_family != AF_PACKET) {
                r = -EPFNOSUPPORT;
                goto err;
        }
        return sock;
err:
        fput(sock->file);
        return ERR_PTR(r);
}

static struct socket *get_tap_socket(int fd)
{
        struct file *file = fget(fd);
        struct socket *sock;

        if (!file)
                return ERR_PTR(-EBADF);
        sock = tun_get_socket(file);
        if (!IS_ERR(sock))
                return sock;
        sock = macvtap_get_socket(file);
        if (IS_ERR(sock))
                fput(file);
        return sock;
}

static struct socket *get_socket(int fd)
{
        struct socket *sock;

        /* special case to disable backend */
        if (fd == -1)
                return NULL;
        sock = get_raw_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        sock = get_tap_socket(fd);
        if (!IS_ERR(sock))
                return sock;
        return ERR_PTR(-ENOTSOCK);
}

static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
{
        struct socket *sock, *oldsock;
        struct vhost_virtqueue *vq;
        struct vhost_ubuf_ref *ubufs, *oldubufs = NULL;
        int r;

        mutex_lock(&n->dev.mutex);
        r = vhost_dev_check_owner(&n->dev);
        if (r)
                goto err;

        if (index >= VHOST_NET_VQ_MAX) {
                r = -ENOBUFS;
                goto err;
        }
        vq = n->vqs + index;
        mutex_lock(&vq->mutex);

        /* Verify that ring has been setup correctly. */
        if (!vhost_vq_access_ok(vq)) {
                r = -EFAULT;
                goto err_vq;
        }
        sock = get_socket(fd);
        if (IS_ERR(sock)) {
                r = PTR_ERR(sock);
                goto err_vq;
        }

        /* start polling new socket */
        oldsock = rcu_dereference_protected(vq->private_data,
                                            lockdep_is_held(&vq->mutex));
        if (sock != oldsock) {
                ubufs = vhost_ubuf_alloc(vq, sock && vhost_sock_zcopy(sock));
                if (IS_ERR(ubufs)) {
                        r = PTR_ERR(ubufs);
                        goto err_ubufs;
                }

                vhost_net_disable_vq(n, vq);
                rcu_assign_pointer(vq->private_data, sock);
                r = vhost_init_used(vq);
                if (r)
                        goto err_used;
                r = vhost_net_enable_vq(n, vq);
                if (r)
                        goto err_used;

                oldubufs = vq->ubufs;
                vq->ubufs = ubufs;

                n->tx_packets = 0;
                n->tx_zcopy_err = 0;
                n->tx_flush = false;
        }

        mutex_unlock(&vq->mutex);

        if (oldubufs) {
                vhost_ubuf_put_and_wait(oldubufs);
                mutex_lock(&vq->mutex);
                vhost_zerocopy_signal_used(n, vq);
                mutex_unlock(&vq->mutex);
        }

        if (oldsock) {
                vhost_net_flush_vq(n, index);
                fput(oldsock->file);
        }

        mutex_unlock(&n->dev.mutex);
        return 0;

err_used:
        rcu_assign_pointer(vq->private_data, oldsock);
        vhost_net_enable_vq(n, vq);
        if (ubufs)
                vhost_ubuf_put_and_wait(ubufs);
err_ubufs:
        fput(sock->file);
err_vq:
        mutex_unlock(&vq->mutex);
err:
        mutex_unlock(&n->dev.mutex);
        return r;
}

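/*
 * Typical userspace sequence driving this ioctl (a sketch only; memory
 * table and vring setup are elided, and tap_fd is assumed to be an open
 * tap/macvtap descriptor):
 *
 *      int vhost = open("/dev/vhost-net", O_RDWR);
 *      ioctl(vhost, VHOST_SET_OWNER, NULL);
 *      ...set up memory table and vrings...
 *      struct vhost_vring_file backend = {
 *              .index = VHOST_NET_VQ_TX,       (TX = 1, RX = 0)
 *              .fd = tap_fd,
 *      };
 *      ioctl(vhost, VHOST_NET_SET_BACKEND, &backend);
 *
 * Passing .fd = -1 detaches the backend (see get_socket() above).
 */
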
static long vhost_net_reset_owner(struct vhost_net *n)
{
        struct socket *tx_sock = NULL;
        struct socket *rx_sock = NULL;
        long err;

        mutex_lock(&n->dev.mutex);
        err = vhost_dev_check_owner(&n->dev);
        if (err)
                goto done;
        vhost_net_stop(n, &tx_sock, &rx_sock);
        vhost_net_flush(n);
        err = vhost_dev_reset_owner(&n->dev);
done:
        mutex_unlock(&n->dev.mutex);
        if (tx_sock)
                fput(tx_sock->file);
        if (rx_sock)
                fput(rx_sock->file);
        return err;
}

static int vhost_net_set_features(struct vhost_net *n, u64 features)
{
        size_t vhost_hlen, sock_hlen, hdr_len;
        int i;

        hdr_len = (features & (1 << VIRTIO_NET_F_MRG_RXBUF)) ?
                        sizeof(struct virtio_net_hdr_mrg_rxbuf) :
                        sizeof(struct virtio_net_hdr);
        if (features & (1 << VHOST_NET_F_VIRTIO_NET_HDR)) {
                /* vhost provides vnet_hdr */
                vhost_hlen = hdr_len;
                sock_hlen = 0;
        } else {
                /* socket provides vnet_hdr */
                vhost_hlen = 0;
                sock_hlen = hdr_len;
        }
        mutex_lock(&n->dev.mutex);
        if ((features & (1 << VHOST_F_LOG_ALL)) &&
            !vhost_log_access_ok(&n->dev)) {
                mutex_unlock(&n->dev.mutex);
                return -EFAULT;
        }
        n->dev.acked_features = features;
        smp_wmb();
        for (i = 0; i < VHOST_NET_VQ_MAX; ++i) {
                mutex_lock(&n->vqs[i].mutex);
                n->vqs[i].vhost_hlen = vhost_hlen;
                n->vqs[i].sock_hlen = sock_hlen;
                mutex_unlock(&n->vqs[i].mutex);
        }
        vhost_net_flush(n);
        mutex_unlock(&n->dev.mutex);
        return 0;
}
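
/*
 * The vhost_hlen/sock_hlen split above decides who supplies the virtio-net
 * header: with VHOST_NET_F_VIRTIO_NET_HDR, vhost handles it itself (10
 * bytes, or 12 when VIRTIO_NET_F_MRG_RXBUF adds num_buffers), stripping it
 * on TX and writing it on RX; otherwise the socket already carries the
 * header inline and vhost only accounts for its length.
 */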

static long vhost_net_ioctl(struct file *f, unsigned int ioctl,
                            unsigned long arg)
{
        struct vhost_net *n = f->private_data;
        void __user *argp = (void __user *)arg;
        u64 __user *featurep = argp;
        struct vhost_vring_file backend;
        u64 features;
        int r;

        switch (ioctl) {
        case VHOST_NET_SET_BACKEND:
                if (copy_from_user(&backend, argp, sizeof backend))
                        return -EFAULT;
                return vhost_net_set_backend(n, backend.index, backend.fd);
        case VHOST_GET_FEATURES:
                features = VHOST_NET_FEATURES;
                if (copy_to_user(featurep, &features, sizeof features))
                        return -EFAULT;
                return 0;
        case VHOST_SET_FEATURES:
                if (copy_from_user(&features, featurep, sizeof features))
                        return -EFAULT;
                if (features & ~VHOST_NET_FEATURES)
                        return -EOPNOTSUPP;
                return vhost_net_set_features(n, features);
        case VHOST_RESET_OWNER:
                return vhost_net_reset_owner(n);
        default:
                mutex_lock(&n->dev.mutex);
                r = vhost_dev_ioctl(&n->dev, ioctl, argp);
                if (r == -ENOIOCTLCMD)
                        r = vhost_vring_ioctl(&n->dev, ioctl, argp);
                else
                        vhost_net_flush(n);
                mutex_unlock(&n->dev.mutex);
                return r;
        }
}

#ifdef CONFIG_COMPAT
static long vhost_net_compat_ioctl(struct file *f, unsigned int ioctl,
                                   unsigned long arg)
{
        return vhost_net_ioctl(f, ioctl, (unsigned long)compat_ptr(arg));
}
#endif

static const struct file_operations vhost_net_fops = {
        .owner          = THIS_MODULE,
        .release        = vhost_net_release,
        .unlocked_ioctl = vhost_net_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = vhost_net_compat_ioctl,
#endif
        .open           = vhost_net_open,
        .llseek         = noop_llseek,
};

static struct miscdevice vhost_net_misc = {
        .minor = VHOST_NET_MINOR,
        .name = "vhost-net",
        .fops = &vhost_net_fops,
};

static int vhost_net_init(void)
{
        if (experimental_zcopytx)
                vhost_enable_zcopy(VHOST_NET_VQ_TX);
        return misc_register(&vhost_net_misc);
}
module_init(vhost_net_init);

static void vhost_net_exit(void)
{
        misc_deregister(&vhost_net_misc);
}
module_exit(vhost_net_exit);

MODULE_VERSION("0.0.1");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Michael S. Tsirkin");
MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
MODULE_ALIAS("devname:vhost-net");