/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

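/* Look up the vhost_vsock instance serving @guest_cid; NULL if none exists. */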
static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid) {
			spin_unlock_bh(&vhost_vsock_lock);
			return vsock;
		}
	}
	spin_unlock_bh(&vhost_vsock_lock);

	return NULL;
}

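/* Deliver host->guest packets: drain vsock->send_pkt_list into the guest's
 * RX virtqueue, copying each packet's header and payload into the
 * guest-provided buffers.  Runs from the send_pkt work item and from the
 * RX kick handler.
 */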
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

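/* Transport send_pkt callback: queue @pkt on the host->guest list and kick
 * the worker.  Returns the queued length, or -ENODEV if no vhost_vsock
 * instance serves the destination CID.
 */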
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	struct vhost_virtqueue *vq;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	vq = &vsock->vqs[VSOCK_VQ_RX];

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

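/* Transport cancel_pkt callback: drop all not-yet-delivered packets that
 * belong to @vsk.  If enough reply slots are freed, resume TX processing
 * that may have stalled on vhost_vsock_more_replies().
 */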
static int
vhost_transport_cancel_pkt(struct vsock_sock *vsk)
{
	struct vhost_vsock *vsock;
	struct virtio_vsock_pkt *pkt, *n;
	int cnt = 0;
	LIST_HEAD(freeme);

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
	if (!vsock)
		return -ENODEV;

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
		if (pkt->vsk != vsk)
			continue;
		list_move(&pkt->list, &freeme);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	list_for_each_entry_safe(pkt, n, &freeme, list) {
		if (pkt->reply)
			cnt++;
		list_del(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}

	if (cnt) {
		struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
		int new_cnt;

		new_cnt = atomic_sub_return(cnt, &vsock->queued_replies);
		if (new_cnt + cnt >= tx_vq->num && new_cnt < tx_vq->num)
			vhost_poll_queue(&tx_vq->poll);
	}

	return 0;
}

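/* Build a virtio_vsock_pkt from the guest buffers described by a TX
 * virtqueue descriptor chain.  Returns NULL on malformed descriptors or
 * allocation failure.
 */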
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

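/* Guest->host path: the guest kicked the TX virtqueue.  Pull descriptor
 * chains, rebuild each packet and hand it to the core virtio transport,
 * dropping packets that are not addressed from this guest's CID.
 */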
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

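/* Host->guest path: the guest refilled the RX virtqueue, so retry sending
 * any pending packets.
 */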
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

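/* VHOST_VSOCK_SET_RUNNING(1): validate virtqueue access and set each vq's
 * private_data so the kick handlers start processing.
 */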
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq;
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			ret = vhost_vq_init_access(vq);
			if (ret)
				goto err_vq;
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	vq->private_data = NULL;
	mutex_unlock(&vq->mutex);

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

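/* VHOST_VSOCK_SET_RUNNING(0): clear each vq's private_data so the kick
 * handlers stop processing.
 */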
static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

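/* open() on /dev/vhost-vsock: allocate a vhost_vsock instance, initialize
 * its virtqueues and work items, and add it to the global list with no
 * guest CID assigned yet.
 */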
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

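/* Wait until all queued virtqueue and send_pkt work has finished. */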
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

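/* Reset a connected socket whose peer CID no longer has a vhost_vsock
 * instance.  Called for every connected socket when a device is released.
 */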
static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->remote_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

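/* release() on /dev/vhost-vsock: unlink the instance, reset orphaned
 * sockets, stop the virtqueues, drop any pending host->guest packets and
 * free the device.
 */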
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here. */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

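/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID, rejecting
 * reserved, 64-bit and already-claimed CIDs.
 */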
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock)
		return -EADDRINUSE;

	spin_lock_bh(&vhost_vsock_lock);
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

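/* ioctl() on /dev/vhost-vsock: vsock-specific commands are handled here;
 * everything else falls through to the generic vhost ioctls.
 */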
static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner = THIS_MODULE,
	.open = vhost_vsock_dev_open,
	.release = vhost_vsock_dev_release,
	.llseek = noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid = vhost_transport_get_local_cid,

		.init = virtio_transport_do_socket_init,
		.destruct = virtio_transport_destruct,
		.release = virtio_transport_release,
		.connect = virtio_transport_connect,
		.shutdown = virtio_transport_shutdown,
		.cancel_pkt = vhost_transport_cancel_pkt,

		.dgram_enqueue = virtio_transport_dgram_enqueue,
		.dgram_dequeue = virtio_transport_dgram_dequeue,
		.dgram_bind = virtio_transport_dgram_bind,
		.dgram_allow = virtio_transport_dgram_allow,

		.stream_enqueue = virtio_transport_stream_enqueue,
		.stream_dequeue = virtio_transport_stream_dequeue,
		.stream_has_data = virtio_transport_stream_has_data,
		.stream_has_space = virtio_transport_stream_has_space,
		.stream_rcvhiwat = virtio_transport_stream_rcvhiwat,
		.stream_is_active = virtio_transport_stream_is_active,
		.stream_allow = virtio_transport_stream_allow,

		.notify_poll_in = virtio_transport_notify_poll_in,
		.notify_poll_out = virtio_transport_notify_poll_out,
		.notify_recv_init = virtio_transport_notify_recv_init,
		.notify_recv_pre_block = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init = virtio_transport_notify_send_init,
		.notify_send_pre_block = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size = virtio_transport_set_buffer_size,
		.set_min_buffer_size = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size = virtio_transport_set_max_buffer_size,
		.get_buffer_size = virtio_transport_get_buffer_size,
		.get_min_buffer_size = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;
	return misc_register(&vhost_vsock_misc);
};

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
};

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock ");