/*
 * vhost transport for vsock
 *
 * Copyright (C) 2013-2015 Red Hat, Inc.
 * Author: Asias He <asias@redhat.com>
 *         Stefan Hajnoczi <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#include <linux/miscdevice.h>
#include <linux/atomic.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <linux/virtio_vsock.h>
#include <linux/vhost.h>

#include <net/af_vsock.h>
#include "vhost.h"

#define VHOST_VSOCK_DEFAULT_HOST_CID	2

enum {
	VHOST_VSOCK_FEATURES = VHOST_FEATURES,
};

/* Used to track all the vhost_vsock instances on the system. */
static DEFINE_SPINLOCK(vhost_vsock_lock);
static LIST_HEAD(vhost_vsock_list);

struct vhost_vsock {
	struct vhost_dev dev;
	struct vhost_virtqueue vqs[2];

	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
	struct list_head list;

	struct vhost_work send_pkt_work;
	spinlock_t send_pkt_list_lock;
	struct list_head send_pkt_list;	/* host->guest pending packets */

	atomic_t queued_replies;

	u32 guest_cid;
};

static u32 vhost_transport_get_local_cid(void)
{
	return VHOST_VSOCK_DEFAULT_HOST_CID;
}

static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
{
	struct vhost_vsock *vsock;

	spin_lock_bh(&vhost_vsock_lock);
	list_for_each_entry(vsock, &vhost_vsock_list, list) {
		u32 other_cid = vsock->guest_cid;

		/* Skip instances that have no CID yet */
		if (other_cid == 0)
			continue;

		if (other_cid == guest_cid) {
			spin_unlock_bh(&vhost_vsock_lock);
			return vsock;
		}
	}
	spin_unlock_bh(&vhost_vsock_lock);

	return NULL;
}

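/* Drain the host->guest send_pkt_list into the guest's RX virtqueue.  A
 * packet that cannot be placed (no descriptor available) is put back at the
 * head of the list and processing resumes on the next guest kick.  Completing
 * a reply packet frees up reply quota, which may restart TX processing.
 * Runs in vhost worker context and takes vq->mutex.
 */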
static void
vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
			    struct vhost_virtqueue *vq)
{
	struct vhost_virtqueue *tx_vq = &vsock->vqs[VSOCK_VQ_TX];
	bool added = false;
	bool restart_tx = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	/* Avoid further vmexits, we're already processing the virtqueue */
	vhost_disable_notify(&vsock->dev, vq);

	for (;;) {
		struct virtio_vsock_pkt *pkt;
		struct iov_iter iov_iter;
		unsigned out, in;
		size_t nbytes;
		size_t len;
		int head;

		spin_lock_bh(&vsock->send_pkt_list_lock);
		if (list_empty(&vsock->send_pkt_list)) {
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			vhost_enable_notify(&vsock->dev, vq);
			break;
		}

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		spin_unlock_bh(&vsock->send_pkt_list_lock);

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);
			break;
		}

		if (head == vq->num) {
			spin_lock_bh(&vsock->send_pkt_list_lock);
			list_add(&pkt->list, &vsock->send_pkt_list);
			spin_unlock_bh(&vsock->send_pkt_list_lock);

			/* We cannot finish yet if more buffers snuck in while
			 * re-enabling notify.
			 */
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		if (out) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Expected 0 output buffers, got %u\n", out);
			break;
		}

		len = iov_length(&vq->iov[out], in);
		iov_iter_init(&iov_iter, READ, &vq->iov[out], in, len);

		nbytes = copy_to_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
		if (nbytes != sizeof(pkt->hdr)) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt hdr\n");
			break;
		}

		nbytes = copy_to_iter(pkt->buf, pkt->len, &iov_iter);
		if (nbytes != pkt->len) {
			virtio_transport_free_pkt(pkt);
			vq_err(vq, "Faulted on copying pkt buf\n");
			break;
		}

		vhost_add_used(vq, head, sizeof(pkt->hdr) + pkt->len);
		added = true;

		if (pkt->reply) {
			int val;

			val = atomic_dec_return(&vsock->queued_replies);

			/* Do we have resources to resume tx processing? */
			if (val + 1 == tx_vq->num)
				restart_tx = true;
		}

		virtio_transport_free_pkt(pkt);
	}
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);

	if (restart_tx)
		vhost_poll_queue(&tx_vq->poll);
}

static void vhost_transport_send_pkt_work(struct vhost_work *work)
{
	struct vhost_virtqueue *vq;
	struct vhost_vsock *vsock;

	vsock = container_of(work, struct vhost_vsock, send_pkt_work);
	vq = &vsock->vqs[VSOCK_VQ_RX];

	vhost_transport_do_send_pkt(vsock, vq);
}

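/* Queue a packet for delivery to the guest identified by hdr.dst_cid and
 * kick the send_pkt worker.  Returns the packet length on success or
 * -ENODEV if no vhost_vsock instance owns that CID.
 */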
static int
vhost_transport_send_pkt(struct virtio_vsock_pkt *pkt)
{
	struct vhost_vsock *vsock;
	struct vhost_virtqueue *vq;
	int len = pkt->len;

	/* Find the vhost_vsock according to guest context id */
	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
	if (!vsock) {
		virtio_transport_free_pkt(pkt);
		return -ENODEV;
	}

	vq = &vsock->vqs[VSOCK_VQ_RX];

	if (pkt->reply)
		atomic_inc(&vsock->queued_replies);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	list_add_tail(&pkt->list, &vsock->send_pkt_list);
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
	return len;
}

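/* Build a virtio_vsock_pkt from the guest's TX descriptor chain: copy in the
 * header, validate the payload length against VIRTIO_VSOCK_MAX_PKT_BUF_SIZE,
 * then copy in the payload.  The chain must consist only of device-readable
 * ("out") buffers.  Returns NULL on malformed input or allocation failure.
 */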
static struct virtio_vsock_pkt *
vhost_vsock_alloc_pkt(struct vhost_virtqueue *vq,
		      unsigned int out, unsigned int in)
{
	struct virtio_vsock_pkt *pkt;
	struct iov_iter iov_iter;
	size_t nbytes;
	size_t len;

	if (in != 0) {
		vq_err(vq, "Expected 0 input buffers, got %u\n", in);
		return NULL;
	}

	pkt = kzalloc(sizeof(*pkt), GFP_KERNEL);
	if (!pkt)
		return NULL;

	len = iov_length(vq->iov, out);
	iov_iter_init(&iov_iter, WRITE, vq->iov, out, len);

	nbytes = copy_from_iter(&pkt->hdr, sizeof(pkt->hdr), &iov_iter);
	if (nbytes != sizeof(pkt->hdr)) {
		vq_err(vq, "Expected %zu bytes for pkt->hdr, got %zu bytes\n",
		       sizeof(pkt->hdr), nbytes);
		kfree(pkt);
		return NULL;
	}

	if (le16_to_cpu(pkt->hdr.type) == VIRTIO_VSOCK_TYPE_STREAM)
		pkt->len = le32_to_cpu(pkt->hdr.len);

	/* No payload */
	if (!pkt->len)
		return pkt;

	/* The pkt is too big */
	if (pkt->len > VIRTIO_VSOCK_MAX_PKT_BUF_SIZE) {
		kfree(pkt);
		return NULL;
	}

	pkt->buf = kmalloc(pkt->len, GFP_KERNEL);
	if (!pkt->buf) {
		kfree(pkt);
		return NULL;
	}

	nbytes = copy_from_iter(pkt->buf, pkt->len, &iov_iter);
	if (nbytes != pkt->len) {
		vq_err(vq, "Expected %u byte payload, got %zu bytes\n",
		       pkt->len, nbytes);
		virtio_transport_free_pkt(pkt);
		return NULL;
	}

	return pkt;
}

/* Is there space left for replies to rx packets? */
static bool vhost_vsock_more_replies(struct vhost_vsock *vsock)
{
	struct vhost_virtqueue *vq = &vsock->vqs[VSOCK_VQ_TX];
	int val;

	smp_rmb(); /* paired with atomic_inc() and atomic_dec_return() */
	val = atomic_read(&vsock->queued_replies);

	return val < vq->num;
}

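/* Guest->host path: runs when the guest kicks the TX virtqueue.  Each
 * descriptor chain is turned into a packet and handed to the core virtio
 * transport.  Processing stops, with virtqueue callbacks left disabled, while
 * too many replies are queued; vhost_transport_do_send_pkt() re-queues this
 * handler once the RX side has drained enough replies.
 */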
static void vhost_vsock_handle_tx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);
	struct virtio_vsock_pkt *pkt;
	int head;
	unsigned int out, in;
	bool added = false;

	mutex_lock(&vq->mutex);

	if (!vq->private_data)
		goto out;

	vhost_disable_notify(&vsock->dev, vq);
	for (;;) {
		u32 len;

		if (!vhost_vsock_more_replies(vsock)) {
			/* Stop tx until the device processes already
			 * pending replies.  Leave tx virtqueue
			 * callbacks disabled.
			 */
			goto no_more_replies;
		}

		head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
					 &out, &in, NULL, NULL);
		if (head < 0)
			break;

		if (head == vq->num) {
			if (unlikely(vhost_enable_notify(&vsock->dev, vq))) {
				vhost_disable_notify(&vsock->dev, vq);
				continue;
			}
			break;
		}

		pkt = vhost_vsock_alloc_pkt(vq, out, in);
		if (!pkt) {
			vq_err(vq, "Faulted on pkt\n");
			continue;
		}

		len = pkt->len;

		/* Only accept correctly addressed packets */
		if (le64_to_cpu(pkt->hdr.src_cid) == vsock->guest_cid)
			virtio_transport_recv_pkt(pkt);
		else
			virtio_transport_free_pkt(pkt);

		vhost_add_used(vq, head, sizeof(pkt->hdr) + len);
		added = true;
	}

no_more_replies:
	if (added)
		vhost_signal(&vsock->dev, vq);

out:
	mutex_unlock(&vq->mutex);
}

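/* Host->guest path: runs when the guest kicks the RX virtqueue, i.e. when
 * fresh buffers become available for packets pending on send_pkt_list.
 */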
static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
{
	struct vhost_virtqueue *vq = container_of(work, struct vhost_virtqueue,
						  poll.work);
	struct vhost_vsock *vsock = container_of(vq->dev, struct vhost_vsock,
						 dev);

	vhost_transport_do_send_pkt(vsock, vq);
}

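/* VHOST_VSOCK_SET_RUNNING(1): mark every virtqueue as backed by this
 * instance.  A non-NULL vq->private_data is what the kick handlers test to
 * decide whether the device is running.
 */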
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);

		if (!vhost_vq_access_ok(vq)) {
			ret = -EFAULT;
			mutex_unlock(&vq->mutex);
			goto err_vq;
		}

		if (!vq->private_data) {
			vq->private_data = vsock;
			vhost_vq_init_access(vq);
		}

		mutex_unlock(&vq->mutex);
	}

	mutex_unlock(&vsock->dev.mutex);
	return 0;

err_vq:
	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}
err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static int vhost_vsock_stop(struct vhost_vsock *vsock)
{
	size_t i;
	int ret;

	mutex_lock(&vsock->dev.mutex);

	ret = vhost_dev_check_owner(&vsock->dev);
	if (ret)
		goto err;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		struct vhost_virtqueue *vq = &vsock->vqs[i];

		mutex_lock(&vq->mutex);
		vq->private_data = NULL;
		mutex_unlock(&vq->mutex);
	}

err:
	mutex_unlock(&vsock->dev.mutex);
	return ret;
}

static void vhost_vsock_free(struct vhost_vsock *vsock)
{
	kvfree(vsock);
}

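/* Create a vhost_vsock instance when /dev/vhost-vsock is opened.  The
 * instance only becomes reachable via vhost_vsock_get() after userspace
 * assigns a guest CID with VHOST_VSOCK_SET_GUEST_CID, since CID 0 is skipped
 * during lookup.
 */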
static int vhost_vsock_dev_open(struct inode *inode, struct file *file)
{
	struct vhost_virtqueue **vqs;
	struct vhost_vsock *vsock;
	int ret;

	/* This struct is large and allocation could fail, fall back to vmalloc
	 * if there is no other way.
	 */
	vsock = kzalloc(sizeof(*vsock), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
	if (!vsock) {
		vsock = vmalloc(sizeof(*vsock));
		if (!vsock)
			return -ENOMEM;
	}

	vqs = kmalloc_array(ARRAY_SIZE(vsock->vqs), sizeof(*vqs), GFP_KERNEL);
	if (!vqs) {
		ret = -ENOMEM;
		goto out;
	}

	atomic_set(&vsock->queued_replies, 0);

	vqs[VSOCK_VQ_TX] = &vsock->vqs[VSOCK_VQ_TX];
	vqs[VSOCK_VQ_RX] = &vsock->vqs[VSOCK_VQ_RX];
	vsock->vqs[VSOCK_VQ_TX].handle_kick = vhost_vsock_handle_tx_kick;
	vsock->vqs[VSOCK_VQ_RX].handle_kick = vhost_vsock_handle_rx_kick;

	vhost_dev_init(&vsock->dev, vqs, ARRAY_SIZE(vsock->vqs));

	file->private_data = vsock;
	spin_lock_init(&vsock->send_pkt_list_lock);
	INIT_LIST_HEAD(&vsock->send_pkt_list);
	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);

	spin_lock_bh(&vhost_vsock_lock);
	list_add_tail(&vsock->list, &vhost_vsock_list);
	spin_unlock_bh(&vhost_vsock_lock);
	return 0;

out:
	vhost_vsock_free(vsock);
	return ret;
}

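/* Wait for the virtqueue kick handlers and the send_pkt worker to finish any
 * work queued before this call.
 */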
static void vhost_vsock_flush(struct vhost_vsock *vsock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++)
		if (vsock->vqs[i].handle_kick)
			vhost_poll_flush(&vsock->vqs[i].poll);
	vhost_work_flush(&vsock->dev, &vsock->send_pkt_work);
}

static void vhost_vsock_reset_orphans(struct sock *sk)
{
	struct vsock_sock *vsk = vsock_sk(sk);

	/* vmci_transport.c doesn't take sk_lock here either.  At least we're
	 * under vsock_table_lock so the sock cannot disappear while we're
	 * executing.
	 */

	if (!vhost_vsock_get(vsk->local_addr.svm_cid)) {
		sock_set_flag(sk, SOCK_DONE);
		vsk->peer_shutdown = SHUTDOWN_MASK;
		sk->sk_state = SS_UNCONNECTED;
		sk->sk_err = ECONNRESET;
		sk->sk_error_report(sk);
	}
}

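/* Tear down on close(): unlink the instance from the global list, reset any
 * sockets that were connected to its CID, stop the virtqueues, and free all
 * packets still pending on send_pkt_list.
 */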
static int vhost_vsock_dev_release(struct inode *inode, struct file *file)
{
	struct vhost_vsock *vsock = file->private_data;

	spin_lock_bh(&vhost_vsock_lock);
	list_del(&vsock->list);
	spin_unlock_bh(&vhost_vsock_lock);

	/* Iterating over all connections for all CIDs to find orphans is
	 * inefficient.  Room for improvement here.
	 */
	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);

	vhost_vsock_stop(vsock);
	vhost_vsock_flush(vsock);
	vhost_dev_stop(&vsock->dev);

	spin_lock_bh(&vsock->send_pkt_list_lock);
	while (!list_empty(&vsock->send_pkt_list)) {
		struct virtio_vsock_pkt *pkt;

		pkt = list_first_entry(&vsock->send_pkt_list,
				       struct virtio_vsock_pkt, list);
		list_del_init(&pkt->list);
		virtio_transport_free_pkt(pkt);
	}
	spin_unlock_bh(&vsock->send_pkt_list_lock);

	vhost_dev_cleanup(&vsock->dev, false);
	kfree(vsock->dev.vqs);
	vhost_vsock_free(vsock);
	return 0;
}

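/* VHOST_VSOCK_SET_GUEST_CID: assign the guest's context ID.  Reserved CIDs
 * and CIDs above 32 bits are rejected, as is a CID already claimed by
 * another instance.
 */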
static int vhost_vsock_set_cid(struct vhost_vsock *vsock, u64 guest_cid)
{
	struct vhost_vsock *other;

	/* Refuse reserved CIDs */
	if (guest_cid <= VMADDR_CID_HOST ||
	    guest_cid == U32_MAX)
		return -EINVAL;

	/* 64-bit CIDs are not yet supported */
	if (guest_cid > U32_MAX)
		return -EINVAL;

	/* Refuse if CID is already in use */
	other = vhost_vsock_get(guest_cid);
	if (other && other != vsock)
		return -EADDRINUSE;

	spin_lock_bh(&vhost_vsock_lock);
	vsock->guest_cid = guest_cid;
	spin_unlock_bh(&vhost_vsock_lock);

	return 0;
}

static int vhost_vsock_set_features(struct vhost_vsock *vsock, u64 features)
{
	struct vhost_virtqueue *vq;
	int i;

	if (features & ~VHOST_VSOCK_FEATURES)
		return -EOPNOTSUPP;

	mutex_lock(&vsock->dev.mutex);
	if ((features & (1 << VHOST_F_LOG_ALL)) &&
	    !vhost_log_access_ok(&vsock->dev)) {
		mutex_unlock(&vsock->dev.mutex);
		return -EFAULT;
	}

	for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
		vq = &vsock->vqs[i];
		mutex_lock(&vq->mutex);
		vq->acked_features = features;
		mutex_unlock(&vq->mutex);
	}
	mutex_unlock(&vsock->dev.mutex);
	return 0;
}

static long vhost_vsock_dev_ioctl(struct file *f, unsigned int ioctl,
				  unsigned long arg)
{
	struct vhost_vsock *vsock = f->private_data;
	void __user *argp = (void __user *)arg;
	u64 guest_cid;
	u64 features;
	int start;
	int r;

	switch (ioctl) {
	case VHOST_VSOCK_SET_GUEST_CID:
		if (copy_from_user(&guest_cid, argp, sizeof(guest_cid)))
			return -EFAULT;
		return vhost_vsock_set_cid(vsock, guest_cid);
	case VHOST_VSOCK_SET_RUNNING:
		if (copy_from_user(&start, argp, sizeof(start)))
			return -EFAULT;
		if (start)
			return vhost_vsock_start(vsock);
		else
			return vhost_vsock_stop(vsock);
	case VHOST_GET_FEATURES:
		features = VHOST_VSOCK_FEATURES;
		if (copy_to_user(argp, &features, sizeof(features)))
			return -EFAULT;
		return 0;
	case VHOST_SET_FEATURES:
		if (copy_from_user(&features, argp, sizeof(features)))
			return -EFAULT;
		return vhost_vsock_set_features(vsock, features);
	default:
		mutex_lock(&vsock->dev.mutex);
		r = vhost_dev_ioctl(&vsock->dev, ioctl, argp);
		if (r == -ENOIOCTLCMD)
			r = vhost_vring_ioctl(&vsock->dev, ioctl, argp);
		else
			vhost_vsock_flush(vsock);
		mutex_unlock(&vsock->dev.mutex);
		return r;
	}
}

static const struct file_operations vhost_vsock_fops = {
	.owner          = THIS_MODULE,
	.open           = vhost_vsock_dev_open,
	.release        = vhost_vsock_dev_release,
	.llseek		= noop_llseek,
	.unlocked_ioctl = vhost_vsock_dev_ioctl,
};

static struct miscdevice vhost_vsock_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "vhost-vsock",
	.fops = &vhost_vsock_fops,
};

static struct virtio_transport vhost_transport = {
	.transport = {
		.get_local_cid            = vhost_transport_get_local_cid,

		.init                     = virtio_transport_do_socket_init,
		.destruct                 = virtio_transport_destruct,
		.release                  = virtio_transport_release,
		.connect                  = virtio_transport_connect,
		.shutdown                 = virtio_transport_shutdown,

		.dgram_enqueue            = virtio_transport_dgram_enqueue,
		.dgram_dequeue            = virtio_transport_dgram_dequeue,
		.dgram_bind               = virtio_transport_dgram_bind,
		.dgram_allow              = virtio_transport_dgram_allow,

		.stream_enqueue           = virtio_transport_stream_enqueue,
		.stream_dequeue           = virtio_transport_stream_dequeue,
		.stream_has_data          = virtio_transport_stream_has_data,
		.stream_has_space         = virtio_transport_stream_has_space,
		.stream_rcvhiwat          = virtio_transport_stream_rcvhiwat,
		.stream_is_active         = virtio_transport_stream_is_active,
		.stream_allow             = virtio_transport_stream_allow,

		.notify_poll_in           = virtio_transport_notify_poll_in,
		.notify_poll_out          = virtio_transport_notify_poll_out,
		.notify_recv_init         = virtio_transport_notify_recv_init,
		.notify_recv_pre_block    = virtio_transport_notify_recv_pre_block,
		.notify_recv_pre_dequeue  = virtio_transport_notify_recv_pre_dequeue,
		.notify_recv_post_dequeue = virtio_transport_notify_recv_post_dequeue,
		.notify_send_init         = virtio_transport_notify_send_init,
		.notify_send_pre_block    = virtio_transport_notify_send_pre_block,
		.notify_send_pre_enqueue  = virtio_transport_notify_send_pre_enqueue,
		.notify_send_post_enqueue = virtio_transport_notify_send_post_enqueue,

		.set_buffer_size          = virtio_transport_set_buffer_size,
		.set_min_buffer_size      = virtio_transport_set_min_buffer_size,
		.set_max_buffer_size      = virtio_transport_set_max_buffer_size,
		.get_buffer_size          = virtio_transport_get_buffer_size,
		.get_min_buffer_size      = virtio_transport_get_min_buffer_size,
		.get_max_buffer_size      = virtio_transport_get_max_buffer_size,
	},

	.send_pkt = vhost_transport_send_pkt,
};

static int __init vhost_vsock_init(void)
{
	int ret;

	ret = vsock_core_init(&vhost_transport.transport);
	if (ret < 0)
		return ret;

	ret = misc_register(&vhost_vsock_misc);
	if (ret < 0)
		vsock_core_exit(); /* don't leave the transport registered on failure */
	return ret;
}

static void __exit vhost_vsock_exit(void)
{
	misc_deregister(&vhost_vsock_misc);
	vsock_core_exit();
}

module_init(vhost_vsock_init);
module_exit(vhost_vsock_exit);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Asias He");
MODULE_DESCRIPTION("vhost transport for vsock");