#ifndef _VHOST_H
#define _VHOST_H

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/uio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/atomic.h>

struct vhost_device;

struct vhost_work;
typedef void (*vhost_work_fn_t)(struct vhost_work *work);

struct vhost_work {
	struct list_head node;
	vhost_work_fn_t fn;
	wait_queue_head_t done;
	int flushing;
	unsigned queue_seq;
	unsigned done_seq;
};

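/*
 * How the fields above interact (a sketch; the authoritative logic lives
 * in drivers/vhost/vhost.c): vhost_work_queue() links the work onto the
 * device's work_list and bumps queue_seq; the worker thread runs fn, then
 * advances done_seq and wakes 'done'.  A flush simply waits until done_seq
 * catches up with the sequence number observed at flush time.
 */
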
/* Poll a file (eventfd or socket) */
/* Note: there's nothing vhost specific about this structure. */
struct vhost_poll {
	poll_table table;
	wait_queue_head_t *wqh;
	wait_queue_t wait;
	struct vhost_work work;
	unsigned long mask;
	struct vhost_dev *dev;
};

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn);
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);

void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev);
int vhost_poll_start(struct vhost_poll *poll, struct file *file);
void vhost_poll_stop(struct vhost_poll *poll);
void vhost_poll_flush(struct vhost_poll *poll);
void vhost_poll_queue(struct vhost_poll *poll);

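/*
 * Usage sketch for the vhost_poll API above (hypothetical driver code;
 * handle_kick stands for a caller-supplied vhost_work_fn_t):
 *
 *	vhost_poll_init(&poll, handle_kick, POLLIN, dev);
 *	if (vhost_poll_start(&poll, file))
 *		goto err;
 *	...
 *	vhost_poll_stop(&poll);
 *	vhost_poll_flush(&poll);	wait for queued work to complete
 */
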
struct vhost_log {
	u64 addr;
	u64 len;
};

struct vhost_virtqueue;

/* The virtqueue structure describes a queue attached to a device. */
struct vhost_virtqueue {
	struct vhost_dev *dev;

	/* The actual ring of buffers. */
	struct mutex mutex;
	unsigned int num;
	struct vring_desc __user *desc;
	struct vring_avail __user *avail;
	struct vring_used __user *used;
	struct file *kick;
	struct file *call;
	struct file *error;
	struct eventfd_ctx *call_ctx;
	struct eventfd_ctx *error_ctx;
	struct eventfd_ctx *log_ctx;

	struct vhost_poll poll;

	/* The routine to call when the Guest pings us, or on timeout. */
	vhost_work_fn_t handle_kick;

	/* Last available index we saw. */
	u16 last_avail_idx;

	/* Caches available index value from user. */
	u16 avail_idx;

	/* Last index we used. */
	u16 last_used_idx;

	/* Used flags */
	u16 used_flags;

	/* Last used index value we have signalled on */
	u16 signalled_used;

	/* Whether the signalled_used value above is valid */
	bool signalled_used_valid;

	/* Log writes to used structure. */
	bool log_used;
	u64 log_addr;

	struct iovec iov[UIO_MAXIOV];
	/* iovec space for mapping an indirect descriptor table. */
	struct iovec *indirect;
	/* Used ring entries staged before being written back to the guest. */
	struct vring_used_elem *heads;
	/* We use a kind of RCU to access private pointer.
	 * All readers access it from worker, which makes it possible to
	 * flush the vhost_work instead of synchronize_rcu. Therefore readers do
	 * not need to call rcu_read_lock/rcu_read_unlock: the beginning of
	 * vhost_work execution acts instead of rcu_read_lock() and the end of
	 * vhost_work execution acts instead of rcu_read_unlock().
	 * Writers use virtqueue mutex. */
	void __rcu *private_data;
	/* Log write descriptors */
	void __user *log_base;
	struct vhost_log *log;
};

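/*
 * Backend attach sketch following the private_data scheme described in
 * the struct above (hypothetical code, modelled on the net driver):
 *
 *	mutex_lock(&vq->mutex);
 *	rcu_assign_pointer(vq->private_data, sock);
 *	mutex_unlock(&vq->mutex);
 *	vhost_poll_flush(&vq->poll);	flush stands in for synchronize_rcu
 */
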
struct vhost_dev {
	/* Readers use RCU to access memory table pointer
	 * log base pointer and features.
	 * Writers use mutex below. */
	struct vhost_memory __rcu *memory;
	struct mm_struct *mm;
	struct mutex mutex;
	unsigned acked_features;
	struct vhost_virtqueue **vqs;
	int nvqs;
	struct file *log_file;
	struct eventfd_ctx *log_ctx;
	spinlock_t work_lock;
	struct list_head work_list;
	struct task_struct *worker;
};

long vhost_dev_init(struct vhost_dev *, struct vhost_virtqueue **vqs, int nvqs);
long vhost_dev_check_owner(struct vhost_dev *);
struct vhost_memory *vhost_dev_reset_owner_prepare(void);
void vhost_dev_reset_owner(struct vhost_dev *, struct vhost_memory *);
void vhost_dev_cleanup(struct vhost_dev *, bool locked);
void vhost_dev_stop(struct vhost_dev *);
long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, void __user *argp);
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp);
int vhost_vq_access_ok(struct vhost_virtqueue *vq);
int vhost_log_access_ok(struct vhost_dev *);

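/*
 * Rough lifecycle for a vhost driver built on the calls above (e.g. the
 * net driver; exact ordering is the caller's responsibility):
 *	open:	 vhost_dev_init(dev, vqs, nvqs);
 *	ioctl:	 vhost_dev_ioctl()/vhost_vring_ioctl() from the fd handler;
 *	release: vhost_dev_stop(dev); vhost_dev_cleanup(dev, false);
 */
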
int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
		      struct iovec iov[], unsigned int iov_count,
		      unsigned int *out_num, unsigned int *in_num,
		      struct vhost_log *log, unsigned int *log_num);
void vhost_discard_vq_desc(struct vhost_virtqueue *, int n);

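/*
 * Descriptor processing sketch (simplified from what the net driver's
 * handle_kick callbacks do):
 *
 *	head = vhost_get_vq_desc(dev, vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (head == vq->num)
 *		break;			ring is empty
 *	... consume vq->iov[0..out+in) ...
 *	vhost_add_used_and_signal(dev, vq, head, len);
 *
 * If a fetched descriptor cannot be consumed, vhost_discard_vq_desc()
 * returns the last n descriptors to the available ring for a later retry.
 */
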
int vhost_init_used(struct vhost_virtqueue *);
int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
int vhost_add_used_n(struct vhost_virtqueue *, struct vring_used_elem *heads,
		     unsigned count);
void vhost_add_used_and_signal(struct vhost_dev *, struct vhost_virtqueue *,
			       unsigned int id, int len);
void vhost_add_used_and_signal_n(struct vhost_dev *, struct vhost_virtqueue *,
				 struct vring_used_elem *heads, unsigned count);
void vhost_signal(struct vhost_dev *, struct vhost_virtqueue *);
void vhost_disable_notify(struct vhost_dev *, struct vhost_virtqueue *);
bool vhost_enable_notify(struct vhost_dev *, struct vhost_virtqueue *);

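/*
 * Notification pattern (sketch): to avoid losing a guest kick that races
 * with the ring going empty, drivers disable notifications while busy and
 * recheck the ring after re-enabling them:
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(...);
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	new buffers appeared
 *			}
 *			break;
 *		}
 *		...
 *	}
 */
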
int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len);

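/*
 * With VHOST_F_LOG_ALL acked (dirty page tracking for live migration),
 * writes into guest memory are expected to be logged, roughly:
 *
 *	log = vhost_has_feature(dev, VHOST_F_LOG_ALL) ? vq->log : NULL;
 *	head = vhost_get_vq_desc(dev, vq, ..., log, &log_num);
 *	... write into the guest buffers ...
 *	if (unlikely(log))
 *		vhost_log_write(vq, log, log_num, len);
 */
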
#define vq_err(vq, fmt, ...) do {                                  \
		pr_debug(pr_fmt(fmt), ##__VA_ARGS__);       \
		if ((vq)->error_ctx)                               \
			eventfd_signal((vq)->error_ctx, 1);\
	} while (0)

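/*
 * vq_err() above logs via pr_debug() and, if the caller registered an
 * error eventfd, signals it so userspace notices the failure.
 * Example (hypothetical): vq_err(vq, "bad descriptor index %d\n", head);
 */
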
enum {
	VHOST_FEATURES = (1ULL << VIRTIO_F_NOTIFY_ON_EMPTY) |
			 (1ULL << VIRTIO_RING_F_INDIRECT_DESC) |
			 (1ULL << VIRTIO_RING_F_EVENT_IDX) |
			 (1ULL << VHOST_F_LOG_ALL),
	VHOST_NET_FEATURES = VHOST_FEATURES |
			 (1ULL << VHOST_NET_F_VIRTIO_NET_HDR) |
			 (1ULL << VIRTIO_NET_F_MRG_RXBUF),
};

static inline int vhost_has_feature(struct vhost_dev *dev, int bit)
{
	unsigned acked_features;

	/* TODO: check that we are running from vhost_worker or dev mutex is
	 * held? */
	acked_features = rcu_dereference_index_check(dev->acked_features, 1);
	return acked_features & (1 << bit);
}

void vhost_enable_zcopy(int vq);

#endif