/* Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2006 Rusty Russell IBM Corporation
 *
 * Author: Michael S. Tsirkin <mst@redhat.com>
 *
 * Inspiration, some code, and most witty comments come from
 * Documentation/virtual/lguest/lguest.c, by Rusty Russell
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Generic code for virtio server in host kernel.
 */

#include <linux/eventfd.h>
#include <linux/vhost.h>
#include <linux/uio.h>
#include <linux/mm.h>
#include <linux/mmu_context.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/cgroup.h>
#include <linux/module.h>

#include "vhost.h"

enum {
	VHOST_MEMORY_MAX_NREGIONS = 64,
	VHOST_MEMORY_F_LOG = 0x1,
};

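/* With VIRTIO_RING_F_EVENT_IDX, the guest publishes its used_event threshold
 * in the u16 slot just past the avail ring, and the host publishes its
 * avail_event threshold just past the used ring; these macros name those
 * two out-of-band slots. */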
#define vhost_used_event(vq) ((u16 __user *)&vq->avail->ring[vq->num])
#define vhost_avail_event(vq) ((u16 __user *)&vq->used->ring[vq->num])

static void vhost_poll_func(struct file *file, wait_queue_head_t *wqh,
			    poll_table *pt)
{
	struct vhost_poll *poll;

	poll = container_of(pt, struct vhost_poll, table);
	poll->wqh = wqh;
	add_wait_queue(wqh, &poll->wait);
}

static int vhost_poll_wakeup(wait_queue_t *wait, unsigned mode, int sync,
			     void *key)
{
	struct vhost_poll *poll = container_of(wait, struct vhost_poll, wait);

	if (!((unsigned long)key & poll->mask))
		return 0;

	vhost_poll_queue(poll);
	return 0;
}

void vhost_work_init(struct vhost_work *work, vhost_work_fn_t fn)
{
	INIT_LIST_HEAD(&work->node);
	work->fn = fn;
	init_waitqueue_head(&work->done);
	work->flushing = 0;
	work->queue_seq = work->done_seq = 0;
}
EXPORT_SYMBOL_GPL(vhost_work_init);

/* Init poll structure */
void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
		     unsigned long mask, struct vhost_dev *dev)
{
	init_waitqueue_func_entry(&poll->wait, vhost_poll_wakeup);
	init_poll_funcptr(&poll->table, vhost_poll_func);
	poll->mask = mask;
	poll->dev = dev;
	poll->wqh = NULL;

	vhost_work_init(&poll->work, fn);
}
EXPORT_SYMBOL_GPL(vhost_poll_init);

/* Start polling a file. We add ourselves to file's wait queue. The caller must
 * keep a reference to the file until after vhost_poll_stop is called. */
int vhost_poll_start(struct vhost_poll *poll, struct file *file)
{
	unsigned long mask;
	int ret = 0;

	if (poll->wqh)
		return 0;

	mask = file->f_op->poll(file, &poll->table);
	if (mask)
		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
	if (mask & POLLERR) {
		if (poll->wqh)
			remove_wait_queue(poll->wqh, &poll->wait);
		ret = -EINVAL;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(vhost_poll_start);

/* Stop polling a file. After this function returns, it becomes safe to drop
 * the file reference. You must also flush afterwards. */
void vhost_poll_stop(struct vhost_poll *poll)
{
	if (poll->wqh) {
		remove_wait_queue(poll->wqh, &poll->wait);
		poll->wqh = NULL;
	}
}
EXPORT_SYMBOL_GPL(vhost_poll_stop);

static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
				unsigned seq)
{
	int left;

	spin_lock_irq(&dev->work_lock);
	left = seq - work->done_seq;
	spin_unlock_irq(&dev->work_lock);
	return left <= 0;
}

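/* Flush protocol: snapshot the work's queue_seq, then sleep until the worker
 * reports a done_seq that has caught up with that snapshot.  This guarantees
 * that any instance of the work queued before the flush began has finished
 * running.  The flushing counter keeps the worker waking waiters for as long
 * as at least one flush is in progress. */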
void vhost_work_flush(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned seq;
	int flushing;

	spin_lock_irq(&dev->work_lock);
	seq = work->queue_seq;
	work->flushing++;
	spin_unlock_irq(&dev->work_lock);
	wait_event(work->done, vhost_work_seq_done(dev, work, seq));
	spin_lock_irq(&dev->work_lock);
	flushing = --work->flushing;
	spin_unlock_irq(&dev->work_lock);
	BUG_ON(flushing < 0);
}
EXPORT_SYMBOL_GPL(vhost_work_flush);

/* Flush any work that has been scheduled. When calling this, don't hold any
 * locks that are also used by the callback. */
void vhost_poll_flush(struct vhost_poll *poll)
{
	vhost_work_flush(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_flush);

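/* Queueing is idempotent: a work item that is already on work_list is left
 * where it is, so concurrent wakeups cannot enqueue it twice. */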
void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->work_lock, flags);
	if (list_empty(&work->node)) {
		list_add_tail(&work->node, &dev->work_list);
		work->queue_seq++;
		spin_unlock_irqrestore(&dev->work_lock, flags);
		wake_up_process(dev->worker);
	} else {
		spin_unlock_irqrestore(&dev->work_lock, flags);
	}
}
EXPORT_SYMBOL_GPL(vhost_work_queue);

void vhost_poll_queue(struct vhost_poll *poll)
{
	vhost_work_queue(poll->dev, &poll->work);
}
EXPORT_SYMBOL_GPL(vhost_poll_queue);

static void vhost_vq_reset(struct vhost_dev *dev,
			   struct vhost_virtqueue *vq)
{
	vq->num = 1;
	vq->desc = NULL;
	vq->avail = NULL;
	vq->used = NULL;
	vq->last_avail_idx = 0;
	vq->avail_idx = 0;
	vq->last_used_idx = 0;
	vq->signalled_used = 0;
	vq->signalled_used_valid = false;
	vq->used_flags = 0;
	vq->log_used = false;
	vq->log_addr = -1ull;
	vq->private_data = NULL;
	vq->acked_features = 0;
	vq->log_base = NULL;
	vq->error_ctx = NULL;
	vq->error = NULL;
	vq->kick = NULL;
	vq->call_ctx = NULL;
	vq->call = NULL;
	vq->log_ctx = NULL;
	vq->memory = NULL;
}

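/* The per-device worker thread.  It runs each queued work item in turn with
 * the owner's mm made current (use_mm), and publishes done_seq after each
 * item so that vhost_work_flush() callers can tell when their snapshot has
 * completed. */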
static int vhost_worker(void *data)
{
	struct vhost_dev *dev = data;
	struct vhost_work *work = NULL;
	unsigned uninitialized_var(seq);
	mm_segment_t oldfs = get_fs();

	set_fs(USER_DS);
	use_mm(dev->mm);

	for (;;) {
		/* mb paired w/ kthread_stop */
		set_current_state(TASK_INTERRUPTIBLE);

		spin_lock_irq(&dev->work_lock);
		if (work) {
			work->done_seq = seq;
			if (work->flushing)
				wake_up_all(&work->done);
		}

		if (kthread_should_stop()) {
			spin_unlock_irq(&dev->work_lock);
			__set_current_state(TASK_RUNNING);
			break;
		}
		if (!list_empty(&dev->work_list)) {
			work = list_first_entry(&dev->work_list,
						struct vhost_work, node);
			list_del_init(&work->node);
			seq = work->queue_seq;
		} else
			work = NULL;
		spin_unlock_irq(&dev->work_lock);

		if (work) {
			__set_current_state(TASK_RUNNING);
			work->fn(work);
			if (need_resched())
				schedule();
		} else
			schedule();
	}
	unuse_mm(dev->mm);
	set_fs(oldfs);
	return 0;
}

static void vhost_vq_free_iovecs(struct vhost_virtqueue *vq)
{
	kfree(vq->indirect);
	vq->indirect = NULL;
	kfree(vq->log);
	vq->log = NULL;
	kfree(vq->heads);
	vq->heads = NULL;
}

/* Helper to allocate iovec buffers for all vqs. */
static long vhost_dev_alloc_iovecs(struct vhost_dev *dev)
{
	struct vhost_virtqueue *vq;
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->indirect = kmalloc(sizeof *vq->indirect * UIO_MAXIOV,
				       GFP_KERNEL);
		vq->log = kmalloc(sizeof *vq->log * UIO_MAXIOV, GFP_KERNEL);
		vq->heads = kmalloc(sizeof *vq->heads * UIO_MAXIOV, GFP_KERNEL);
		if (!vq->indirect || !vq->log || !vq->heads)
			goto err_nomem;
	}
	return 0;

err_nomem:
	for (; i >= 0; --i)
		vhost_vq_free_iovecs(dev->vqs[i]);
	return -ENOMEM;
}

static void vhost_dev_free_iovecs(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i)
		vhost_vq_free_iovecs(dev->vqs[i]);
}

void vhost_dev_init(struct vhost_dev *dev,
		    struct vhost_virtqueue **vqs, int nvqs)
{
	struct vhost_virtqueue *vq;
	int i;

	dev->vqs = vqs;
	dev->nvqs = nvqs;
	mutex_init(&dev->mutex);
	dev->log_ctx = NULL;
	dev->log_file = NULL;
	dev->memory = NULL;
	dev->mm = NULL;
	spin_lock_init(&dev->work_lock);
	INIT_LIST_HEAD(&dev->work_list);
	dev->worker = NULL;

	for (i = 0; i < dev->nvqs; ++i) {
		vq = dev->vqs[i];
		vq->log = NULL;
		vq->indirect = NULL;
		vq->heads = NULL;
		vq->dev = dev;
		mutex_init(&vq->mutex);
		vhost_vq_reset(dev, vq);
		if (vq->handle_kick)
			vhost_poll_init(&vq->poll, vq->handle_kick,
					POLLIN, dev);
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_init);

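/* A rough sketch of how a backend such as vhost-net or vhost-scsi typically
 * drives this API: vhost_dev_init() at open time; vhost_dev_set_owner() on
 * VHOST_SET_OWNER; vhost_dev_ioctl()/vhost_vring_ioctl() to wire up the
 * memory table, rings and eventfds; then, from its handle_kick work, a loop
 * of vhost_get_vq_desc() followed by vhost_add_used() per buffer; and
 * finally vhost_dev_stop() plus vhost_dev_cleanup() on release. */
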
/* Caller should have device mutex */
long vhost_dev_check_owner(struct vhost_dev *dev)
{
	/* Are you the owner? If not, I don't think you mean to do that */
	return dev->mm == current->mm ? 0 : -EPERM;
}
EXPORT_SYMBOL_GPL(vhost_dev_check_owner);

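/* Attaching to the owner's cgroups has to happen from within the worker
 * thread itself, so it is dispatched as a work item and flushed; the worker
 * thereby inherits the cgroups of the process that took ownership. */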
struct vhost_attach_cgroups_struct {
	struct vhost_work work;
	struct task_struct *owner;
	int ret;
};

static void vhost_attach_cgroups_work(struct vhost_work *work)
{
	struct vhost_attach_cgroups_struct *s;

	s = container_of(work, struct vhost_attach_cgroups_struct, work);
	s->ret = cgroup_attach_task_all(s->owner, current);
}

static int vhost_attach_cgroups(struct vhost_dev *dev)
{
	struct vhost_attach_cgroups_struct attach;

	attach.owner = current;
	vhost_work_init(&attach.work, vhost_attach_cgroups_work);
	vhost_work_queue(dev, &attach.work);
	vhost_work_flush(dev, &attach.work);
	return attach.ret;
}

/* Caller should have device mutex */
bool vhost_dev_has_owner(struct vhost_dev *dev)
{
	return dev->mm;
}
EXPORT_SYMBOL_GPL(vhost_dev_has_owner);

/* Caller should have device mutex */
long vhost_dev_set_owner(struct vhost_dev *dev)
{
	struct task_struct *worker;
	int err;

	/* Is there an owner already? */
	if (vhost_dev_has_owner(dev)) {
		err = -EBUSY;
		goto err_mm;
	}

	/* No owner, become one */
	dev->mm = get_task_mm(current);
	worker = kthread_create(vhost_worker, dev, "vhost-%d", current->pid);
	if (IS_ERR(worker)) {
		err = PTR_ERR(worker);
		goto err_worker;
	}

	dev->worker = worker;
	wake_up_process(worker);	/* avoid contributing to loadavg */

	err = vhost_attach_cgroups(dev);
	if (err)
		goto err_cgroup;

	err = vhost_dev_alloc_iovecs(dev);
	if (err)
		goto err_cgroup;

	return 0;
err_cgroup:
	kthread_stop(worker);
	dev->worker = NULL;
err_worker:
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
err_mm:
	return err;
}
EXPORT_SYMBOL_GPL(vhost_dev_set_owner);

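/* The empty memory table is allocated ahead of time so that the reset
 * itself has no failure path once vhost_dev_cleanup() has run. */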
struct vhost_memory *vhost_dev_reset_owner_prepare(void)
{
	return kmalloc(offsetof(struct vhost_memory, regions), GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner_prepare);

/* Caller should have device mutex */
void vhost_dev_reset_owner(struct vhost_dev *dev, struct vhost_memory *memory)
{
	int i;

	vhost_dev_cleanup(dev, true);

	/* Restore memory to default empty mapping. */
	memory->nregions = 0;
	dev->memory = memory;
	/* We don't need VQ locks below since vhost_dev_cleanup makes sure
	 * VQs aren't running.
	 */
	for (i = 0; i < dev->nvqs; ++i)
		dev->vqs[i]->memory = memory;
}
EXPORT_SYMBOL_GPL(vhost_dev_reset_owner);

void vhost_dev_stop(struct vhost_dev *dev)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->kick && dev->vqs[i]->handle_kick) {
			vhost_poll_stop(&dev->vqs[i]->poll);
			vhost_poll_flush(&dev->vqs[i]->poll);
		}
	}
}
EXPORT_SYMBOL_GPL(vhost_dev_stop);

/* Caller should have device mutex if and only if locked is set */
void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
{
	int i;

	for (i = 0; i < dev->nvqs; ++i) {
		if (dev->vqs[i]->error_ctx)
			eventfd_ctx_put(dev->vqs[i]->error_ctx);
		if (dev->vqs[i]->error)
			fput(dev->vqs[i]->error);
		if (dev->vqs[i]->kick)
			fput(dev->vqs[i]->kick);
		if (dev->vqs[i]->call_ctx)
			eventfd_ctx_put(dev->vqs[i]->call_ctx);
		if (dev->vqs[i]->call)
			fput(dev->vqs[i]->call);
		vhost_vq_reset(dev, dev->vqs[i]);
	}
	vhost_dev_free_iovecs(dev);
	if (dev->log_ctx)
		eventfd_ctx_put(dev->log_ctx);
	dev->log_ctx = NULL;
	if (dev->log_file)
		fput(dev->log_file);
	dev->log_file = NULL;
	/* No one will access memory at this point */
	kfree(dev->memory);
	dev->memory = NULL;
	WARN_ON(!list_empty(&dev->work_list));
	if (dev->worker) {
		kthread_stop(dev->worker);
		dev->worker = NULL;
	}
	if (dev->mm)
		mmput(dev->mm);
	dev->mm = NULL;
}
EXPORT_SYMBOL_GPL(vhost_dev_cleanup);

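/* The dirty log is a userspace bitmap with one bit per VHOST_PAGE_SIZE page
 * of guest memory, so guest address 'addr' maps to byte
 * addr / VHOST_PAGE_SIZE / 8 within it. */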
static int log_access_ok(void __user *log_base, u64 addr, unsigned long sz)
{
	u64 a = addr / VHOST_PAGE_SIZE / 8;

	/* Make sure 64 bit math will not overflow. */
	if (a > ULONG_MAX - (unsigned long)log_base ||
	    a + (unsigned long)log_base > ULONG_MAX)
		return 0;

	return access_ok(VERIFY_WRITE, log_base + a,
			 (sz + VHOST_PAGE_SIZE * 8 - 1) / VHOST_PAGE_SIZE / 8);
}

/* Caller should have vq mutex and device mutex. */
static int vq_memory_access_ok(void __user *log_base, struct vhost_memory *mem,
			       int log_all)
{
	int i;

	if (!mem)
		return 0;

	for (i = 0; i < mem->nregions; ++i) {
		struct vhost_memory_region *m = mem->regions + i;
		unsigned long a = m->userspace_addr;
		if (m->memory_size > ULONG_MAX)
			return 0;
		else if (!access_ok(VERIFY_WRITE, (void __user *)a,
				    m->memory_size))
			return 0;
		else if (log_all && !log_access_ok(log_base,
						   m->guest_phys_addr,
						   m->memory_size))
			return 0;
	}
	return 1;
}

/* Can we switch to this memory table? */
/* Caller should have device mutex but not vq mutex */
static int memory_access_ok(struct vhost_dev *d, struct vhost_memory *mem,
			    int log_all)
{
	int i;

	for (i = 0; i < d->nvqs; ++i) {
		int ok;
		bool log;

		mutex_lock(&d->vqs[i]->mutex);
		log = log_all || vhost_has_feature(d->vqs[i], VHOST_F_LOG_ALL);
		/* If ring is inactive, will check when it's enabled. */
		if (d->vqs[i]->private_data)
			ok = vq_memory_access_ok(d->vqs[i]->log_base, mem, log);
		else
			ok = 1;
		mutex_unlock(&d->vqs[i]->mutex);
		if (!ok)
			return 0;
	}
	return 1;
}

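/* The extra 2 bytes ('s') cover the used_event/avail_event fields that
 * follow the rings when VIRTIO_RING_F_EVENT_IDX has been negotiated. */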
static int vq_access_ok(struct vhost_virtqueue *vq, unsigned int num,
			struct vring_desc __user *desc,
			struct vring_avail __user *avail,
			struct vring_used __user *used)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;
	return access_ok(VERIFY_READ, desc, num * sizeof *desc) &&
	       access_ok(VERIFY_READ, avail,
			 sizeof *avail + num * sizeof *avail->ring + s) &&
	       access_ok(VERIFY_WRITE, used,
			 sizeof *used + num * sizeof *used->ring + s);
}

/* Can we log writes? */
/* Caller should have device mutex but not vq mutex */
int vhost_log_access_ok(struct vhost_dev *dev)
{
	return memory_access_ok(dev, dev->memory, 1);
}
EXPORT_SYMBOL_GPL(vhost_log_access_ok);

/* Verify access for write logging. */
/* Caller should have vq mutex and device mutex */
static int vq_log_access_ok(struct vhost_virtqueue *vq,
			    void __user *log_base)
{
	size_t s = vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX) ? 2 : 0;

	return vq_memory_access_ok(log_base, vq->memory,
				   vhost_has_feature(vq, VHOST_F_LOG_ALL)) &&
		(!vq->log_used || log_access_ok(log_base, vq->log_addr,
					sizeof *vq->used +
					vq->num * sizeof *vq->used->ring + s));
}

/* Can we start vq? */
/* Caller should have vq mutex and device mutex */
int vhost_vq_access_ok(struct vhost_virtqueue *vq)
{
	return vq_access_ok(vq, vq->num, vq->desc, vq->avail, vq->used) &&
		vq_log_access_ok(vq, vq->log_base);
}
EXPORT_SYMBOL_GPL(vhost_vq_access_ok);

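/* Swap in a new memory table: copy it in from userspace, validate it against
 * every active VQ first, then publish the new table under each VQ mutex so
 * that in-flight requests never observe a half-installed mapping. */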
static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
{
	struct vhost_memory mem, *newmem, *oldmem;
	unsigned long size = offsetof(struct vhost_memory, regions);
	int i;

	if (copy_from_user(&mem, m, size))
		return -EFAULT;
	if (mem.padding)
		return -EOPNOTSUPP;
	if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS)
		return -E2BIG;
	newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL);
	if (!newmem)
		return -ENOMEM;

	memcpy(newmem, &mem, size);
	if (copy_from_user(newmem->regions, m->regions,
			   mem.nregions * sizeof *m->regions)) {
		kfree(newmem);
		return -EFAULT;
	}

	if (!memory_access_ok(d, newmem, 0)) {
		kfree(newmem);
		return -EFAULT;
	}
	oldmem = d->memory;
	d->memory = newmem;

	/* All memory accesses are done under some VQ mutex. */
	for (i = 0; i < d->nvqs; ++i) {
		mutex_lock(&d->vqs[i]->mutex);
		d->vqs[i]->memory = newmem;
		mutex_unlock(&d->vqs[i]->mutex);
	}
	kfree(oldmem);
	return 0;
}

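/* Per-virtqueue ioctls: ring size, base index, ring addresses and the
 * kick/call/error eventfds.  The vq mutex is taken here; the caller is
 * expected to hold the device mutex. */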
long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	bool pollstart = false, pollstop = false;
	struct eventfd_ctx *ctx = NULL;
	u32 __user *idxp = argp;
	struct vhost_virtqueue *vq;
	struct vhost_vring_state s;
	struct vhost_vring_file f;
	struct vhost_vring_addr a;
	u32 idx;
	long r;

	r = get_user(idx, idxp);
	if (r < 0)
		return r;
	if (idx >= d->nvqs)
		return -ENOBUFS;

	vq = d->vqs[idx];

	mutex_lock(&vq->mutex);

	switch (ioctl) {
	case VHOST_SET_VRING_NUM:
		/* Resizing ring with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (!s.num || s.num > 0xffff || (s.num & (s.num - 1))) {
			r = -EINVAL;
			break;
		}
		vq->num = s.num;
		break;
	case VHOST_SET_VRING_BASE:
		/* Moving base with an active backend?
		 * You don't want to do that. */
		if (vq->private_data) {
			r = -EBUSY;
			break;
		}
		if (copy_from_user(&s, argp, sizeof s)) {
			r = -EFAULT;
			break;
		}
		if (s.num > 0xffff) {
			r = -EINVAL;
			break;
		}
		vq->last_avail_idx = s.num;
		/* Forget the cached index value. */
		vq->avail_idx = vq->last_avail_idx;
		break;
	case VHOST_GET_VRING_BASE:
		s.index = idx;
		s.num = vq->last_avail_idx;
		if (copy_to_user(argp, &s, sizeof s))
			r = -EFAULT;
		break;
	case VHOST_SET_VRING_ADDR:
		if (copy_from_user(&a, argp, sizeof a)) {
			r = -EFAULT;
			break;
		}
		if (a.flags & ~(0x1 << VHOST_VRING_F_LOG)) {
			r = -EOPNOTSUPP;
			break;
		}
		/* For 32bit, verify that the top 32bits of the user
		 * data are set to zero. */
		if ((u64)(unsigned long)a.desc_user_addr != a.desc_user_addr ||
		    (u64)(unsigned long)a.used_user_addr != a.used_user_addr ||
		    (u64)(unsigned long)a.avail_user_addr != a.avail_user_addr) {
			r = -EFAULT;
			break;
		}
		if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
		    (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
		    (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
			r = -EINVAL;
			break;
		}

		/* We only verify access here if backend is configured.
		 * If it is not, we don't as size might not have been setup.
		 * We will verify when backend is configured. */
		if (vq->private_data) {
			if (!vq_access_ok(vq, vq->num,
				(void __user *)(unsigned long)a.desc_user_addr,
				(void __user *)(unsigned long)a.avail_user_addr,
				(void __user *)(unsigned long)a.used_user_addr)) {
				r = -EINVAL;
				break;
			}

			/* Also validate log access for used ring if enabled. */
			if ((a.flags & (0x1 << VHOST_VRING_F_LOG)) &&
			    !log_access_ok(vq->log_base, a.log_guest_addr,
					   sizeof *vq->used +
					   vq->num * sizeof *vq->used->ring)) {
				r = -EINVAL;
				break;
			}
		}

		vq->log_used = !!(a.flags & (0x1 << VHOST_VRING_F_LOG));
		vq->desc = (void __user *)(unsigned long)a.desc_user_addr;
		vq->avail = (void __user *)(unsigned long)a.avail_user_addr;
		vq->log_addr = a.log_guest_addr;
		vq->used = (void __user *)(unsigned long)a.used_user_addr;
		break;
	case VHOST_SET_VRING_KICK:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->kick) {
			pollstop = (filep = vq->kick) != NULL;
			pollstart = (vq->kick = eventfp) != NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_CALL:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->call) {
			filep = vq->call;
			ctx = vq->call_ctx;
			vq->call = eventfp;
			vq->call_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	case VHOST_SET_VRING_ERR:
		if (copy_from_user(&f, argp, sizeof f)) {
			r = -EFAULT;
			break;
		}
		eventfp = f.fd == -1 ? NULL : eventfd_fget(f.fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != vq->error) {
			filep = vq->error;
			vq->error = eventfp;
			ctx = vq->error_ctx;
			vq->error_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		break;
	default:
		r = -ENOIOCTLCMD;
	}

	if (pollstop && vq->handle_kick)
		vhost_poll_stop(&vq->poll);

	if (ctx)
		eventfd_ctx_put(ctx);
	if (filep)
		fput(filep);

	if (pollstart && vq->handle_kick)
		r = vhost_poll_start(&vq->poll, vq->kick);

	mutex_unlock(&vq->mutex);

	if (pollstop && vq->handle_kick)
		vhost_poll_flush(&vq->poll);
	return r;
}
EXPORT_SYMBOL_GPL(vhost_vring_ioctl);

/* Caller must have device mutex */
long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
{
	struct file *eventfp, *filep = NULL;
	struct eventfd_ctx *ctx = NULL;
	u64 p;
	long r;
	int i, fd;

	/* If you are not the owner, you can become one */
	if (ioctl == VHOST_SET_OWNER) {
		r = vhost_dev_set_owner(d);
		goto done;
	}

	/* You must be the owner to do anything else */
	r = vhost_dev_check_owner(d);
	if (r)
		goto done;

	switch (ioctl) {
	case VHOST_SET_MEM_TABLE:
		r = vhost_set_memory(d, argp);
		break;
	case VHOST_SET_LOG_BASE:
		if (copy_from_user(&p, argp, sizeof p)) {
			r = -EFAULT;
			break;
		}
		if ((u64)(unsigned long)p != p) {
			r = -EFAULT;
			break;
		}
		for (i = 0; i < d->nvqs; ++i) {
			struct vhost_virtqueue *vq;
			void __user *base = (void __user *)(unsigned long)p;
			vq = d->vqs[i];
			mutex_lock(&vq->mutex);
			/* If ring is inactive, will check when it's enabled. */
			if (vq->private_data && !vq_log_access_ok(vq, base))
				r = -EFAULT;
			else
				vq->log_base = base;
			mutex_unlock(&vq->mutex);
		}
		break;
	case VHOST_SET_LOG_FD:
		r = get_user(fd, (int __user *)argp);
		if (r < 0)
			break;
		eventfp = fd == -1 ? NULL : eventfd_fget(fd);
		if (IS_ERR(eventfp)) {
			r = PTR_ERR(eventfp);
			break;
		}
		if (eventfp != d->log_file) {
			filep = d->log_file;
			ctx = d->log_ctx;
			d->log_ctx = eventfp ?
				eventfd_ctx_fileget(eventfp) : NULL;
		} else
			filep = eventfp;
		for (i = 0; i < d->nvqs; ++i) {
			mutex_lock(&d->vqs[i]->mutex);
			d->vqs[i]->log_ctx = d->log_ctx;
			mutex_unlock(&d->vqs[i]->mutex);
		}
		if (ctx)
			eventfd_ctx_put(ctx);
		if (filep)
			fput(filep);
		break;
	default:
		r = -ENOIOCTLCMD;
		break;
	}
done:
	return r;
}
EXPORT_SYMBOL_GPL(vhost_dev_ioctl);

static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
						     __u64 addr, __u32 len)
{
	struct vhost_memory_region *reg;
	int i;

	/* linear search is not brilliant, but we really have on the order of 6
	 * regions in practice */
	for (i = 0; i < mem->nregions; ++i) {
		reg = mem->regions + i;
		if (reg->guest_phys_addr <= addr &&
		    reg->guest_phys_addr + reg->memory_size - 1 >= addr)
			return reg;
	}
	return NULL;
}

/* TODO: This is really inefficient. We need something like get_user()
 * (instruction directly accesses the data, with an exception table entry
 * returning -EFAULT). See Documentation/x86/exception-tables.txt.
 */
static int set_bit_to_user(int nr, void __user *addr)
{
	unsigned long log = (unsigned long)addr;
	struct page *page;
	void *base;
	int bit = nr + (log % PAGE_SIZE) * 8;
	int r;

	r = get_user_pages_fast(log, 1, 1, &page);
	if (r < 0)
		return r;
	BUG_ON(r != 1);
	base = kmap_atomic(page);
	set_bit(bit, base);
	kunmap_atomic(base);
	set_page_dirty_lock(page);
	put_page(page);
	return 0;
}

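/* Mark every page in [write_address, write_address + write_length) dirty in
 * the userspace log bitmap, one set_bit_to_user() call per page. */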
static int log_write(void __user *log_base,
		     u64 write_address, u64 write_length)
{
	u64 write_page = write_address / VHOST_PAGE_SIZE;
	int r;

	if (!write_length)
		return 0;
	write_length += write_address % VHOST_PAGE_SIZE;
	for (;;) {
		u64 base = (u64)(unsigned long)log_base;
		u64 log = base + write_page / 8;
		int bit = write_page % 8;
		if ((u64)(unsigned long)log != log)
			return -EFAULT;
		r = set_bit_to_user(bit, (void __user *)(unsigned long)log);
		if (r < 0)
			return r;
		if (write_length <= VHOST_PAGE_SIZE)
			break;
		write_length -= VHOST_PAGE_SIZE;
		write_page += 1;
	}
	return r;
}

int vhost_log_write(struct vhost_virtqueue *vq, struct vhost_log *log,
		    unsigned int log_num, u64 len)
{
	int i, r;

	/* Make sure data written is seen before log. */
	smp_wmb();
	for (i = 0; i < log_num; ++i) {
		u64 l = min(log[i].len, len);
		r = log_write(vq->log_base, log[i].addr, l);
		if (r < 0)
			return r;
		len -= l;
		if (!len) {
			if (vq->log_ctx)
				eventfd_signal(vq->log_ctx, 1);
			return 0;
		}
	}
	/* Length written exceeds what we have stored. This is a bug. */
	BUG();
	return 0;
}
EXPORT_SYMBOL_GPL(vhost_log_write);

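/* Push used_flags out to the guest-visible used ring, mirroring the write
 * into the dirty log when logging is enabled. */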
static int vhost_update_used_flags(struct vhost_virtqueue *vq)
{
	void __user *used;
	if (__put_user(vq->used_flags, &vq->used->flags) < 0)
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		/* Make sure the flag is seen before log. */
		smp_wmb();
		/* Log used flag write. */
		used = &vq->used->flags;
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof vq->used->flags);
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

static int vhost_update_avail_event(struct vhost_virtqueue *vq, u16 avail_event)
{
	if (__put_user(vq->avail_idx, vhost_avail_event(vq)))
		return -EFAULT;
	if (unlikely(vq->log_used)) {
		void __user *used;
		/* Make sure the event is seen before log. */
		smp_wmb();
		/* Log avail event write */
		used = vhost_avail_event(vq);
		log_write(vq->log_base, vq->log_addr +
			  (used - (void __user *)vq->used),
			  sizeof *vhost_avail_event(vq));
		if (vq->log_ctx)
			eventfd_signal(vq->log_ctx, 1);
	}
	return 0;
}

int vhost_init_used(struct vhost_virtqueue *vq)
{
	int r;
	if (!vq->private_data)
		return 0;

	r = vhost_update_used_flags(vq);
	if (r)
		return r;
	vq->signalled_used_valid = false;
	return get_user(vq->last_used_idx, &vq->used->idx);
}
EXPORT_SYMBOL_GPL(vhost_init_used);

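/* Translate a guest-physical range into a userspace iovec, splitting it at
 * region boundaries of the memory table.  Returns the number of iovec
 * entries used, or a negative error. */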
static int translate_desc(struct vhost_virtqueue *vq, u64 addr, u32 len,
			  struct iovec iov[], int iov_size)
{
	const struct vhost_memory_region *reg;
	struct vhost_memory *mem;
	struct iovec *_iov;
	u64 s = 0;
	int ret = 0;

	mem = vq->memory;
	while ((u64)len > s) {
		u64 size;
		if (unlikely(ret >= iov_size)) {
			ret = -ENOBUFS;
			break;
		}
		reg = find_region(mem, addr, len);
		if (unlikely(!reg)) {
			ret = -EFAULT;
			break;
		}
		_iov = iov + ret;
		size = reg->memory_size - addr + reg->guest_phys_addr;
		_iov->iov_len = min((u64)len - s, size);
		_iov->iov_base = (void __user *)(unsigned long)
			(reg->userspace_addr + addr - reg->guest_phys_addr);
		s += size;
		addr += size;
		++ret;
	}

	return ret;
}

/* Each buffer in the virtqueues is actually a chain of descriptors. This
 * function returns the next descriptor in the chain,
 * or -1U if we're at the end. */
static unsigned next_desc(struct vring_desc *desc)
{
	unsigned int next;

	/* If this descriptor says it doesn't chain, we're done. */
	if (!(desc->flags & VRING_DESC_F_NEXT))
		return -1U;

	/* Check they're not leading us off end of descriptors. */
	next = desc->next;
	/* Make sure compiler knows to grab that: we don't want it changing! */
	/* We will use the result as an index in an array, so most
	 * architectures only need a compiler barrier here. */
	read_barrier_depends();

	return next;
}

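/* An indirect descriptor points at a table of further descriptors in guest
 * memory; walk that table, translating each entry, with the same
 * out-before-in ordering rules as the main ring. */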
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001108static int get_indirect(struct vhost_virtqueue *vq,
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001109 struct iovec iov[], unsigned int iov_size,
1110 unsigned int *out_num, unsigned int *in_num,
1111 struct vhost_log *log, unsigned int *log_num,
1112 struct vring_desc *indirect)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001113{
1114 struct vring_desc desc;
1115 unsigned int i = 0, count, found = 0;
1116 int ret;
1117
1118 /* Sanity check */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001119 if (unlikely(indirect->len % sizeof desc)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001120 vq_err(vq, "Invalid length in indirect descriptor: "
1121 "len 0x%llx not multiple of 0x%zx\n",
1122 (unsigned long long)indirect->len,
1123 sizeof desc);
1124 return -EINVAL;
1125 }
1126
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001127 ret = translate_desc(vq, indirect->addr, indirect->len, vq->indirect,
Jason Wange0e9b402010-09-14 23:53:05 +08001128 UIO_MAXIOV);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001129 if (unlikely(ret < 0)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001130 vq_err(vq, "Translation failure %d in indirect.\n", ret);
1131 return ret;
1132 }
1133
1134 /* We will use the result as an address to read from, so most
1135 * architectures only need a compiler barrier here. */
1136 read_barrier_depends();
1137
1138 count = indirect->len / sizeof desc;
1139 /* Buffers are chained via a 16 bit next field, so
1140 * we can have at most 2^16 of these. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001141 if (unlikely(count > USHRT_MAX + 1)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001142 vq_err(vq, "Indirect buffer length too big: %d\n",
1143 indirect->len);
1144 return -E2BIG;
1145 }
1146
1147 do {
1148 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001149 if (unlikely(++found > count)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001150 vq_err(vq, "Loop detected: last one at %u "
1151 "indirect size %u\n",
1152 i, count);
1153 return -EINVAL;
1154 }
Krishna Kumard47effe2011-03-01 17:06:37 +05301155 if (unlikely(memcpy_fromiovec((unsigned char *)&desc,
1156 vq->indirect, sizeof desc))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001157 vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
1158 i, (size_t)indirect->addr + i * sizeof desc);
1159 return -EINVAL;
1160 }
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001161 if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001162 vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
1163 i, (size_t)indirect->addr + i * sizeof desc);
1164 return -EINVAL;
1165 }
1166
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001167 ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001168 iov_size - iov_count);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001169 if (unlikely(ret < 0)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001170 vq_err(vq, "Translation failure %d indirect idx %d\n",
1171 ret, i);
1172 return ret;
1173 }
1174 /* If this is an input descriptor, increment that count. */
1175 if (desc.flags & VRING_DESC_F_WRITE) {
1176 *in_num += ret;
1177 if (unlikely(log)) {
1178 log[*log_num].addr = desc.addr;
1179 log[*log_num].len = desc.len;
1180 ++*log_num;
1181 }
1182 } else {
1183 /* If it's an output descriptor, they're all supposed
1184 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001185 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001186 vq_err(vq, "Indirect descriptor "
1187 "has out after in: idx %d\n", i);
1188 return -EINVAL;
1189 }
1190 *out_num += ret;
1191 }
1192 } while ((i = next_desc(&desc)) != -1);
1193 return 0;
1194}
1195
1196/* This looks in the virtqueue and for the first available buffer, and converts
1197 * it to an iovec for convenient access. Since descriptors consist of some
1198 * number of output then some number of input descriptors, it's actually two
1199 * iovecs, but we pack them into one and note how many of each there were.
1200 *
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001201 * This function returns the descriptor number found, or vq->num (which is
1202 * never a valid descriptor number) if none was found. A negative code is
1203 * returned on error. */
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001204int vhost_get_vq_desc(struct vhost_virtqueue *vq,
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001205 struct iovec iov[], unsigned int iov_size,
1206 unsigned int *out_num, unsigned int *in_num,
1207 struct vhost_log *log, unsigned int *log_num)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001208{
1209 struct vring_desc desc;
1210 unsigned int i, head, found = 0;
1211 u16 last_avail_idx;
1212 int ret;
1213
1214 /* Check it isn't doing very strange things with descriptor numbers. */
1215 last_avail_idx = vq->last_avail_idx;
Michael S. Tsirkin8b7347a2010-09-19 15:56:30 +02001216 if (unlikely(__get_user(vq->avail_idx, &vq->avail->idx))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001217 vq_err(vq, "Failed to access avail idx at %p\n",
1218 &vq->avail->idx);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001219 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001220 }
1221
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001222 if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001223 vq_err(vq, "Guest moved used index from %u to %u",
1224 last_avail_idx, vq->avail_idx);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001225 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001226 }
1227
1228 /* If there's nothing new since last we looked, return invalid. */
1229 if (vq->avail_idx == last_avail_idx)
1230 return vq->num;
1231
1232 /* Only get avail ring entries after they have been exposed by guest. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001233 smp_rmb();
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001234
1235 /* Grab the next descriptor number they're advertising, and increment
1236 * the index we've seen. */
Michael S. Tsirkin8b7347a2010-09-19 15:56:30 +02001237 if (unlikely(__get_user(head,
1238 &vq->avail->ring[last_avail_idx % vq->num]))) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001239 vq_err(vq, "Failed to read head: idx %d address %p\n",
1240 last_avail_idx,
1241 &vq->avail->ring[last_avail_idx % vq->num]);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001242 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001243 }
1244
1245 /* If their number is silly, that's an error. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001246 if (unlikely(head >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001247 vq_err(vq, "Guest says index %u > %u is available",
1248 head, vq->num);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001249 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001250 }
1251
1252 /* When we start there are none of either input nor output. */
1253 *out_num = *in_num = 0;
1254 if (unlikely(log))
1255 *log_num = 0;
1256
1257 i = head;
1258 do {
1259 unsigned iov_count = *in_num + *out_num;
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001260 if (unlikely(i >= vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001261 vq_err(vq, "Desc index is %u > %u, head = %u",
1262 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001263 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001264 }
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001265 if (unlikely(++found > vq->num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001266 vq_err(vq, "Loop detected: last one at %u "
1267 "vq size %u head %u\n",
1268 i, vq->num, head);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001269 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001270 }
Michael S. Tsirkinfcc042a2011-03-06 13:33:49 +02001271 ret = __copy_from_user(&desc, vq->desc + i, sizeof desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001272 if (unlikely(ret)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001273 vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
1274 i, vq->desc + i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001275 return -EFAULT;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001276 }
1277 if (desc.flags & VRING_DESC_F_INDIRECT) {
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001278 ret = get_indirect(vq, iov, iov_size,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001279 out_num, in_num,
1280 log, log_num, &desc);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001281 if (unlikely(ret < 0)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001282 vq_err(vq, "Failure detected "
1283 "in indirect descriptor at idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001284 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001285 }
1286 continue;
1287 }
1288
Michael S. Tsirkin47283be2014-06-05 15:20:27 +03001289 ret = translate_desc(vq, desc.addr, desc.len, iov + iov_count,
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001290 iov_size - iov_count);
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001291 if (unlikely(ret < 0)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001292 vq_err(vq, "Translation failure %d descriptor idx %d\n",
1293 ret, i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001294 return ret;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001295 }
1296 if (desc.flags & VRING_DESC_F_WRITE) {
1297 /* If this is an input descriptor,
1298 * increment that count. */
1299 *in_num += ret;
1300 if (unlikely(log)) {
1301 log[*log_num].addr = desc.addr;
1302 log[*log_num].len = desc.len;
1303 ++*log_num;
1304 }
1305 } else {
1306 /* If it's an output descriptor, they're all supposed
1307 * to come before any input descriptors. */
Michael S. Tsirkin7b3384f2010-07-01 18:40:12 +03001308 if (unlikely(*in_num)) {
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001309 vq_err(vq, "Descriptor has out after in: "
1310 "idx %d\n", i);
Michael S. Tsirkind5675bd2010-06-24 16:59:59 +03001311 return -EINVAL;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001312 }
1313 *out_num += ret;
1314 }
1315 } while ((i = next_desc(&desc)) != -1);
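	/* next_desc() returns the descriptor's next field while
	 * VRING_DESC_F_NEXT is set, and -1 at the end of the chain. */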
1316
1317 /* On success, increment avail index. */
1318 vq->last_avail_idx++;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001319
1320	/* Assume notifications from guest are disabled at this point;
1321	 * if they aren't, we would need to update the avail_event index. */
1322 BUG_ON(!(vq->used_flags & VRING_USED_F_NO_NOTIFY));
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001323 return head;
1324}
Asias He6ac1afb2013-05-06 16:38:21 +08001325EXPORT_SYMBOL_GPL(vhost_get_vq_desc);
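/* A sketch of the typical caller pattern (variable names "out", "in" and
 * "len" are illustrative, not part of this file; see drivers/vhost/net.c
 * for a real consumer):
 *
 *	head = vhost_get_vq_desc(vq, vq->iov, ARRAY_SIZE(vq->iov),
 *				 &out, &in, NULL, NULL);
 *	if (unlikely(head < 0))
 *		goto fail;		(userspace handed us a bad ring)
 *	if (head == vq->num)
 *		return;			(nothing is available right now)
 *	(... consume vq->iov[0] .. vq->iov[out + in - 1] ...)
 *	vhost_add_used_and_signal(dev, vq, head, len);
 */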
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001326
1327/* Reverse the effect of vhost_get_vq_desc. Useful for error handling. */
David Stevens8dd014a2010-07-27 18:52:21 +03001328void vhost_discard_vq_desc(struct vhost_virtqueue *vq, int n)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001329{
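	/* last_avail_idx is a free-running u16, so plain subtraction
	 * rewinds it correctly even across a wrap. */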
David Stevens8dd014a2010-07-27 18:52:21 +03001330 vq->last_avail_idx -= n;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001331}
Asias He6ac1afb2013-05-06 16:38:21 +08001332EXPORT_SYMBOL_GPL(vhost_discard_vq_desc);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001333
1334/* After we've used one of their buffers, we tell them about it. We'll then
1335 * want to notify the guest, using eventfd. */
1336int vhost_add_used(struct vhost_virtqueue *vq, unsigned int head, int len)
1337{
Jason Wangc49e4e52013-09-02 16:40:58 +08001338 struct vring_used_elem heads = { head, len };
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001339
Jason Wangc49e4e52013-09-02 16:40:58 +08001340 return vhost_add_used_n(vq, &heads, 1);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001341}
Asias He6ac1afb2013-05-06 16:38:21 +08001342EXPORT_SYMBOL_GPL(vhost_add_used);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001343
David Stevens8dd014a2010-07-27 18:52:21 +03001344static int __vhost_add_used_n(struct vhost_virtqueue *vq,
1345 struct vring_used_elem *heads,
1346 unsigned count)
1347{
1348 struct vring_used_elem __user *used;
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001349 u16 old, new;
David Stevens8dd014a2010-07-27 18:52:21 +03001350 int start;
1351
1352 start = vq->last_used_idx % vq->num;
1353 used = vq->used->ring + start;
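	/* A single element is the common case, so its two fields are
	 * stored directly instead of going through __copy_to_user(). */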
Jason Wangc49e4e52013-09-02 16:40:58 +08001354 if (count == 1) {
1355 if (__put_user(heads[0].id, &used->id)) {
1356 vq_err(vq, "Failed to write used id");
1357 return -EFAULT;
1358 }
1359 if (__put_user(heads[0].len, &used->len)) {
1360 vq_err(vq, "Failed to write used len");
1361 return -EFAULT;
1362 }
1363 } else if (__copy_to_user(used, heads, count * sizeof *used)) {
David Stevens8dd014a2010-07-27 18:52:21 +03001364 vq_err(vq, "Failed to write used");
1365 return -EFAULT;
1366 }
1367 if (unlikely(vq->log_used)) {
1368 /* Make sure data is seen before log. */
1369 smp_wmb();
1370 /* Log used ring entry write. */
1371 log_write(vq->log_base,
1372 vq->log_addr +
1373 ((void __user *)used - (void __user *)vq->used),
1374 count * sizeof *used);
1375 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001376 old = vq->last_used_idx;
1377 new = (vq->last_used_idx += count);
1378 /* If the driver never bothers to signal in a very long while,
1379 * used index might wrap around. If that happens, invalidate
1380	 * the signalled_used index we stored. TODO: make sure the driver
1381 * signals at least once in 2^16 and remove this. */
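	/* Concretely, (u16)(new - signalled_used) < (u16)(new - old) holds
	 * iff signalled_used lies in (old, new] modulo 2^16, i.e. this
	 * update stepped past the value cached for vring_need_event(). */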
1382 if (unlikely((u16)(new - vq->signalled_used) < (u16)(new - old)))
1383 vq->signalled_used_valid = false;
David Stevens8dd014a2010-07-27 18:52:21 +03001384 return 0;
1385}
1386
1387/* After we've used one of their buffers, we tell them about it. We'll then
1388 * want to notify the guest, using eventfd. */
1389int vhost_add_used_n(struct vhost_virtqueue *vq, struct vring_used_elem *heads,
1390 unsigned count)
1391{
1392 int start, n, r;
1393
1394 start = vq->last_used_idx % vq->num;
1395 n = vq->num - start;
1396 if (n < count) {
1397 r = __vhost_add_used_n(vq, heads, n);
1398 if (r < 0)
1399 return r;
1400 heads += n;
1401 count -= n;
1402 }
1403 r = __vhost_add_used_n(vq, heads, count);
1404
1405 /* Make sure buffer is written before we update index. */
1406 smp_wmb();
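	/* Pairs with the guest's read barrier between loading used->idx
	 * and reading the used ring entries it covers. */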
1407 if (put_user(vq->last_used_idx, &vq->used->idx)) {
1408 vq_err(vq, "Failed to increment used idx");
1409 return -EFAULT;
1410 }
1411 if (unlikely(vq->log_used)) {
1412 /* Log used index update. */
1413 log_write(vq->log_base,
1414 vq->log_addr + offsetof(struct vring_used, idx),
1415 sizeof vq->used->idx);
1416 if (vq->log_ctx)
1417 eventfd_signal(vq->log_ctx, 1);
1418 }
1419 return r;
1420}
Asias He6ac1afb2013-05-06 16:38:21 +08001421EXPORT_SYMBOL_GPL(vhost_add_used_n);
David Stevens8dd014a2010-07-27 18:52:21 +03001422
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001423static bool vhost_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001424{
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001425 __u16 old, new, event;
1426 bool v;
Michael S. Tsirkin0d499352010-05-11 19:44:17 +03001427 /* Flush out used index updates. This is paired
1428 * with the barrier that the Guest executes when enabling
1429 * interrupts. */
1430 smp_mb();
1431
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001432 if (vhost_has_feature(vq, VIRTIO_F_NOTIFY_ON_EMPTY) &&
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001433 unlikely(vq->avail_idx == vq->last_avail_idx))
1434 return true;
1435
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001436 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001437 __u16 flags;
1438 if (__get_user(flags, &vq->avail->flags)) {
1439 vq_err(vq, "Failed to get flags");
1440 return true;
1441 }
1442 return !(flags & VRING_AVAIL_F_NO_INTERRUPT);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001443 }
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001444 old = vq->signalled_used;
1445 v = vq->signalled_used_valid;
1446 new = vq->signalled_used = vq->last_used_idx;
1447 vq->signalled_used_valid = true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001448
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001449 if (unlikely(!v))
1450 return true;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001451
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001452 if (get_user(event, vhost_used_event(vq))) {
1453 vq_err(vq, "Failed to get used event idx");
1454 return true;
1455 }
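	/* vring_need_event(event, new, old) is true iff event lies in
	 * [old, new) modulo 2^16, i.e. this batch of used-index updates
	 * crossed the threshold the guest wrote into used_event. */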
1456 return vring_need_event(event, new, old);
1457}
1458
1459/* This actually signals the guest, using eventfd. */
1460void vhost_signal(struct vhost_dev *dev, struct vhost_virtqueue *vq)
1461{
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001462	/* Signal the Guest to tell them we used something up. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001463 if (vq->call_ctx && vhost_notify(dev, vq))
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001464 eventfd_signal(vq->call_ctx, 1);
1465}
Asias He6ac1afb2013-05-06 16:38:21 +08001466EXPORT_SYMBOL_GPL(vhost_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001467
1468/* And here's the combo meal deal. Supersize me! */
1469void vhost_add_used_and_signal(struct vhost_dev *dev,
1470 struct vhost_virtqueue *vq,
1471 unsigned int head, int len)
1472{
1473 vhost_add_used(vq, head, len);
1474 vhost_signal(dev, vq);
1475}
Asias He6ac1afb2013-05-06 16:38:21 +08001476EXPORT_SYMBOL_GPL(vhost_add_used_and_signal);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001477
David Stevens8dd014a2010-07-27 18:52:21 +03001478/* multi-buffer version of vhost_add_used_and_signal */
1479void vhost_add_used_and_signal_n(struct vhost_dev *dev,
1480 struct vhost_virtqueue *vq,
1481 struct vring_used_elem *heads, unsigned count)
1482{
1483 vhost_add_used_n(vq, heads, count);
1484 vhost_signal(dev, vq);
1485}
Asias He6ac1afb2013-05-06 16:38:21 +08001486EXPORT_SYMBOL_GPL(vhost_add_used_and_signal_n);
David Stevens8dd014a2010-07-27 18:52:21 +03001487
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001488/* OK, now we need to know about added descriptors. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001489bool vhost_enable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001490{
1491 u16 avail_idx;
1492 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301493
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001494 if (!(vq->used_flags & VRING_USED_F_NO_NOTIFY))
1495 return false;
1496 vq->used_flags &= ~VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001497 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08001498 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001499 if (r) {
1500 vq_err(vq, "Failed to enable notification at %p: %d\n",
1501 &vq->used->flags, r);
1502 return false;
1503 }
1504 } else {
Jason Wang2723fea2011-06-21 18:04:38 +08001505 r = vhost_update_avail_event(vq, vq->avail_idx);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001506 if (r) {
1507 vq_err(vq, "Failed to update avail event index at %p: %d\n",
1508 vhost_avail_event(vq), r);
1509 return false;
1510 }
1511 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001512 /* They could have slipped one in as we were doing that: make
1513 * sure it's written, then check again. */
Michael S. Tsirkin56593382010-02-01 07:21:02 +00001514 smp_mb();
Michael S. Tsirkin8b7347a2010-09-19 15:56:30 +02001515 r = __get_user(avail_idx, &vq->avail->idx);
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001516 if (r) {
1517 vq_err(vq, "Failed to check avail idx at %p: %d\n",
1518 &vq->avail->idx, r);
1519 return false;
1520 }
1521
David Stevens8dd014a2010-07-27 18:52:21 +03001522 return avail_idx != vq->avail_idx;
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001523}
Asias He6ac1afb2013-05-06 16:38:21 +08001524EXPORT_SYMBOL_GPL(vhost_enable_notify);
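/* A sketch of how a consumer closes the race where a buffer arrives while
 * notifications are off, using the helper above together with
 * vhost_disable_notify() below (control flow illustrative; vhost-net
 * does this with extra batching):
 *
 *	vhost_disable_notify(dev, vq);
 *	for (;;) {
 *		head = vhost_get_vq_desc(...);
 *		if (head == vq->num) {
 *			if (unlikely(vhost_enable_notify(dev, vq))) {
 *				vhost_disable_notify(dev, vq);
 *				continue;	(one slipped in; rescan)
 *			}
 *			break;		(truly empty; wait for a kick)
 *		}
 *		(... process the descriptor chain ...)
 *	}
 */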
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001525
1526/* We don't need to be notified again. */
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001527void vhost_disable_notify(struct vhost_dev *dev, struct vhost_virtqueue *vq)
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001528{
1529 int r;
Krishna Kumard47effe2011-03-01 17:06:37 +05301530
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001531 if (vq->used_flags & VRING_USED_F_NO_NOTIFY)
1532 return;
1533 vq->used_flags |= VRING_USED_F_NO_NOTIFY;
Michael S. Tsirkinea16c512014-06-05 15:20:23 +03001534 if (!vhost_has_feature(vq, VIRTIO_RING_F_EVENT_IDX)) {
Jason Wang2723fea2011-06-21 18:04:38 +08001535 r = vhost_update_used_flags(vq);
Michael S. Tsirkin8ea8cf82011-05-20 02:10:54 +03001536 if (r)
1537 vq_err(vq, "Failed to enable notification at %p: %d\n",
1538 &vq->used->flags, r);
1539 }
Michael S. Tsirkin3a4d5c92010-01-14 06:17:27 +00001540}
Asias He6ac1afb2013-05-06 16:38:21 +08001541EXPORT_SYMBOL_GPL(vhost_disable_notify);
1542
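/* vhost.ko keeps no global state of its own: the empty init/exit hooks
 * exist so that the helpers above can live in a standalone module for
 * consumers such as vhost_net and vhost_scsi to link against. */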
1543static int __init vhost_init(void)
1544{
1545 return 0;
1546}
1547
1548static void __exit vhost_exit(void)
1549{
1550}
1551
1552module_init(vhost_init);
1553module_exit(vhost_exit);
1554
1555MODULE_VERSION("0.0.1");
1556MODULE_LICENSE("GPL v2");
1557MODULE_AUTHOR("Michael S. Tsirkin");
1558MODULE_DESCRIPTION("Host kernel accelerator for virtio");