/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	return false;
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

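/*
 * A rough sketch of what alloc_indirect() builds (illustrative, not
 * part of the driver API): one out-of-band table of total_sg
 * descriptors, pre-chained through ->next, which virtqueue_add() then
 * references from a single VRING_DESC_F_INDIRECT slot in the ring.
 * For a hypothetical total_sg of 3:
 *
 *	desc[0].next = 1;  desc[1].next = 2;  desc[2].next = 3;
 *	// addr/len/flags are filled in later by virtqueue_add();
 *	// the final ->next is masked off when the chain terminates.
 *
 * The table costs total_sg * sizeof(struct vring_desc) bytes (16 per
 * entry) but consumes only one descriptor in the ring itself, which
 * is why going indirect helps under descriptor pressure.
 */
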
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
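
/*
 * Usage sketch (illustrative only; "req", "status" and "token" are
 * hypothetical driver-side objects): queue a request with a readable
 * header followed by a writable status byte, the mixed-direction case
 * virtqueue_add_sgs() exists for:
 *
 *	struct scatterlist hdr, status, *sgs[2];
 *	int err;
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&status, &req->status, sizeof(req->status));
 *	sgs[0] = &hdr;		// out_sgs = 1: device reads this
 *	sgs[1] = &status;	// in_sgs = 1: device writes this
 *
 *	err = virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC);
 *	if (!err)
 *		virtqueue_kick(vq);
 */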

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
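
/*
 * Usage sketch for the single-scatterlist helpers (illustrative;
 * "page" and "token" are hypothetical): hand the device an empty page
 * to fill, virtio-net receive style:
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, page_address(page), PAGE_SIZE);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *
 * virtqueue_add_outbuf() is symmetrical, for buffers the device may
 * only read.
 */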

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
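
/*
 * Sketch of the split-kick pattern described above (illustrative;
 * "priv" and its lock are hypothetical): do the serialized half under
 * the driver's lock, and the possibly-slow notify outside it:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);	// may trap to the hypervisor
 */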

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
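
/*
 * Typical consumption loop (illustrative; process() is a hypothetical
 * driver function), usually run from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		process(token, len);	// len = bytes the device wrote
 */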

/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
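
/*
 * Race-free re-enable pattern that prepare/poll are designed for
 * (the same shape virtio-net's polling uses); illustrative sketch:
 *
 *	unsigned opaque;
 *
 *	virtqueue_disable_cb(vq);
 *	// ... drain with virtqueue_get_buf() ...
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		// A buffer was used between the drain and the enable;
 *		// no interrupt is guaranteed for it, so drain again
 *		// instead of sleeping.
 *		virtqueue_disable_cb(vq);
 *		// ... repeat the drain ...
 *	}
 */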

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
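
/*
 * Illustrative use of the delayed variant in a transmit path that
 * only wants an interrupt once ~3/4 of the in-flight buffers have
 * been consumed ("free_tx" and "reschedule_cleanup" are hypothetical
 * helpers):
 *
 *	virtqueue_disable_cb(vq);
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		free_tx(token);
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		reschedule_cleanup();	// many buffers still pending
 */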

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
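
/*
 * Shutdown sketch (illustrative; free_token() is hypothetical): once
 * the device has been reset and the queue is quiescent, reclaim every
 * token that was never returned as used:
 *
 *	void *token;
 *
 *	while ((token = virtqueue_detach_unused_buf(vq)) != NULL)
 *		free_token(token);
 */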

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
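
/*
 * Transport-side sketch (illustrative; a real transport such as
 * virtio_mmio wraps this in device-specific setup, and "my_notify"
 * and "my_callback" are hypothetical).  The ring memory is sized with
 * vring_size() and must match the alignment passed in:
 *
 *	unsigned int num = 256;	// must be a power of 2
 *	void *pages = alloc_pages_exact(
 *			PAGE_ALIGN(vring_size(num, PAGE_SIZE)),
 *			GFP_KERNEL | __GFP_ZERO);
 *
 *	vq = vring_new_virtqueue(0, num, PAGE_SIZE, vdev,
 *				 true, pages,	// true = weak barriers
 *				 my_notify, my_callback, "requests");
 */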

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);
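
/*
 * Illustrative call site (hypothetical transport): the transport's
 * finalize_features hook lets this helper strip transport feature
 * bits the ring code doesn't understand, virtio_mmio style:
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */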

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");