/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

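/*
 * Illustrative sketch only (not part of the original file): how the
 * helpers above pair up for one descriptor.  The function name and the
 * __maybe_unused attribute are ours; the calls mirror what
 * virtqueue_add() does further down.
 */
static void __maybe_unused vring_map_unmap_example(struct vring_virtqueue *vq,
						   struct scatterlist *sg,
						   unsigned int i)
{
	/* Map the entry; with vring_use_dma_api() false this is sg_phys(). */
	dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);

	if (vring_mapping_error(vq, addr))
		return;

	/* Descriptor fields are always stored in virtio endianness. */
	vq->vring.desc[i].addr = cpu_to_virtio64(vq->vq.vdev, addr);
	vq->vring.desc[i].len = cpu_to_virtio32(vq->vq.vdev, sg->length);
	vq->vring.desc[i].flags = cpu_to_virtio16(vq->vq.vdev, 0);

	/* vring_unmap_one() reads those fields back to undo the mapping. */
	vring_unmap_one(vq, &vq->vring.desc[i]);
}
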
static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

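/*
 * Worked example (ours, for illustration): for total_sg == 3 the loop
 * above pre-links the table as desc[0].next = 1, desc[1].next = 2,
 * desc[2].next = 3, so virtqueue_add() can walk "i = desc[i].next"
 * uniformly.  The last entry's dangling next value is harmless because
 * its VRING_DESC_F_NEXT flag is cleared there and never followed.
 */
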
static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	/* Walk back over whatever we mapped before the failure.  In the
	 * indirect case the mapped entries live in desc[] starting at 0,
	 * not in the main ring at head. */
	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	/* num_free hasn't been decremented yet, so nothing to give back. */
	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

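/*
 * Usage sketch (ours, not part of the original file): a virtio-blk
 * style request queues one readable header, the data pages, and one
 * writable status list in a single call.  The request layout and the
 * function name are hypothetical.
 */
static int __maybe_unused example_queue_request(struct virtqueue *vq,
						struct scatterlist *hdr,
						struct scatterlist *data,
						struct scatterlist *status,
						void *token)
{
	struct scatterlist *sgs[3] = { hdr, data, status };

	/* Two readable lists (hdr, data), one writable list (status). */
	return virtqueue_add_sgs(vq, sgs, 2, 1, token, GFP_ATOMIC);
}
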
/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (i.e. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

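/*
 * Usage sketch (ours): the single-direction wrappers cover the common
 * case of one scatterlist.  Here a caller posts an empty receive
 * buffer; the buffer pointer itself doubles as the completion token.
 */
static int __maybe_unused example_post_rx_buffer(struct virtqueue *vq,
						 void *buf, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	return virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_ATOMIC);
}
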
/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

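/*
 * Usage sketch (ours): the split form lets a driver drop its queue lock
 * before the possibly slow notify, as network drivers commonly do.  The
 * lock is the caller's own; only the prepare step needs to sit under it.
 */
static void __maybe_unused example_kick_outside_lock(struct virtqueue *vq,
						     spinlock_t *lock)
{
	bool notify;

	spin_lock(lock);
	notify = virtqueue_kick_prepare(vq);
	spin_unlock(lock);

	if (notify)
		virtqueue_notify(vq);
}
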
/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i, j;
	u16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	/* Free the indirect table, if any, now that it's unmapped. */
	if (vq->desc_state[head].indir_desc) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(vq->desc_state[head].indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);

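/*
 * Usage sketch (ours): a typical completion path drains every used
 * buffer the device has returned.  complete() stands in for whatever
 * the driver does with a finished request.
 */
static void __maybe_unused example_drain_used(struct virtqueue *vq,
					      void (*complete)(void *token,
							       unsigned int len))
{
	unsigned int len;
	void *token;

	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
		complete(token, len);
}
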
/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

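/*
 * Usage sketch (ours): the prepare/poll pair closes the race between
 * "no more work" and "interrupts back on", much as a NAPI-style driver
 * would use it before completing its poll loop.
 */
static bool __maybe_unused example_reenable_race_free(struct virtqueue *vq)
{
	unsigned opaque = virtqueue_enable_cb_prepare(vq);

	if (virtqueue_poll(vq, opaque)) {
		/* A buffer slipped in; keep interrupts off and keep going. */
		virtqueue_disable_cb(vq);
		return false;
	}
	/* Callbacks re-enabled with no work pending. */
	return true;
}
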
/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);

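/*
 * Usage sketch (ours): the classic callback pattern built from the
 * helpers above: mask callbacks, drain, and retry until enable_cb
 * confirms nothing arrived in between.
 */
static void __maybe_unused example_virtqueue_callback(struct virtqueue *vq)
{
	unsigned int len;
	void *token;

	do {
		virtqueue_disable_cb(vq);
		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
			; /* process token/len here */
	} while (!virtqueue_enable_cb(vq));
}
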
/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

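/*
 * Usage sketch (ours): on device removal, after the device has been
 * reset so the queue is no longer active, reclaim any buffers the
 * device never consumed.  free_token() is a stand-in for the driver's
 * own cleanup.
 */
static void __maybe_unused example_reclaim_on_remove(struct virtqueue *vq,
						     void (*free_token)(void *))
{
	void *buf;

	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
		free_token(buf);
}
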
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

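/*
 * Usage sketch (ours): roughly how a transport such as virtio-mmio
 * wires this up.  The allocation size comes from vring_size() in
 * <linux/virtio_ring.h>; mmio_notify and the queue name are
 * hypothetical.
 */
static struct virtqueue * __maybe_unused example_setup_vq(struct virtio_device *vdev,
							  unsigned int index,
							  unsigned int num,
							  bool (*mmio_notify)(struct virtqueue *),
							  void (*callback)(struct virtqueue *))
{
	void *queue = alloc_pages_exact(PAGE_ALIGN(vring_size(num, PAGE_SIZE)),
					GFP_KERNEL | __GFP_ZERO);

	if (!queue)
		return NULL;

	/* Weak barriers are fine when the "device" is really host software. */
	return vring_new_virtqueue(index, num, PAGE_SIZE, vdev, true, queue,
				   mmio_notify, callback, "example-vq");
}
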
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");