/* Virtio ring implementation.
 *
 * Copyright 2007 Rusty Russell IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>
#include <linux/dma-mapping.h>
#include <xen/xen.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while(0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_desc_state {
	void *data;			/* Data for callback. */
	struct vring_desc *indir_desc;	/* Indirect descriptor, if any. */
};

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

	/* DMA, allocation, and size information */
	bool we_own_ring;
	size_t queue_size_in_bytes;
	dma_addr_t queue_dma_addr;

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Per-descriptor state. */
	struct vring_desc_state desc_state[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * Modern virtio devices have feature bits to specify whether they need a
 * quirk and bypass the IOMMU. If not there, just use the DMA API.
 *
 * If there, the interaction between virtio and DMA API is messy.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 *
 * TODO: install a per-device DMA ops structure that does the right thing
 * taking into account all the above quirks, and use the DMA API
 * unconditionally on data path.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	if (!virtio_has_iommu_quirk(vdev))
		return true;

	/* Otherwise, we are left to guess. */
	/*
	 * In theory, it's possible to have a buggy QEMU-supplied
	 * emulated Q35 IOMMU and Xen enabled at the same time.  On
	 * such a configuration, virtio has never worked and will
	 * not work without an even larger kludge.  Instead, enable
	 * the DMA API if we're a Xen guest, which at least allows
	 * all of the sensible Xen configurations to work correctly.
	 */
	if (xen_domain())
		return true;

	return false;
}

/*
 * The DMA ops on various arches are rather gnarly right now, and
 * making all of the arch DMA ops work on the vring device itself
 * is a mess.  For now, we use the parent device for DMA ops.
 */
static inline struct device *vring_dma_dev(const struct vring_virtqueue *vq)
{
	return vq->vq.vdev->dev.parent;
}

/* Map one sg entry. */
static dma_addr_t vring_map_one_sg(const struct vring_virtqueue *vq,
				   struct scatterlist *sg,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)sg_phys(sg);

	/*
	 * We can't use dma_map_sg, because we don't use scatterlists in
	 * the way it expects (we don't guarantee that the scatterlist
	 * will exist for the lifetime of the mapping).
	 */
	return dma_map_page(vring_dma_dev(vq),
			    sg_page(sg), sg->offset, sg->length,
			    direction);
}

static dma_addr_t vring_map_single(const struct vring_virtqueue *vq,
				   void *cpu_addr, size_t size,
				   enum dma_data_direction direction)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return (dma_addr_t)virt_to_phys(cpu_addr);

	return dma_map_single(vring_dma_dev(vq),
			      cpu_addr, size, direction);
}

static void vring_unmap_one(const struct vring_virtqueue *vq,
			    struct vring_desc *desc)
{
	u16 flags;

	if (!vring_use_dma_api(vq->vq.vdev))
		return;

	flags = virtio16_to_cpu(vq->vq.vdev, desc->flags);

	if (flags & VRING_DESC_F_INDIRECT) {
		dma_unmap_single(vring_dma_dev(vq),
				 virtio64_to_cpu(vq->vq.vdev, desc->addr),
				 virtio32_to_cpu(vq->vq.vdev, desc->len),
				 (flags & VRING_DESC_F_WRITE) ?
				 DMA_FROM_DEVICE : DMA_TO_DEVICE);
	} else {
		dma_unmap_page(vring_dma_dev(vq),
			       virtio64_to_cpu(vq->vq.vdev, desc->addr),
			       virtio32_to_cpu(vq->vq.vdev, desc->len),
			       (flags & VRING_DESC_F_WRITE) ?
			       DMA_FROM_DEVICE : DMA_TO_DEVICE);
	}
}

static int vring_mapping_error(const struct vring_virtqueue *vq,
			       dma_addr_t addr)
{
	if (!vring_use_dma_api(vq->vq.vdev))
		return 0;

	return dma_mapping_error(vring_dma_dev(vq), addr);
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				void *ctx,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev), err_idx;
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);
	BUG_ON(ctx && vq->indirect);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		indirect = true;
		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
	} else {
		indirect = false;
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		if (indirect)
			kfree(desc);
		END_USE(vq);
		return -ENOSPC;
	}

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_TO_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			dma_addr_t addr = vring_map_one_sg(vq, sg, DMA_FROM_DEVICE);
			if (vring_mapping_error(vq, addr))
				goto unmap_release;

			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, addr);
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	if (indirect) {
		/* Now that the indirect table is filled in, map it. */
		dma_addr_t addr = vring_map_single(
			vq, desc, total_sg * sizeof(struct vring_desc),
			DMA_TO_DEVICE);
		if (vring_mapping_error(vq, addr))
			goto unmap_release;

		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, addr);

		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));
	}

	/* We're using some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Store token and indirect buffer state. */
	vq->desc_state[head].data = data;
	if (indirect)
		vq->desc_state[head].indir_desc = desc;
	if (ctx)
		vq->desc_state[head].indir_desc = ctx;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;

unmap_release:
	err_idx = i;

	/* Walk back the chain we built: it lives in the indirect table
	 * (starting at index 0) if we went indirect, else in the ring
	 * itself starting at head.  Note that num_free has not been
	 * decremented yet on this path, so there is nothing to restore. */
	if (indirect)
		i = 0;
	else
		i = head;

	for (n = 0; n < total_sg; n++) {
		if (i == err_idx)
			break;
		vring_unmap_one(vq, &desc[i]);
		i = virtio16_to_cpu(_vq->vdev, desc[i].next);
	}

	if (indirect)
		kfree(desc);

	END_USE(vq);
	return -EIO;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs,
			     data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, NULL, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);

/**
 * virtqueue_add_inbuf_ctx - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @ctx: extra context for the token
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. ENOSPC, ENOMEM, EIO).
 */
int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
			    struct scatterlist *sg, unsigned int num,
			    void *data,
			    void *ctx,
			    gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, ctx, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf_ctx);

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because the virtqueue_kick_prepare() call
 * needs to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);
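
/*
 * Example (illustrative sketch): the split kick matters when additions
 * happen under a lock that should not be held across the (possibly slow,
 * trapping) notify.  The lock and variables are hypothetical driver state:
 *
 *	bool kick;
 *
 *	spin_lock_irqsave(&vq_lock, flags);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 *	kick = virtqueue_kick_prepare(vq);
 *	spin_unlock_irqrestore(&vq_lock, flags);
 *
 *	if (kick)
 *		virtqueue_notify(vq);	// may exit to the hypervisor
 */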

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head,
		       void **ctx)
{
	unsigned int i, j;
	__virtio16 nextflag = cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT);

	/* Clear data ptr. */
	vq->desc_state[head].data = NULL;

	/* Put back on free list: unmap first-level descriptors and find end */
	i = head;

	while (vq->vring.desc[i].flags & nextflag) {
		vring_unmap_one(vq, &vq->vring.desc[i]);
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vring_unmap_one(vq, &vq->vring.desc[i]);
	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;

	/* Plus final descriptor */
	vq->vq.num_free++;

	if (vq->indirect) {
		struct vring_desc *indir_desc = vq->desc_state[head].indir_desc;
		u32 len;

		/* Free the indirect table, if any, now that it's unmapped. */
		if (!indir_desc)
			return;

		len = virtio32_to_cpu(vq->vq.vdev, vq->vring.desc[head].len);

		BUG_ON(!(vq->vring.desc[head].flags &
			 cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT)));
		BUG_ON(len == 0 || len % sizeof(struct vring_desc));

		for (j = 0; j < len / sizeof(struct vring_desc); j++)
			vring_unmap_one(vq, &indir_desc[j]);

		kfree(indir_desc);
		vq->desc_state[head].indir_desc = NULL;
	} else if (ctx) {
		*ctx = vq->desc_state[head].indir_desc;
	}
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf_ctx - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 * @ctx: extra context for the token, if any
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf_ctx(struct virtqueue *_vq, unsigned int *len,
			    void **ctx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->desc_state[i].data)) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->desc_state[i].data;
	detach_buf(vq, i, ctx);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf_ctx);

void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	return virtqueue_get_buf_ctx(_vq, len, NULL);
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
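
/*
 * Example (illustrative sketch): a virtqueue callback usually drains all
 * used buffers; "process()" is a hypothetical driver helper, and the
 * token is whatever was passed to virtqueue_add_*():
 *
 *	static void my_vq_done(struct virtqueue *vq)
 *	{
 *		unsigned int len;
 *		void *token;
 *
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);	// len = bytes device wrote
 *	}
 */
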
/**
 * virtqueue_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
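
/*
 * Example (illustrative sketch): the prepare/poll pair lets a driver
 * re-enable callbacks and then recheck for work without racing the
 * device, e.g. at the end of a NAPI-style poll loop ("again" is a
 * hypothetical label in the driver's own loop):
 *
 *	unsigned opaque = virtqueue_enable_cb_prepare(vq);
 *
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		goto again;	// a buffer was used after the final check
 *	}
 */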

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
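
/*
 * Example (illustrative sketch): the classic race-free drain pattern is
 * to disable callbacks, consume everything, and loop whenever re-enabling
 * reports more pending work ("process()" is a hypothetical helper):
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */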

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_USED_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always update the event index to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
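
/*
 * Example (illustrative sketch): a transmit path that tolerates batched
 * completions can use the delayed variant, reaping immediately when the
 * race is lost ("reap_completions()" is a hypothetical helper):
 *
 *	if (!virtqueue_enable_cb_delayed(vq)) {
 *		// many buffers are already used: process them now
 *		virtqueue_disable_cb(vq);
 *		reap_completions(vq);
 *	}
 */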

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->desc_state[i].data)
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->desc_state[i].data;
		detach_buf(vq, i, NULL);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool context,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name)
{
	unsigned int i;
	struct vring_virtqueue *vq;

	vq = kmalloc(sizeof(*vq) + vring.num * sizeof(struct vring_desc_state),
		     GFP_KERNEL);
	if (!vq)
		return NULL;

	vq->vring = vring;
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = vring.num;
	vq->vq.index = index;
	vq->we_own_ring = false;
	vq->queue_dma_addr = 0;
	vq->queue_size_in_bytes = 0;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC) &&
		!context;
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		if (!vq->event)
			vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < vring.num-1; i++)
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
	memset(vq->desc_state, 0, vring.num * sizeof(struct vring_desc_state));

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(__vring_new_virtqueue);

static void *vring_alloc_queue(struct virtio_device *vdev, size_t size,
			       dma_addr_t *dma_handle, gfp_t flag)
{
	if (vring_use_dma_api(vdev)) {
		return dma_alloc_coherent(vdev->dev.parent, size,
					  dma_handle, flag);
	} else {
		void *queue = alloc_pages_exact(PAGE_ALIGN(size), flag);
		if (queue) {
			phys_addr_t phys_addr = virt_to_phys(queue);
			*dma_handle = (dma_addr_t)phys_addr;

			/*
			 * Sanity check: make sure we didn't truncate
			 * the address.  The only arches I can find that
			 * have 64-bit phys_addr_t but 32-bit dma_addr_t
			 * are certain non-highmem MIPS and x86
			 * configurations, but these configurations
			 * should never allocate physical pages above 32
			 * bits, so this is fine.  Just in case, throw a
			 * warning and abort if we end up with an
			 * unrepresentable address.
			 */
			if (WARN_ON_ONCE(*dma_handle != phys_addr)) {
				free_pages_exact(queue, PAGE_ALIGN(size));
				return NULL;
			}
		}
		return queue;
	}
}

static void vring_free_queue(struct virtio_device *vdev, size_t size,
			     void *queue, dma_addr_t dma_handle)
{
	if (vring_use_dma_api(vdev)) {
		dma_free_coherent(vdev->dev.parent, size, queue, dma_handle);
	} else {
		free_pages_exact(queue, PAGE_ALIGN(size));
	}
}

struct virtqueue *vring_create_virtqueue(
	unsigned int index,
	unsigned int num,
	unsigned int vring_align,
	struct virtio_device *vdev,
	bool weak_barriers,
	bool may_reduce_num,
	bool context,
	bool (*notify)(struct virtqueue *),
	void (*callback)(struct virtqueue *),
	const char *name)
{
	struct virtqueue *vq;
	void *queue = NULL;
	dma_addr_t dma_addr;
	size_t queue_size_in_bytes;
	struct vring vring;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	/* TODO: allocate each queue chunk individually */
	for (; num && vring_size(num, vring_align) > PAGE_SIZE; num /= 2) {
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr,
					  GFP_KERNEL|__GFP_NOWARN|__GFP_ZERO);
		if (queue)
			break;
		/* Caller insists on this exact size: give up rather than
		 * silently shrinking the ring. */
		if (!may_reduce_num)
			return NULL;
	}

	if (!num)
		return NULL;

	if (!queue) {
		/* Try to get a single page. You are my only hope! */
		queue = vring_alloc_queue(vdev, vring_size(num, vring_align),
					  &dma_addr, GFP_KERNEL|__GFP_ZERO);
	}
	if (!queue)
		return NULL;

	queue_size_in_bytes = vring_size(num, vring_align);
	vring_init(&vring, num, queue, vring_align);

	vq = __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				   notify, callback, name);
	if (!vq) {
		vring_free_queue(vdev, queue_size_in_bytes, queue,
				 dma_addr);
		return NULL;
	}

	to_vvq(vq)->queue_dma_addr = dma_addr;
	to_vvq(vq)->queue_size_in_bytes = queue_size_in_bytes;
	to_vvq(vq)->we_own_ring = true;

	return vq;
}
EXPORT_SYMBOL_GPL(vring_create_virtqueue);
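
/*
 * Example (illustrative sketch): a transport might create a 256-entry
 * queue like this; the notify/callback functions and the chosen
 * alignment are hypothetical transport state:
 *
 *	struct virtqueue *vq;
 *
 *	vq = vring_create_virtqueue(0, 256, SMP_CACHE_BYTES, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// context
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 *	// the device is then told virtqueue_get_desc_addr(vq), etc.
 */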

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool context,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name)
{
	struct vring vring;
	vring_init(&vring, num, pages, vring_align);
	return __vring_new_virtqueue(index, vring, vdev, weak_barriers, context,
				     notify, callback, name);
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

void vring_del_virtqueue(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (vq->we_own_ring) {
		vring_free_queue(vq->vq.vdev, vq->queue_size_in_bytes,
				 vq->vring.desc, vq->queue_dma_addr);
	}
	list_del(&_vq->list);
	kfree(vq);
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		case VIRTIO_F_IOMMU_PLATFORM:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

dma_addr_t virtqueue_get_desc_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr;
}
EXPORT_SYMBOL_GPL(virtqueue_get_desc_addr);

dma_addr_t virtqueue_get_avail_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.avail - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail_addr);

dma_addr_t virtqueue_get_used_addr(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	BUG_ON(!vq->we_own_ring);

	return vq->queue_dma_addr +
		((char *)vq->vring.used - (char *)vq->vring.desc);
}
EXPORT_SYMBOL_GPL(virtqueue_get_used_addr);

const struct vring *virtqueue_get_vring(struct virtqueue *vq)
{
	return &to_vvq(vq)->vring;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring);

MODULE_LICENSE("GPL");