/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/kmemleak.h>

#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue {
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* Last written value to avail->flags */
	u16 avail_flags_shadow;

	/* Last written value to avail->idx in guest byte order */
	u16 avail_idx_shadow;

	/* How to notify other side. FIXME: commonalize hcalls! */
	bool (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/*
 * The interaction between virtio and a possible IOMMU is a mess.
 *
 * On most systems with virtio, physical addresses match bus addresses,
 * and it doesn't particularly matter whether we use the DMA API.
 *
 * On some systems, including Xen and any system with a physical device
 * that speaks virtio behind a physical IOMMU, we must use the DMA API
 * for virtio DMA to work at all.
 *
 * On other systems, including SPARC and PPC64, virtio-pci devices are
 * enumerated as though they are behind an IOMMU, but the virtio host
 * ignores the IOMMU, so we must either pretend that the IOMMU isn't
 * there or somehow map everything as the identity.
 *
 * For the time being, we preserve historic behavior and bypass the DMA
 * API.
 */

static bool vring_use_dma_api(struct virtio_device *vdev)
{
	return false;
}

static struct vring_desc *alloc_indirect(struct virtqueue *_vq,
					 unsigned int total_sg, gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~__GFP_HIGHMEM;

	desc = kmalloc(total_sg * sizeof(struct vring_desc), gfp);
	if (!desc)
		return NULL;

	for (i = 0; i < total_sg; i++)
		desc[i].next = cpu_to_virtio16(_vq->vdev, i + 1);
	return desc;
}

static inline int virtqueue_add(struct virtqueue *_vq,
				struct scatterlist *sgs[],
				unsigned int total_sg,
				unsigned int out_sgs,
				unsigned int in_sgs,
				void *data,
				gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	struct scatterlist *sg;
	struct vring_desc *desc;
	unsigned int i, n, avail, descs_used, uninitialized_var(prev);
	int head;
	bool indirect;

	START_USE(vq);

	BUG_ON(data == NULL);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return -EIO;
	}

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with 0.1 seconds between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
				> 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	BUG_ON(total_sg > vq->vring.num);
	BUG_ON(total_sg == 0);

	head = vq->free_head;

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && total_sg > 1 && vq->vq.num_free)
		desc = alloc_indirect(_vq, total_sg, gfp);
	else
		desc = NULL;

	if (desc) {
		/* Use a single buffer which doesn't continue */
		vq->vring.desc[head].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_INDIRECT);
		vq->vring.desc[head].addr = cpu_to_virtio64(_vq->vdev, virt_to_phys(desc));
		/* avoid kmemleak false positive (hidden by virt_to_phys) */
		kmemleak_ignore(desc);
		vq->vring.desc[head].len = cpu_to_virtio32(_vq->vdev, total_sg * sizeof(struct vring_desc));

		/* Set up rest to use this indirect table. */
		i = 0;
		descs_used = 1;
		indirect = true;
	} else {
		desc = vq->vring.desc;
		i = head;
		descs_used = total_sg;
		indirect = false;
	}

	if (vq->vq.num_free < descs_used) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 descs_used, vq->vq.num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out_sgs)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->vq.num_free -= descs_used;

	for (n = 0; n < out_sgs; n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	for (; n < (out_sgs + in_sgs); n++) {
		for (sg = sgs[n]; sg; sg = sg_next(sg)) {
			desc[i].flags = cpu_to_virtio16(_vq->vdev, VRING_DESC_F_NEXT | VRING_DESC_F_WRITE);
			desc[i].addr = cpu_to_virtio64(_vq->vdev, sg_phys(sg));
			desc[i].len = cpu_to_virtio32(_vq->vdev, sg->length);
			prev = i;
			i = virtio16_to_cpu(_vq->vdev, desc[i].next);
		}
	}
	/* Last one doesn't continue. */
	desc[prev].flags &= cpu_to_virtio16(_vq->vdev, ~VRING_DESC_F_NEXT);

	/* Update free pointer */
	if (indirect)
		vq->free_head = virtio16_to_cpu(_vq->vdev, vq->vring.desc[head].next);
	else
		vq->free_head = i;

	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = vq->avail_idx_shadow & (vq->vring.num - 1);
	vq->vring.avail->ring[avail] = cpu_to_virtio16(_vq->vdev, head);

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq->weak_barriers);
	vq->avail_idx_shadow++;
	vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
	vq->num_added++;

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		virtqueue_kick(_vq);

	return 0;
}

/**
 * virtqueue_add_sgs - expose buffers to other end
 * @_vq: the struct virtqueue we're talking about.
 * @sgs: array of terminated scatterlists.
 * @out_sgs: the number of scatterlists readable by other side
 * @in_sgs: the number of scatterlists which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_sgs(struct virtqueue *_vq,
		      struct scatterlist *sgs[],
		      unsigned int out_sgs,
		      unsigned int in_sgs,
		      void *data,
		      gfp_t gfp)
{
	unsigned int i, total_sg = 0;

	/* Count them first. */
	for (i = 0; i < out_sgs + in_sgs; i++) {
		struct scatterlist *sg;
		for (sg = sgs[i]; sg; sg = sg_next(sg))
			total_sg++;
	}
	return virtqueue_add(_vq, sgs, total_sg, out_sgs, in_sgs, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_sgs);
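
/*
 * Illustrative sketch, not part of this file: a driver submitting a
 * request header the device reads plus a status byte it writes might
 * do the following.  The names "req", "status" and "token" are
 * hypothetical driver-side names, not anything defined here.
 *
 *	struct scatterlist hdr, stat;
 *	struct scatterlist *sgs[2];
 *
 *	sg_init_one(&hdr, req, sizeof(*req));
 *	sg_init_one(&stat, &status, sizeof(status));
 *	sgs[0] = &hdr;
 *	sgs[1] = &stat;
 *	if (virtqueue_add_sgs(vq, sgs, 1, 1, token, GFP_ATOMIC) == 0)
 *		virtqueue_kick(vq);
 */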

/**
 * virtqueue_add_outbuf - expose output buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg readable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_outbuf(struct virtqueue *vq,
			 struct scatterlist *sg, unsigned int num,
			 void *data,
			 gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 1, 0, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_outbuf);
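
/*
 * Sketch only: queueing a single device-readable buffer, as a
 * transmit path might.  "buf", "len" and "token" are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	err = virtqueue_add_outbuf(vq, &sg, 1, token, GFP_ATOMIC);
 */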

/**
 * virtqueue_add_inbuf - expose input buffers to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: scatterlist (must be well-formed and terminated!)
 * @num: the number of entries in @sg writable by other side
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns zero or a negative error (e.g. -ENOSPC, -ENOMEM, -EIO).
 */
int virtqueue_add_inbuf(struct virtqueue *vq,
			struct scatterlist *sg, unsigned int num,
			void *data,
			gfp_t gfp)
{
	return virtqueue_add(vq, &sg, num, 0, 1, data, gfp);
}
EXPORT_SYMBOL_GPL(virtqueue_add_inbuf);
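
/*
 * Sketch only: posting a device-writable receive buffer, where the
 * buffer pointer itself doubles as the token (a common idiom).
 * "buf" and "buf_len" are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buf_len);
 *	err = virtqueue_add_inbuf(vq, &sg, 1, buf, GFP_KERNEL);
 *	if (!err)
 *		virtqueue_kick(vq);
 */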

/**
 * virtqueue_kick_prepare - first half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * Instead of virtqueue_kick(), you can do:
 *	if (virtqueue_kick_prepare(vq))
 *		virtqueue_notify(vq);
 *
 * This is sometimes useful because virtqueue_kick_prepare() needs
 * to be serialized, but the actual virtqueue_notify() call does not.
 */
bool virtqueue_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq->weak_barriers);

	old = vq->avail_idx_shadow - vq->num_added;
	new = vq->avail_idx_shadow;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(virtio16_to_cpu(_vq->vdev, vring_avail_event(&vq->vring)),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & cpu_to_virtio16(_vq->vdev, VRING_USED_F_NO_NOTIFY));
	}
	END_USE(vq);
	return needs_kick;
}
EXPORT_SYMBOL_GPL(virtqueue_kick_prepare);

/**
 * virtqueue_notify - second half of split virtqueue_kick call.
 * @_vq: the struct virtqueue
 *
 * This does not need to be serialized.
 *
 * Returns false if host notify failed or queue is broken, otherwise true.
 */
bool virtqueue_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (unlikely(vq->broken))
		return false;

	/* Prod other side to tell it about changes. */
	if (!vq->notify(_vq)) {
		vq->broken = true;
		return false;
	}
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_notify);

/**
 * virtqueue_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more virtqueue_add_* calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns false if kick failed, otherwise true.
 */
bool virtqueue_kick(struct virtqueue *vq)
{
	if (virtqueue_kick_prepare(vq))
		return virtqueue_notify(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_kick);

static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_INDIRECT))
		kfree(phys_to_virt(virtio64_to_cpu(vq->vq.vdev, vq->vring.desc[i].addr)));

	while (vq->vring.desc[i].flags & cpu_to_virtio16(vq->vq.vdev, VRING_DESC_F_NEXT)) {
		i = virtio16_to_cpu(vq->vq.vdev, vq->vring.desc[i].next);
		vq->vq.num_free++;
	}

	vq->vring.desc[i].next = cpu_to_virtio16(vq->vq.vdev, vq->free_head);
	vq->free_head = head;
	/* Plus final descriptor */
	vq->vq.num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != virtio16_to_cpu(vq->vq.vdev, vq->vring.used->idx);
}

/**
 * virtqueue_get_buf - get the next used buffer
 * @_vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to virtqueue_add_*().
 */
void *virtqueue_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq->weak_barriers);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].id);
	*len = virtio32_to_cpu(_vq->vdev, vq->vring.used->ring[last_used].len);

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT))
		virtio_store_mb(vq->weak_barriers,
				&vring_used_event(&vq->vring),
				cpu_to_virtio16(_vq->vdev, vq->last_used_idx));

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}
EXPORT_SYMBOL_GPL(virtqueue_get_buf);
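
/*
 * Sketch only: a typical completion loop, run from the virtqueue
 * callback, reclaiming tokens queued earlier; "process" is a
 * hypothetical driver function.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *		process(token, len);
 */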

/**
 * virtqueue_disable_cb - disable callbacks
 * @_vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
void virtqueue_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!(vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT)) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
}
EXPORT_SYMBOL_GPL(virtqueue_disable_cb);

/**
 * virtqueue_enable_cb_prepare - restart callbacks after disable_cb
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value.  This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	vring_used_event(&vq->vring) = cpu_to_virtio16(_vq->vdev, last_used_idx = vq->last_used_idx);
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @_vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq->weak_barriers);
	return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
}
EXPORT_SYMBOL_GPL(virtqueue_poll);
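
/*
 * Sketch only: the two-step re-enable lets NAPI-style code drop a
 * lock between arming the callback and checking for the race.
 *
 *	unsigned opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	... drop the lock, finish other work ...
 *	if (virtqueue_poll(vq, opaque)) {
 *		virtqueue_disable_cb(vq);
 *		... more buffers arrived; keep processing ...
 *	}
 */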

/**
 * virtqueue_enable_cb - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb);
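
/*
 * Sketch only: the canonical consume-then-rearm loop; processing
 * repeats until the queue is drained with callbacks re-enabled.
 * "process" is hypothetical, as above.
 *
 *	unsigned int len;
 *	void *token;
 *
 *	do {
 *		virtqueue_disable_cb(vq);
 *		while ((token = virtqueue_get_buf(vq, &len)) != NULL)
 *			process(token, len);
 *	} while (!virtqueue_enable_cb(vq));
 */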

/**
 * virtqueue_enable_cb_delayed - restart callbacks after disable_cb.
 * @_vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
bool virtqueue_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry.  Always do both to keep code simple. */
	if (vq->avail_flags_shadow & VRING_AVAIL_F_NO_INTERRUPT) {
		vq->avail_flags_shadow &= ~VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(_vq->vdev, vq->avail_flags_shadow);
	}
	/* TODO: tune this threshold */
	bufs = (u16)(vq->avail_idx_shadow - vq->last_used_idx) * 3 / 4;

	virtio_store_mb(vq->weak_barriers,
			&vring_used_event(&vq->vring),
			cpu_to_virtio16(_vq->vdev, vq->last_used_idx + bufs));

	if (unlikely((u16)(virtio16_to_cpu(_vq->vdev, vq->vring.used->idx) - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_delayed);
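
/*
 * Sketch only: a transmit path might use the delayed variant so the
 * device holds off interrupts until roughly 3/4 of the outstanding
 * buffers are consumed, cutting the interrupt rate under load.
 *
 *	if (!virtqueue_enable_cb_delayed(vq))
 *		... many completions already pending; reclaim now ...
 */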

/**
 * virtqueue_detach_unused_buf - detach first unused buffer
 * @_vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to virtqueue_add_*().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
void *virtqueue_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->avail_idx_shadow--;
		vq->vring.avail->idx = cpu_to_virtio16(_vq->vdev, vq->avail_idx_shadow);
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->vq.num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}
EXPORT_SYMBOL_GPL(virtqueue_detach_unused_buf);
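
/*
 * Sketch only: teardown typically resets the device first, then
 * drains buffers that were queued but never used.
 *
 *	void *buf;
 *
 *	while ((buf = virtqueue_detach_unused_buf(vq)) != NULL)
 *		kfree(buf);
 */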

irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      bool (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.name = name;
	vq->vq.num_free = num;
	vq->vq.index = index;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->avail_flags_shadow = 0;
	vq->avail_idx_shadow = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback) {
		vq->avail_flags_shadow |= VRING_AVAIL_F_NO_INTERRUPT;
		vq->vring.avail->flags = cpu_to_virtio16(vdev, vq->avail_flags_shadow);
	}

	/* Put everything in free lists. */
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = cpu_to_virtio16(vdev, i + 1);
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);
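
/*
 * Sketch only, transport-side: a transport allocates the ring memory
 * itself (physically contiguous and zeroed) and then wraps it.  The
 * names "ring_mem", "my_notify" and "my_callback" are hypothetical;
 * the queue size and alignment are illustrative values.
 *
 *	vq = vring_new_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				 true, ring_mem, my_notify,
 *				 my_callback, "requests");
 *	if (!vq)
 *		... fall back or fail probe ...
 */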

void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		case VIRTIO_F_VERSION_1:
			break;
		default:
			/* We don't understand this bit. */
			__virtio_clear_bit(vdev, i);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

/**
 * virtqueue_get_vring_size - return the size of the virtqueue's vring
 * @_vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
unsigned int virtqueue_get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}
EXPORT_SYMBOL_GPL(virtqueue_get_vring_size);

bool virtqueue_is_broken(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->broken;
}
EXPORT_SYMBOL_GPL(virtqueue_is_broken);

/*
 * This should prevent the device from being used, allowing drivers to
 * recover.  You may need to grab appropriate locks to flush.
 */
void virtio_break_device(struct virtio_device *dev)
{
	struct virtqueue *_vq;

	list_for_each_entry(_vq, &dev->vqs, list) {
		struct vring_virtqueue *vq = to_vvq(_vq);
		vq->broken = true;
	}
}
EXPORT_SYMBOL_GPL(virtio_break_device);

void *virtqueue_get_avail(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.avail;
}
EXPORT_SYMBOL_GPL(virtqueue_get_avail);

void *virtqueue_get_used(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.used;
}
EXPORT_SYMBOL_GPL(virtqueue_get_used);

MODULE_LICENSE("GPL");