/* Virtio ring implementation.
 *
 *  Copyright 2007 Rusty Russell IBM Corporation
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 */
#include <linux/virtio.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_config.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/hrtimer.h>

/* virtio guest is communicating with a virtual "device" that actually runs on
 * a host processor.  Memory barriers are used to control SMP effects. */
#ifdef CONFIG_SMP
/* Where possible, use SMP barriers which are more lightweight than mandatory
 * barriers, because mandatory barriers control MMIO effects on accesses
 * through relaxed memory I/O windows (which virtio-pci does not use). */
#define virtio_mb(vq) \
	do { if ((vq)->weak_barriers) smp_mb(); else mb(); } while (0)
#define virtio_rmb(vq) \
	do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while (0)
#define virtio_wmb(vq) \
	do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while (0)
#else
/* We must force memory ordering even if guest is UP since host could be
 * running on another CPU, but SMP barriers are defined to barrier() in that
 * configuration. So fall back to mandatory barriers instead. */
#define virtio_mb(vq) mb()
#define virtio_rmb(vq) rmb()
#define virtio_wmb(vq) wmb()
#endif

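/*
 * Illustrative sketch (not part of the driver logic): the write barrier
 * pairs a descriptor fill with the index publish, so the host never sees
 * avail->idx pointing at a half-written entry.  Hypothetical sequence:
 *
 *	vq->vring.desc[head].addr = sg_phys(sg);	(1) fill descriptor
 *	virtio_wmb(vq);					(2) order (1) before (3)
 *	vq->vring.avail->idx++;				(3) publish to host
 *
 * The host performs the mirror-image reads: it loads avail->idx, issues a
 * read barrier, then walks the descriptors.
 */
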
#ifdef DEBUG
/* For development, we want to crash whenever the ring is screwed. */
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&(_vq)->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		BUG();						\
	} while (0)
/* Caller is supposed to guarantee no reentry. */
#define START_USE(_vq)						\
	do {							\
		if ((_vq)->in_use)				\
			panic("%s:in_use = %i\n",		\
			      (_vq)->vq.name, (_vq)->in_use);	\
		(_vq)->in_use = __LINE__;			\
	} while (0)
#define END_USE(_vq) \
	do { BUG_ON(!(_vq)->in_use); (_vq)->in_use = 0; } while (0)
#else
#define BAD_RING(_vq, fmt, args...)				\
	do {							\
		dev_err(&_vq->vq.vdev->dev,			\
			"%s:"fmt, (_vq)->vq.name, ##args);	\
		(_vq)->broken = true;				\
	} while (0)
#define START_USE(vq)
#define END_USE(vq)
#endif

struct vring_virtqueue
{
	struct virtqueue vq;

	/* Actual memory layout for this queue */
	struct vring vring;

	/* Can we use weak barriers? */
	bool weak_barriers;

	/* Other side has made a mess, don't try any more. */
	bool broken;

	/* Host supports indirect buffers */
	bool indirect;

	/* Host publishes avail event idx */
	bool event;

	/* Number of free buffers */
	unsigned int num_free;
	/* Head of free buffer list. */
	unsigned int free_head;
	/* Number we've added since last sync. */
	unsigned int num_added;

	/* Last used index we've seen. */
	u16 last_used_idx;

	/* How to notify other side. FIXME: commonalize hcalls! */
	void (*notify)(struct virtqueue *vq);

#ifdef DEBUG
	/* They're supposed to lock for us. */
	unsigned int in_use;

	/* Figure out if their kicks are too delayed. */
	bool last_add_time_valid;
	ktime_t last_add_time;
#endif

	/* Tokens for callbacks. */
	void *data[];
};

#define to_vvq(_vq) container_of(_vq, struct vring_virtqueue, vq)

/* Set up an indirect table of descriptors and add it to the queue. */
static int vring_add_indirect(struct vring_virtqueue *vq,
			      struct scatterlist sg[],
			      unsigned int out,
			      unsigned int in,
			      gfp_t gfp)
{
	struct vring_desc *desc;
	unsigned head;
	int i;

	/*
	 * We require lowmem mappings for the descriptors because
	 * otherwise virt_to_phys will give us bogus addresses in the
	 * virtqueue.
	 */
	gfp &= ~(__GFP_HIGHMEM | __GFP_HIGH);

	desc = kmalloc((out + in) * sizeof(struct vring_desc), gfp);
	if (!desc)
		return -ENOMEM;

	/* Transfer entries from the sg list into the indirect page */
	for (i = 0; i < out; i++) {
		desc[i].flags = VRING_DESC_F_NEXT;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}
	for (; i < (out + in); i++) {
		desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		desc[i].addr = sg_phys(sg);
		desc[i].len = sg->length;
		desc[i].next = i+1;
		sg++;
	}

	/* Last one doesn't continue. */
	desc[i-1].flags &= ~VRING_DESC_F_NEXT;
	desc[i-1].next = 0;

	/* We're about to use a buffer */
	vq->num_free--;

	/* Use a single buffer which doesn't continue */
	head = vq->free_head;
	vq->vring.desc[head].flags = VRING_DESC_F_INDIRECT;
	vq->vring.desc[head].addr = virt_to_phys(desc);
	vq->vring.desc[head].len = i * sizeof(struct vring_desc);

	/* Update free pointer */
	vq->free_head = vq->vring.desc[head].next;

	return head;
}

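/*
 * Sketch of the resulting layout (illustrative only).  A three-part
 * request consumes a single ring descriptor; the real chain lives in the
 * kmalloc'ed table:
 *
 *	ring desc[head]: INDIRECT -> +-------------------------------+
 *	                             | desc[0]: out, NEXT -> desc[1] |
 *	                             | desc[1]: out, NEXT -> desc[2] |
 *	                             | desc[2]: in, WRITE (last)     |
 *	                             +-------------------------------+
 *
 * This is why vring_add_buf() below treats any positive return as "room
 * for another buffer": with indirect tables, one free ring entry is
 * enough for a whole sg[] array.
 */
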
/* Defined below; needed for the emergency kick in vring_add_buf(). */
static void vring_kick(struct virtqueue *vq);

/**
 * vring_add_buf - expose buffer to other end
 * @vq: the struct virtqueue we're talking about.
 * @sg: the description of the buffer(s).
 * @out: the number of sg readable by other side
 * @in: the number of sg which are writable (after readable ones)
 * @data: the token identifying the buffer.
 * @gfp: how to do memory allocations (if necessary).
 *
 * Caller must ensure we don't call this with other virtqueue operations
 * at the same time (except where noted).
 *
 * Returns remaining capacity of queue or a negative error
 * (ie. ENOSPC).  Note that it only really makes sense to treat all
 * positive return values as "available": indirect buffers mean that
 * we can put an entire sg[] array inside a single queue entry.
 */
static int vring_add_buf(struct virtqueue *_vq,
			 struct scatterlist sg[],
			 unsigned int out,
			 unsigned int in,
			 void *data,
			 gfp_t gfp)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i, avail, uninitialized_var(prev);
	int head;

	START_USE(vq);

	BUG_ON(data == NULL);

#ifdef DEBUG
	{
		ktime_t now = ktime_get();

		/* No kick or get, with .1 second between?  Warn. */
		if (vq->last_add_time_valid)
			WARN_ON(ktime_to_ms(ktime_sub(now, vq->last_add_time))
					    > 100);
		vq->last_add_time = now;
		vq->last_add_time_valid = true;
	}
#endif

	/* If the host supports indirect descriptor tables, and we have multiple
	 * buffers, then go indirect. FIXME: tune this threshold */
	if (vq->indirect && (out + in) > 1 && vq->num_free) {
		head = vring_add_indirect(vq, sg, out, in, gfp);
		if (likely(head >= 0))
			goto add_head;
	}

	BUG_ON(out + in > vq->vring.num);
	BUG_ON(out + in == 0);

	if (vq->num_free < out + in) {
		pr_debug("Can't add buf len %i - avail = %i\n",
			 out + in, vq->num_free);
		/* FIXME: for historical reasons, we force a notify here if
		 * there are outgoing parts to the buffer.  Presumably the
		 * host should service the ring ASAP. */
		if (out)
			vq->notify(&vq->vq);
		END_USE(vq);
		return -ENOSPC;
	}

	/* We're about to use some buffers from the free list. */
	vq->num_free -= out + in;

	head = vq->free_head;
	for (i = vq->free_head; out; i = vq->vring.desc[i].next, out--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	for (; in; i = vq->vring.desc[i].next, in--) {
		vq->vring.desc[i].flags = VRING_DESC_F_NEXT|VRING_DESC_F_WRITE;
		vq->vring.desc[i].addr = sg_phys(sg);
		vq->vring.desc[i].len = sg->length;
		prev = i;
		sg++;
	}
	/* Last one doesn't continue. */
	vq->vring.desc[prev].flags &= ~VRING_DESC_F_NEXT;

	/* Update free pointer */
	vq->free_head = i;

add_head:
	/* Set token. */
	vq->data[head] = data;

	/* Put entry in available array (but don't update avail->idx until they
	 * do sync). */
	avail = (vq->vring.avail->idx & (vq->vring.num-1));
	vq->vring.avail->ring[avail] = head;

	/* Descriptors and available array need to be set before we expose the
	 * new available array entries. */
	virtio_wmb(vq);
	vq->vring.avail->idx++;
	vq->num_added++;

	/* This is very unlikely, but theoretically possible.  Kick
	 * just in case. */
	if (unlikely(vq->num_added == (1 << 16) - 1))
		vring_kick(_vq);

	pr_debug("Added buffer head %i to %p\n", head, vq);
	END_USE(vq);

	return vq->num_free;
}

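/*
 * Hypothetical usage sketch (not compiled here): a driver queueing a
 * two-part request, one readable header and one writable response:
 *
 *	struct scatterlist sg[2];
 *
 *	sg_init_table(sg, 2);
 *	sg_set_buf(&sg[0], &req_hdr, sizeof(req_hdr));
 *	sg_set_buf(&sg[1], resp_buf, resp_len);
 *	if (vq->vq_ops->add_buf(vq, sg, 1, 1, token, GFP_ATOMIC) < 0)
 *		... ring full: stop the queue and retry later ...
 *	vq->vq_ops->kick(vq);
 *
 * req_hdr, resp_buf and token are stand-ins for driver state; the
 * vq_ops indirection matches the vring_vq_ops table defined below.
 */
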
/**
 * vring_kick_prepare - first half of split vring_kick call.
 * @vq: the struct virtqueue
 *
 * Instead of vring_kick(), you can do:
 *	if (vring_kick_prepare(vq))
 *		vring_kick_notify(vq);
 *
 * This is sometimes useful because the vring_kick_prepare() needs
 * to be serialized, but the actual vring_kick_notify() call does not.
 */
static bool vring_kick_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 new, old;
	bool needs_kick;

	START_USE(vq);
	/* We need to expose available array entries before checking avail
	 * event. */
	virtio_mb(vq);

	old = vq->vring.avail->idx - vq->num_added;
	new = vq->vring.avail->idx;
	vq->num_added = 0;

#ifdef DEBUG
	if (vq->last_add_time_valid) {
		WARN_ON(ktime_to_ms(ktime_sub(ktime_get(),
					      vq->last_add_time)) > 100);
	}
	vq->last_add_time_valid = false;
#endif

	if (vq->event) {
		needs_kick = vring_need_event(vring_avail_event(&vq->vring),
					      new, old);
	} else {
		needs_kick = !(vq->vring.used->flags & VRING_USED_F_NO_NOTIFY);
	}
	END_USE(vq);
	return needs_kick;
}

/**
 * vring_kick_notify - second half of split vring_kick call.
 * @vq: the struct virtqueue
 *
 * This does not need to be serialized.
 */
static void vring_kick_notify(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	/* Prod other side to tell it about changes. */
	vq->notify(_vq);
}

/**
 * vring_kick - update after add_buf
 * @vq: the struct virtqueue
 *
 * After one or more vring_add_buf calls, invoke this to kick
 * the other side.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
static void vring_kick(struct virtqueue *vq)
{
	if (vring_kick_prepare(vq))
		vring_kick_notify(vq);
}

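/*
 * Illustrative pattern (hypothetical driver code): the split form lets a
 * driver drop its lock before the possibly-expensive notify (e.g. a VM
 * exit on virtio-pci):
 *
 *	bool notify;
 *
 *	spin_lock_irqsave(&priv->lock, flags);
 *	vq->vq_ops->add_buf(vq, sg, out, in, token, GFP_ATOMIC);
 *	notify = vq->vq_ops->kick_prepare(vq);
 *	spin_unlock_irqrestore(&priv->lock, flags);
 *	if (notify)
 *		vq->vq_ops->kick_notify(vq);
 *
 * priv, flags, sg, out, in and token are stand-ins for driver state.
 */
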
static void detach_buf(struct vring_virtqueue *vq, unsigned int head)
{
	unsigned int i;

	/* Clear data ptr. */
	vq->data[head] = NULL;

	/* Put back on free list: find end */
	i = head;

	/* Free the indirect table */
	if (vq->vring.desc[i].flags & VRING_DESC_F_INDIRECT)
		kfree(phys_to_virt(vq->vring.desc[i].addr));

	while (vq->vring.desc[i].flags & VRING_DESC_F_NEXT) {
		i = vq->vring.desc[i].next;
		vq->num_free++;
	}

	vq->vring.desc[i].next = vq->free_head;
	vq->free_head = head;
	/* Plus final descriptor */
	vq->num_free++;
}

static inline bool more_used(const struct vring_virtqueue *vq)
{
	return vq->last_used_idx != vq->vring.used->idx;
}

/**
 * vring_get_buf - get the next used buffer
 * @vq: the struct virtqueue we're talking about.
 * @len: the length written into the buffer
 *
 * If the device wrote data into the buffer, @len will be set to the
 * amount written.  This means you don't need to clear the buffer
 * beforehand to ensure there's no data leakage in the case of short
 * writes.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 *
 * Returns NULL if there are no used buffers, or the "data" token
 * handed to vring_add_buf().
 */
static void *vring_get_buf(struct virtqueue *_vq, unsigned int *len)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	void *ret;
	unsigned int i;
	u16 last_used;

	START_USE(vq);

	if (unlikely(vq->broken)) {
		END_USE(vq);
		return NULL;
	}

	if (!more_used(vq)) {
		pr_debug("No more buffers in queue\n");
		END_USE(vq);
		return NULL;
	}

	/* Only get used array entries after they have been exposed by host. */
	virtio_rmb(vq);

	last_used = (vq->last_used_idx & (vq->vring.num - 1));
	i = vq->vring.used->ring[last_used].id;
	*len = vq->vring.used->ring[last_used].len;

	if (unlikely(i >= vq->vring.num)) {
		BAD_RING(vq, "id %u out of range\n", i);
		return NULL;
	}
	if (unlikely(!vq->data[i])) {
		BAD_RING(vq, "id %u is not a head!\n", i);
		return NULL;
	}

	/* detach_buf clears data, so grab it now. */
	ret = vq->data[i];
	detach_buf(vq, i);
	vq->last_used_idx++;
	/* If we expect an interrupt for the next entry, tell host
	 * by writing event index and flush out the write before
	 * the read in the next get_buf call. */
	if (!(vq->vring.avail->flags & VRING_AVAIL_F_NO_INTERRUPT)) {
		vring_used_event(&vq->vring) = vq->last_used_idx;
		virtio_mb(vq);
	}

#ifdef DEBUG
	vq->last_add_time_valid = false;
#endif

	END_USE(vq);
	return ret;
}

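/*
 * Hypothetical completion loop (driver side): drain everything the host
 * has marked used, typically from the virtqueue callback:
 *
 *	unsigned int len;
 *	void *token;
 *
 *	while ((token = vq->vq_ops->get_buf(vq, &len)) != NULL)
 *		complete_request(token, len);
 *
 * complete_request() is a stand-in for whatever the driver does with a
 * finished buffer; len counts only bytes the device wrote.
 */
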
/**
 * vring_disable_cb - disable callbacks
 * @vq: the struct virtqueue we're talking about.
 *
 * Note that this is not necessarily synchronous, hence unreliable and only
 * useful as an optimization.
 *
 * Unlike other operations, this need not be serialized.
 */
static void vring_disable_cb(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
}

/**
 * virtqueue_enable_cb_prepare - first half of a split vring_enable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns current queue state
 * in an opaque unsigned value. This value should be later tested by
 * virtqueue_poll, to detect a possible race between the driver checking for
 * more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 last_used_idx;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx;
	END_USE(vq);
	return last_used_idx;
}
EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare);

/**
 * virtqueue_poll - query pending used buffers
 * @vq: the struct virtqueue we're talking about.
 * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare).
 *
 * Returns "true" if there are pending used buffers in the queue.
 *
 * This does not need to be serialized.
 */
bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	virtio_mb(vq);
	return (u16)last_used_idx != vq->vring.used->idx;
}
EXPORT_SYMBOL_GPL(virtqueue_poll);

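/*
 * Illustrative re-enable pattern (hypothetical driver code), closing the
 * race between "no more work" and "interrupts back on":
 *
 *	unsigned opaque;
 *
 *	opaque = virtqueue_enable_cb_prepare(vq);
 *	if (virtqueue_poll(vq, opaque)) {
 *		vq->vq_ops->disable_cb(vq);	(buffers raced in: keep polling)
 *		goto process_more;
 *	}
 *	(otherwise: sleep until the next callback fires)
 *
 * process_more is a stand-in label for the driver's processing loop.
 */
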
/**
 * vring_enable_cb - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks; it returns "false" if there are pending
 * buffers in the queue, to detect a possible race between the driver
 * checking for more work, and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
static bool vring_enable_cb(struct virtqueue *_vq)
{
	unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq);
	return !virtqueue_poll(_vq, last_used_idx);
}

/**
 * vring_enable_cb_delayed - restart callbacks after disable_cb.
 * @vq: the struct virtqueue we're talking about.
 *
 * This re-enables callbacks but hints to the other side to delay
 * interrupts until most of the available buffers have been processed;
 * it returns "false" if there are many pending buffers in the queue,
 * to detect a possible race between the driver checking for more work,
 * and enabling callbacks.
 *
 * Caller must ensure we don't call this with other virtqueue
 * operations at the same time (except where noted).
 */
static bool vring_enable_cb_delayed(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	u16 bufs;

	START_USE(vq);

	/* We optimistically turn back on interrupts, then check if there was
	 * more to do. */
	/* Depending on the VIRTIO_RING_F_EVENT_IDX feature, we need to
	 * either clear the flags bit or point the event index at the next
	 * entry. Always do both to keep code simple. */
	vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
	/* TODO: tune this threshold */
	bufs = (u16)(vq->vring.avail->idx - vq->last_used_idx) * 3 / 4;
	vring_used_event(&vq->vring) = vq->last_used_idx + bufs;
	virtio_mb(vq);
	if (unlikely((u16)(vq->vring.used->idx - vq->last_used_idx) > bufs)) {
		END_USE(vq);
		return false;
	}

	END_USE(vq);
	return true;
}

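/*
 * Worked example (illustrative): with avail->idx - last_used_idx == 64
 * buffers in flight, bufs == 48, so the used event index is parked 48
 * entries ahead and an event-idx host interrupts only after roughly 3/4
 * of the outstanding buffers complete.  Drivers such as virtio-net use
 * this style of deferral for TX completions, where a per-buffer
 * interrupt would be wasteful.
 */
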
/**
 * vring_detach_unused_buf - detach first unused buffer
 * @vq: the struct virtqueue we're talking about.
 *
 * Returns NULL or the "data" token handed to vring_add_buf().
 * This is not valid on an active queue; it is useful only for device
 * shutdown.
 */
static void *vring_detach_unused_buf(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);
	unsigned int i;
	void *buf;

	START_USE(vq);

	for (i = 0; i < vq->vring.num; i++) {
		if (!vq->data[i])
			continue;
		/* detach_buf clears data, so grab it now. */
		buf = vq->data[i];
		detach_buf(vq, i);
		vq->vring.avail->idx--;
		END_USE(vq);
		return buf;
	}
	/* That should have freed everything. */
	BUG_ON(vq->num_free != vq->vring.num);

	END_USE(vq);
	return NULL;
}

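/*
 * Typical shutdown sketch (hypothetical driver code): after resetting the
 * device so it can no longer touch the ring, reclaim every queued token:
 *
 *	void *token;
 *
 *	while ((token = vq->vq_ops->detach_unused_buf(vq)) != NULL)
 *		free_request(token);
 *
 * free_request() is a stand-in for the driver's cleanup of a buffer that
 * was posted but never consumed by the device.
 */
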
irqreturn_t vring_interrupt(int irq, void *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	if (!more_used(vq)) {
		pr_debug("virtqueue interrupt with no work for %p\n", vq);
		return IRQ_NONE;
	}

	if (unlikely(vq->broken))
		return IRQ_HANDLED;

	pr_debug("virtqueue callback for %p (%p)\n", vq, vq->vq.callback);
	if (vq->vq.callback)
		vq->vq.callback(&vq->vq);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(vring_interrupt);

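/*
 * Transport glue sketch (illustrative): a bus driver such as virtio-pci
 * wires its interrupt source to this handler, one virtqueue per vector:
 *
 *	err = request_irq(irq, vring_interrupt, IRQF_SHARED,
 *			  dev_name(&vdev->dev), vq);
 *
 * The IRQ_NONE return above is what makes IRQF_SHARED safe: spurious
 * interrupts with no used buffers are passed on to other handlers.
 */
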
/**
 * get_vring_size - return the size of the virtqueue's vring
 * @vq: the struct virtqueue containing the vring of interest.
 *
 * Returns the size of the vring.  This is mainly used for boasting to
 * userspace.  Unlike other operations, this need not be serialized.
 */
static unsigned int get_vring_size(struct virtqueue *_vq)
{
	struct vring_virtqueue *vq = to_vvq(_vq);

	return vq->vring.num;
}

static struct virtqueue_ops vring_vq_ops = {
	.add_buf = vring_add_buf,
	.get_buf = vring_get_buf,
	.kick = vring_kick,
	.kick_prepare = vring_kick_prepare,
	.kick_notify = vring_kick_notify,
	.disable_cb = vring_disable_cb,
	.enable_cb = vring_enable_cb,
	.enable_cb_delayed = vring_enable_cb_delayed,
	.detach_unused_buf = vring_detach_unused_buf,
	.get_impl_size = get_vring_size,
};

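/*
 * The ops table is what decouples drivers from this ring implementation:
 * drivers only ever call through vq->vq_ops, so an alternative virtqueue
 * layout could slot in its own table.  Hypothetical call site:
 *
 *	if (vq->vq_ops->get_impl_size(vq) < MIN_RING_ENTRIES)
 *		return -ENOSPC;
 *
 * MIN_RING_ENTRIES is a made-up driver constant, shown only to
 * illustrate the indirection.
 */
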
struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *),
				      void (*callback)(struct virtqueue *),
				      const char *name)
{
	struct vring_virtqueue *vq;
	unsigned int i;

	/* We assume num is a power of 2. */
	if (num & (num - 1)) {
		dev_warn(&vdev->dev, "Bad virtqueue length %u\n", num);
		return NULL;
	}

	vq = kmalloc(sizeof(*vq) + sizeof(void *)*num, GFP_KERNEL);
	if (!vq)
		return NULL;

	vring_init(&vq->vring, num, pages, vring_align);
	vq->vq.callback = callback;
	vq->vq.vdev = vdev;
	vq->vq.vq_ops = &vring_vq_ops;
	vq->vq.name = name;
	vq->notify = notify;
	vq->weak_barriers = weak_barriers;
	vq->broken = false;
	vq->last_used_idx = 0;
	vq->num_added = 0;
	list_add_tail(&vq->vq.list, &vdev->vqs);
#ifdef DEBUG
	vq->in_use = false;
	vq->last_add_time_valid = false;
#endif

	vq->indirect = virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC);
	vq->event = virtio_has_feature(vdev, VIRTIO_RING_F_EVENT_IDX);

	/* No callback?  Tell other side not to bother us. */
	if (!callback)
		vq->vring.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;

	/* Put everything in free lists. */
	vq->num_free = num;
	vq->free_head = 0;
	for (i = 0; i < num-1; i++) {
		vq->vring.desc[i].next = i+1;
		vq->data[i] = NULL;
	}
	vq->data[i] = NULL;

	return &vq->vq;
}
EXPORT_SYMBOL_GPL(vring_new_virtqueue);

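/*
 * Illustrative transport call (hypothetical values): a bus driver
 * allocates the ring pages, then hands them here:
 *
 *	pages = alloc_pages_exact(vring_size(num, VIRTIO_PCI_VRING_ALIGN),
 *				  GFP_KERNEL | __GFP_ZERO);
 *	vq = vring_new_virtqueue(num, VIRTIO_PCI_VRING_ALIGN, vdev,
 *				 true, pages, vp_notify, callback, name);
 *
 * vp_notify is a virtio-pci style notify hook used as a stand-in; num
 * must be a power of two per the check above.
 */
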
void vring_del_virtqueue(struct virtqueue *vq)
{
	list_del(&vq->list);
	kfree(to_vvq(vq));
}
EXPORT_SYMBOL_GPL(vring_del_virtqueue);

/* Manipulates transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev)
{
	unsigned int i;

	for (i = VIRTIO_TRANSPORT_F_START; i < VIRTIO_TRANSPORT_F_END; i++) {
		switch (i) {
		case VIRTIO_RING_F_INDIRECT_DESC:
			break;
		case VIRTIO_RING_F_EVENT_IDX:
			break;
		default:
			/* We don't understand this bit. */
			clear_bit(i, vdev->features);
		}
	}
}
EXPORT_SYMBOL_GPL(vring_transport_features);

MODULE_LICENSE("GPL");