#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H
/* An interface for efficient virtio implementation, currently for use by KVM
 * and lguest, but hopefully others soon. Do NOT change this since it will
 * break existing servers and clients.
 *
 * This header is BSD licensed so anyone can use the definitions to implement
 * compatible drivers/servers.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of IBM nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL IBM OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * Copyright Rusty Russell IBM Corporation 2007. */
#include <linux/types.h>

/* This marks a buffer as continuing via the next field. */
#define VRING_DESC_F_NEXT	1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE	2
/* This means the buffer contains a list of buffer descriptors. */
#define VRING_DESC_F_INDIRECT	4

/* The Host uses this in used->flags to advise the Guest: don't kick me when
 * you add a buffer. It's unreliable, so it's simply an optimization. Guest
 * will still kick if it's out of buffers. */
#define VRING_USED_F_NO_NOTIFY	1
/* The Guest uses this in avail->flags to advise the Host: don't interrupt me
 * when you consume a buffer. It's unreliable, so it's simply an
 * optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT	1
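
/*
 * Illustrative sketch only (not part of this header): with the flags above, a
 * Guest that prefers to poll can hint that it does not need interrupts,
 * assuming "vr" is an initialized struct vring:
 *
 *	vr.avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;	// suppress interrupts (hint)
 *	vr.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;	// ask for interrupts again
 *
 * Both flags are only hints; as noted above they are unreliable, so each side
 * must tolerate spurious kicks and interrupts.
 */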

/* We support indirect buffer descriptors */
#define VIRTIO_RING_F_INDIRECT_DESC	28

/* The Guest publishes the used index for which it expects an interrupt
 * at the end of the avail ring. Host should ignore the avail->flags field. */
/* The Host publishes the avail index for which it expects a kick
 * at the end of the used ring. Guest should ignore the used->flags field. */
#define VIRTIO_RING_F_EVENT_IDX		29

/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc {
	/* Address (guest-physical). */
	__u64 addr;
	/* Length. */
	__u32 len;
	/* The flags as indicated above. */
	__u16 flags;
	/* We chain unused descriptors via this, too */
	__u16 next;
};
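
/*
 * Illustrative sketch only (not part of this header): a two-descriptor chain,
 * e.g. a device-readable header followed by a device-writable buffer. The
 * names hdr_phys, hdr_len, buf_phys and buf_len are placeholders:
 *
 *	vr.desc[0].addr  = hdr_phys;
 *	vr.desc[0].len   = hdr_len;
 *	vr.desc[0].flags = VRING_DESC_F_NEXT;		// chain continues
 *	vr.desc[0].next  = 1;
 *
 *	vr.desc[1].addr  = buf_phys;
 *	vr.desc[1].len   = buf_len;
 *	vr.desc[1].flags = VRING_DESC_F_WRITE;		// device writes this one
 *	vr.desc[1].next  = 0;				// ignored: no NEXT flag
 *
 * The chain becomes visible to the Host by writing its head index (0 here)
 * into avail->ring[] and then incrementing avail->idx.
 */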

struct vring_avail {
	__u16 flags;
	__u16 idx;
	__u16 ring[];
};

/* u32 is used here for ids for padding reasons. */
struct vring_used_elem {
	/* Index of start of used descriptor chain. */
	__u32 id;
	/* Total length of the descriptor chain which was used (written to) */
	__u32 len;
};

struct vring_used {
	__u16 flags;
	__u16 idx;
	struct vring_used_elem ring[];
};

struct vring {
	unsigned int num;

	struct vring_desc *desc;

	struct vring_avail *avail;

	struct vring_used *used;
};

/* The standard layout for the ring is a contiguous chunk of memory which
 * looks like this. We assume num is a power of 2.
 *
 * struct vring
 * {
 *	// The actual descriptors (16 bytes each)
 *	struct vring_desc desc[num];
 *
 *	// A ring of available descriptor heads with free-running index.
 *	__u16 avail_flags;
 *	__u16 avail_idx;
 *	__u16 available[num];
 *	__u16 used_event_idx;
 *
 *	// Padding to the next align boundary.
 *	char pad[];
 *
 *	// A ring of used descriptor heads with free-running index.
 *	__u16 used_flags;
 *	__u16 used_idx;
 *	struct vring_used_elem used[num];
 *	__u16 avail_event_idx;
 * };
 */
/* We publish the used event index at the end of the available ring, and vice
 * versa. They are at the end for backwards compatibility. */
#define vring_used_event(vr)	((vr)->avail->ring[(vr)->num])
#define vring_avail_event(vr)	(*(__u16 *)&(vr)->used->ring[(vr)->num])
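
/*
 * Illustrative sketch only (not part of this header), assuming both sides
 * negotiated VIRTIO_RING_F_EVENT_IDX: the Guest publishes which used index
 * should trigger the next interrupt, and the Host symmetrically publishes
 * which avail index should trigger the next kick:
 *
 *	// Guest: interrupt me once used->idx advances past last_seen_used_idx
 *	vring_used_event(&vr) = last_seen_used_idx;
 *
 *	// Host: kick me once avail->idx advances past next_avail_to_process
 *	vring_avail_event(&vr) = next_avail_to_process;
 *
 * last_seen_used_idx and next_avail_to_process are placeholder names for the
 * free-running indexes each side tracks privately.
 */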

static inline void vring_init(struct vring *vr, unsigned int num, void *p,
			      unsigned long align)
{
	vr->num = num;
	vr->desc = p;
	vr->avail = p + num*sizeof(struct vring_desc);
	vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + align-1)
			    & ~(align - 1));
}

static inline unsigned vring_size(unsigned int num, unsigned long align)
{
	return ((sizeof(struct vring_desc) * num + sizeof(__u16) * (2 + num)
		 + align - 1) & ~(align - 1))
		+ sizeof(__u16) * 3 + sizeof(struct vring_used_elem) * num;
}

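/*
 * Illustrative sketch only (not part of this header): a transport would
 * typically size and lay out a ring like this, assuming num = 256 entries,
 * PAGE_SIZE alignment, and a placeholder alloc_ring_pages() standing in for
 * whatever zeroed, physically contiguous allocator is actually used:
 *
 *	struct vring vr;
 *	unsigned int num = 256;
 *	void *p = alloc_ring_pages(vring_size(num, PAGE_SIZE));
 *
 *	vring_init(&vr, num, p, PAGE_SIZE);
 *	// vr.desc  == p
 *	// vr.avail == p + num * sizeof(struct vring_desc)
 *	// vr.used  == &vr.avail->ring[num] rounded up to a PAGE_SIZE boundary
 */
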
/* The following is used with VIRTIO_RING_F_EVENT_IDX (the used_event and
 * avail_event indexes published via vring_used_event()/vring_avail_event()). */
/* Assuming a given event_idx value from the other side, if
 * we have just incremented index from old to new_idx,
 * should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
{
	/* Note: Xen has similar logic for notification hold-off
	 * in include/xen/interface/io/ring.h with req_event and req_prod
	 * corresponding to event_idx + 1 and new_idx respectively.
	 * Note also that req_event and req_prod in Xen start at 1,
	 * event indexes in virtio start at 0. */
	return (__u16)(new_idx - event_idx - 1) < (__u16)(new_idx - old);
}
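
/*
 * Illustrative sketch only (not part of this header): on the device (Host)
 * side, after advancing used->idx from old_used_idx to vr.used->idx, the
 * decision to interrupt the Guest could look like this; old_used_idx and
 * notify_guest() are placeholders:
 *
 *	if (vring_need_event(vring_used_event(&vr), vr.used->idx, old_used_idx))
 *		notify_guest();
 *
 * The Guest makes the mirror-image check with vring_avail_event() before
 * kicking the Host about newly available buffers.
 */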

#ifdef __KERNEL__
#include <linux/irqreturn.h>
struct virtio_device;
struct virtqueue;

struct virtqueue *vring_new_virtqueue(unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

irqreturn_t vring_interrupt(int irq, void *_vq);
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_RING_H */