#ifndef _LINUX_VIRTIO_RING_H
#define _LINUX_VIRTIO_RING_H

#include <asm/barrier.h>
#include <linux/irqreturn.h>
#include <uapi/linux/virtio_ring.h>

/*
 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
 * they're not on an SMP host system, so they need to assume real
 * barriers. Non-SMP virtio hosts could skip the barriers, but does
 * anyone care?
 *
 * For virtio_pci on SMP, we don't need to order with respect to MMIO
 * accesses through relaxed memory I/O windows, so virt_mb() et al are
 * sufficient.
 *
 * For using virtio to talk to real devices (e.g. other heterogeneous
 * CPUs) we do need real barriers. In theory, we could be using both
 * kinds of virtio, so it's a runtime decision, and the branch is
 * actually quite cheap.
 */

static inline void virtio_mb(bool weak_barriers)
{
	if (weak_barriers)
		virt_mb();
	else
		mb();
}

static inline void virtio_rmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_rmb();
	else
		rmb();
}

static inline void virtio_wmb(bool weak_barriers)
{
	if (weak_barriers)
		virt_wmb();
	else
		wmb();
}

static inline void virtio_store_mb(bool weak_barriers,
				   __virtio16 *p, __virtio16 v)
{
	if (weak_barriers) {
		virt_store_mb(*p, v);
	} else {
		WRITE_ONCE(*p, v);
		mb();
	}
}

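/*
 * Pairing sketch (illustrative only; the helper calls are real, the
 * surrounding function names are made up): a producer makes its ring
 * writes visible before publishing the index that covers them, and a
 * consumer orders its index read before reading those entries.
 *
 *	// producer
 *	write_ring_entries();			// hypothetical
 *	virtio_wmb(weak_barriers);
 *	publish_new_index();			// hypothetical
 *
 *	// consumer
 *	idx = read_published_index();		// hypothetical
 *	virtio_rmb(weak_barriers);
 *	read_ring_entries(idx);			// hypothetical
 *
 * virtio_store_mb() serves the "store an index, then immediately re-read
 * the other side's state" pattern, where the store must be separated
 * from the following load by a full barrier.
 */
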
struct virtio_device;
struct virtqueue;

/*
 * Creates a virtqueue and allocates the descriptor ring. If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected. The caller should query virtqueue_get_vring_size() to learn
 * the actual size of the ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);

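/*
 * Creation sketch (illustrative only; my_notify, my_callback and the
 * particular numbers are made up, not part of this header): a transport
 * asks for up to 256 entries, lets the core shrink the ring if the
 * allocation fails, checks what it actually got, and later tears the
 * queue down with vring_del_virtqueue().
 *
 *	struct virtqueue *vq;
 *	unsigned int actual;
 *
 *	vq = vring_create_virtqueue(0, 256, PAGE_SIZE, vdev,
 *				    true,	// weak_barriers
 *				    true,	// may_reduce_num
 *				    false,	// ctx
 *				    my_notify, my_callback, "requests");
 *	if (!vq)
 *		return -ENOMEM;
 *	actual = virtqueue_get_vring_size(vq);
 *
 *	// ... use the queue ...
 *
 *	vring_del_virtqueue(vq);
 */
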
/* Creates a virtqueue with a custom layout. */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);

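/*
 * Layout sketch (illustrative only; queue_mem, my_notify and my_callback
 * are made-up names): the caller describes an already laid-out ring with
 * a struct vring, here filled in for the standard layout by vring_init()
 * from the uapi header included above, and must keep that memory alive
 * for the lifetime of the virtqueue.
 *
 *	struct vring vring;
 *
 *	vring_init(&vring, num, queue_mem, PAGE_SIZE);
 *	vq = __vring_new_virtqueue(index, vring, vdev,
 *				   true,	// weak_barriers
 *				   false,	// ctx
 *				   my_notify, my_callback, "requests");
 */
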
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);

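/*
 * Allocation sketch (illustrative only; the allocator choice and names
 * are made up): the caller sizes the ring with vring_size() from the
 * uapi header, hands in zeroed memory as "pages", and keeps ownership of
 * it; vring_del_virtqueue() will not free caller-allocated memory.
 *
 *	void *queue = alloc_pages_exact(vring_size(num, PAGE_SIZE),
 *					GFP_KERNEL | __GFP_ZERO);
 *
 *	if (!queue)
 *		return -ENOMEM;
 *	vq = vring_new_virtqueue(index, num, PAGE_SIZE, vdev,
 *				 true,		// weak_barriers
 *				 false,		// ctx
 *				 queue, my_notify, my_callback, "requests");
 */
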
/*
 * Destroys a virtqueue. If created with vring_create_virtqueue, this
 * also frees the ring.
 */
void vring_del_virtqueue(struct virtqueue *vq);

/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);

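/*
 * Usage sketch (illustrative only; my_finalize_features is a made-up
 * name, and the step that writes the final feature set to the device is
 * omitted): transports typically call this from their .finalize_features
 * hook so that transport feature bits the ring code does not handle are
 * dropped before the features are committed.
 *
 *	static int my_finalize_features(struct virtio_device *vdev)
 *	{
 *		vring_transport_features(vdev);
 *		return 0;
 *	}
 */
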
irqreturn_t vring_interrupt(int irq, void *_vq);
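
/*
 * Wiring sketch (illustrative only; irq and vq come from the transport):
 * vring_interrupt() matches the irq_handler_t signature, so a transport
 * with a dedicated interrupt per virtqueue can register it directly,
 * much as virtio_pci does for per-queue MSI-X vectors.
 *
 *	err = request_irq(irq, vring_interrupt, 0, "my-vq", vq);
 */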
#endif /* _LINUX_VIRTIO_RING_H */