blob: 3dc70adfe5f5edbbfa6f6508849d1d3630a46e2b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001/* SPDX-License-Identifier: GPL-2.0 */
Rusty Russell0a8a69d2007-10-22 11:03:40 +10002#ifndef _LINUX_VIRTIO_RING_H
3#define _LINUX_VIRTIO_RING_H
Rusty Russell0a8a69d2007-10-22 11:03:40 +10004
Michael S. Tsirkinc5610a5d2013-07-08 11:31:06 +09305#include <asm/barrier.h>
Rusty Russell0a8a69d2007-10-22 11:03:40 +10006#include <linux/irqreturn.h>
David Howells607ca462012-10-13 10:46:48 +01007#include <uapi/linux/virtio_ring.h>
8
Rusty Russella9a0fef2013-03-18 13:22:19 +10309/*
10 * Barriers in virtio are tricky. Non-SMP virtio guests can't assume
11 * they're not on an SMP host system, so they need to assume real
12 * barriers. Non-SMP virtio hosts could skip the barriers, but does
13 * anyone care?
14 *
15 * For virtio_pci on SMP, we don't need to order with respect to MMIO
Michael S. Tsirkina6596122015-12-27 17:55:35 +020016 * accesses through relaxed memory I/O windows, so virt_mb() et al are
Rusty Russella9a0fef2013-03-18 13:22:19 +103017 * sufficient.
18 *
19 * For using virtio to talk to real devices (eg. other heterogeneous
20 * CPUs) we do need real barriers. In theory, we could be using both
21 * kinds of virtio, so it's a runtime decision, and the branch is
22 * actually quite cheap.
23 */
24
Rusty Russella9a0fef2013-03-18 13:22:19 +103025static inline void virtio_mb(bool weak_barriers)
26{
27 if (weak_barriers)
Michael S. Tsirkina6596122015-12-27 17:55:35 +020028 virt_mb();
Rusty Russella9a0fef2013-03-18 13:22:19 +103029 else
30 mb();
31}
32
33static inline void virtio_rmb(bool weak_barriers)
34{
35 if (weak_barriers)
Michael S. Tsirkina6596122015-12-27 17:55:35 +020036 virt_rmb();
Rusty Russella9a0fef2013-03-18 13:22:19 +103037 else
Michael S. Tsirkin55e49dc2018-04-19 20:29:41 +030038 dma_rmb();
Rusty Russella9a0fef2013-03-18 13:22:19 +103039}
40
41static inline void virtio_wmb(bool weak_barriers)
42{
43 if (weak_barriers)
Michael S. Tsirkina6596122015-12-27 17:55:35 +020044 virt_wmb();
Rusty Russella9a0fef2013-03-18 13:22:19 +103045 else
Michael S. Tsirkin55e49dc2018-04-19 20:29:41 +030046 dma_wmb();
Rusty Russella9a0fef2013-03-18 13:22:19 +103047}
Rusty Russella9a0fef2013-03-18 13:22:19 +103048
Michael S. Tsirkin788e5b32015-12-17 12:20:39 +020049static inline void virtio_store_mb(bool weak_barriers,
50 __virtio16 *p, __virtio16 v)
51{
52 if (weak_barriers) {
53 virt_store_mb(*p, v);
54 } else {
55 WRITE_ONCE(*p, v);
56 mb();
57 }
58}
59
Rusty Russell0a8a69d2007-10-22 11:03:40 +100060struct virtio_device;
61struct virtqueue;
62
/*
 * Creates a virtqueue and allocates the descriptor ring.  If
 * may_reduce_num is set, then this may allocate a smaller ring than
 * expected.  The caller should query virtqueue_get_vring_size to learn
 * the actual size of the ring.
 *
 * @weak_barriers selects SMP-style vs. mandatory barriers (see the
 * barrier helpers above).  @notify and @callback are invoked with the
 * created virtqueue; @ctx presumably enables per-descriptor context —
 * NOTE(review): confirm against the vring implementation.  The queue
 * is freed with vring_del_virtqueue(), which also frees this ring.
 */
struct virtqueue *vring_create_virtqueue(unsigned int index,
					 unsigned int num,
					 unsigned int vring_align,
					 struct virtio_device *vdev,
					 bool weak_barriers,
					 bool may_reduce_num,
					 bool ctx,
					 bool (*notify)(struct virtqueue *vq),
					 void (*callback)(struct virtqueue *vq),
					 const char *name);
79
/*
 * Creates a virtqueue with a custom layout: the caller supplies the
 * struct vring (by value), so no ring memory is allocated here.
 * Ownership of the ring memory stays with the caller — presumably it
 * must outlive the virtqueue; verify against callers.
 */
struct virtqueue *__vring_new_virtqueue(unsigned int index,
					struct vring vring,
					struct virtio_device *vdev,
					bool weak_barriers,
					bool ctx,
					bool (*notify)(struct virtqueue *),
					void (*callback)(struct virtqueue *),
					const char *name);
89
/*
 * Creates a virtqueue with a standard layout but a caller-allocated
 * ring.  @pages is the caller-provided ring memory (aligned to
 * @vring_align — NOTE(review): alignment requirement inferred from the
 * parameter name; confirm against the implementation).  Unlike
 * vring_create_virtqueue(), the ring is not freed by
 * vring_del_virtqueue().
 */
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      bool ctx,
				      void *pages,
				      bool (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -0800104
/*
 * Destroys a virtqueue.  If created with vring_create_virtqueue, this
 * also frees the ring.  (Queues made with vring_new_virtqueue or
 * __vring_new_virtqueue keep caller-owned ring memory, which is not
 * freed here.)
 */
void vring_del_virtqueue(struct virtqueue *vq);
Andy Lutomirski2a2d1382016-02-02 21:46:37 -0800110
/*
 * Filter out transport-specific feature bits.  Mutates @vdev's feature
 * set in place — presumably clearing bits the vring transport does not
 * implement; confirm against the definition in virtio_ring.c.
 */
void vring_transport_features(struct virtio_device *vdev);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000113
114irqreturn_t vring_interrupt(int irq, void *_vq);
Rusty Russell0a8a69d2007-10-22 11:03:40 +1000115#endif /* _LINUX_VIRTIO_RING_H */