#ifndef LINUX_VIRTIO_H
#define LINUX_VIRTIO_H
#include <linux/scatterlist.h>
#include <linux/kernel.h>

/* TODO: empty stubs for now. Broken but enough for virtio_ring.c */
#define list_add_tail(a, b) do {} while (0)
#define list_del(a) do {} while (0)

#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BITS_PER_BYTE	8
#define BITS_PER_LONG	(sizeof(long) * BITS_PER_BYTE)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

/* TODO: Not atomic as it should be:
 * we don't use this for anything important. */
static inline void clear_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);

	*p &= ~mask;
}

static inline int test_bit(int nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[BIT_WORD(nr)] >> (nr & (BITS_PER_LONG-1)));
}
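
/*
 * Illustrative sketch only (feature_bits is a hypothetical name, not
 * part of this header): both helpers address an unsigned long bitmap
 * word by word via BIT_WORD()/BIT_MASK(), like the features[] array
 * of struct virtio_device below:
 *
 *	unsigned long feature_bits[1] = { 1UL << 3 };
 *
 *	test_bit(3, feature_bits);	evaluates to 1
 *	clear_bit(3, feature_bits);	bit 3 cleared, test_bit() now 0
 */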
/* end of stubs */

struct virtio_device {
	void *dev;
	unsigned long features[1];
};
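
/*
 * These are cut-down stand-ins for the kernel structures: struct
 * virtio_device above keeps only an opaque device pointer and the
 * feature bitmap, and struct virtqueue below mirrors the field
 * meanings of include/linux/virtio.h:
 * @callback: invoked when the device has used buffers (may be NULL)
 * @name: identifying name of this queue
 * @vdev: the virtio device this queue belongs to
 * @index: zero-based ordinal number of the queue
 * @num_free: number of free descriptor slots left in the ring
 * @priv: private pointer for the ring implementation to use
 */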
struct virtqueue {
	/* TODO: commented as list macros are empty stubs for now.
	 * Broken but enough for virtio_ring.c
	 * struct list_head list; */
	void (*callback)(struct virtqueue *vq);
	const char *name;
	struct virtio_device *vdev;
	unsigned int index;
	unsigned int num_free;
	void *priv;
};

#define MODULE_LICENSE(__MODULE_LICENSE_value) \
	const char *__MODULE_LICENSE_name = __MODULE_LICENSE_value
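
/*
 * For reference: in this userspace build, MODULE_LICENSE("GPL") just
 * expands to
 *
 *	const char *__MODULE_LICENSE_name = "GPL"
 *
 * so that virtio_ring.c can be compiled unchanged outside the kernel.
 */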

/* Interfaces exported by virtio_ring. */
int virtqueue_add_buf(struct virtqueue *vq,
		      struct scatterlist sg[],
		      unsigned int out_num,
		      unsigned int in_num,
		      void *data,
		      gfp_t gfp);

void virtqueue_kick(struct virtqueue *vq);

void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);

void virtqueue_disable_cb(struct virtqueue *vq);

bool virtqueue_enable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb_delayed(struct virtqueue *vq);

void *virtqueue_detach_unused_buf(struct virtqueue *vq);
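
/*
 * Illustrative calling sequence only (buf, len, used_len and token
 * are hypothetical caller-side names, not part of this header): one
 * device-readable ("out") buffer is queued, the device is kicked,
 * and the completion is reclaimed later; sg_init_one() is the usual
 * scatterlist helper from linux/scatterlist.h included above.
 *
 *	struct scatterlist sg;
 *	unsigned int used_len;
 *	void *token;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (virtqueue_add_buf(vq, &sg, 1, 0, buf, GFP_ATOMIC) < 0)
 *		return;				ring full, retry later
 *	virtqueue_kick(vq);
 *	...
 *	token = virtqueue_get_buf(vq, &used_len);
 *						NULL until the device has
 *						used the buffer; otherwise
 *						the data cookie ("buf" here)
 */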
struct virtqueue *vring_new_virtqueue(unsigned int index,
				      unsigned int num,
				      unsigned int vring_align,
				      struct virtio_device *vdev,
				      bool weak_barriers,
				      void *pages,
				      void (*notify)(struct virtqueue *vq),
				      void (*callback)(struct virtqueue *vq),
				      const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
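
/*
 * Rough setup and teardown sketch only (the allocation details, vdev,
 * my_notify and my_callback are assumptions of this example, not
 * dictated by this header): num must be a power of two, pages must
 * point at vring_size(num, vring_align) bytes of zeroed, suitably
 * aligned memory (vring_size() comes from linux/virtio_ring.h),
 * notify runs on virtqueue_kick() and callback when used buffers
 * arrive:
 *
 *	void *ring;
 *	struct virtqueue *vq;
 *
 *	posix_memalign(&ring, 4096, vring_size(256, 4096));
 *	memset(ring, 0, vring_size(256, 4096));
 *	vq = vring_new_virtqueue(0, 256, 4096, &vdev, true, ring,
 *				 my_notify, my_callback, "test");
 *	...
 *	vring_del_virtqueue(vq);
 */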

#endif