#ifndef _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
#define _DRIVERS_VIRTIO_VIRTIO_PCI_COMMON_H
/*
 * Virtio PCI driver - APIs for common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include <linux/module.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>
#include <linux/virtio_pci.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>

struct virtio_pci_vq_info {
	/* the actual virtqueue */
	struct virtqueue *vq;

	/* the number of entries in the queue */
	int num;

	/* the virtual address of the ring queue */
	void *queue;

	/* the list node for the virtqueues list */
	struct list_head node;

	/* MSI-X vector (or none) */
	unsigned msix_vector;
};

/* Our device structure */
struct virtio_pci_device {
	struct virtio_device vdev;
	struct pci_dev *pci_dev;

	/* Where to read and clear interrupt status.
	 * In legacy mode this points into the legacy BAR mapping (->ioaddr). */
	u8 __iomem *isr;

	/* Modern only fields */
	/* The IO mapping for the PCI config space (non-legacy mode) */
	struct virtio_pci_common_cfg __iomem *common;
	/* Device-specific data (non-legacy mode) */
	void __iomem *device;
	/* Base of vq notifications (non-legacy mode). */
	void __iomem *notify_base;

	/* So we can sanity-check accesses. */
	size_t notify_len;
	size_t device_len;

	/* Capability for when we need to map notifications per-vq. */
	int notify_map_cap;

	/* Multiply queue_notify_off by this value (non-legacy mode). */
	u32 notify_offset_multiplier;

	/* Bitmask of BARs used by the modern interface */
	int modern_bars;

	/* Legacy only field */
	/* the IO mapping for the PCI config space */
	void __iomem *ioaddr;

	/* a list of queues so we can dispatch IRQs */
	spinlock_t lock;
	struct list_head virtqueues;

	/* array of all queues for house-keeping */
	struct virtio_pci_vq_info **vqs;

	/* MSI-X support */
	int msix_enabled;
	int intx_enabled;
	struct msix_entry *msix_entries;
	cpumask_var_t *msix_affinity_masks;
	/* Name strings for interrupts. This size should be enough,
	 * and I'm too lazy to allocate each name separately. */
	char (*msix_names)[256];
	/* Number of available vectors */
	unsigned msix_vectors;
	/* Vectors allocated, excluding per-vq vectors if any */
	unsigned msix_used_vectors;

	/* Whether we have vector per vq */
	bool per_vq_vectors;

	struct virtqueue *(*setup_vq)(struct virtio_pci_device *vp_dev,
				      struct virtio_pci_vq_info *info,
				      unsigned idx,
				      void (*callback)(struct virtqueue *vq),
				      const char *name,
				      u16 msix_vec);
	void (*del_vq)(struct virtio_pci_vq_info *info);

	u16 (*config_vector)(struct virtio_pci_device *vp_dev, u16 vector);
};
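
/*
 * Illustrative sketch only (the helper below is hypothetical, not part of
 * the in-tree API): with the modern layout, a queue's notification address
 * is derived from notify_base, the queue_notify_off value read from the
 * common config, and notify_offset_multiplier; accesses are expected to
 * stay within notify_len.
 */
static inline void __iomem *vp_example_notify_addr(struct virtio_pci_device *vp_dev,
						   u16 queue_notify_off)
{
	return vp_dev->notify_base +
	       queue_notify_off * vp_dev->notify_offset_multiplier;
}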

/* Constants for MSI-X */
/* Use the first vector for configuration changes, the second and the rest
 * for virtqueues. Thus, we need at least 2 vectors for MSI-X. */
enum {
	VP_MSIX_CONFIG_VECTOR = 0,
	VP_MSIX_VQ_VECTOR = 1,
};
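
/*
 * Illustrative sketch only (hypothetical helper, not the in-tree
 * allocator): the config-change interrupt uses VP_MSIX_CONFIG_VECTOR;
 * virtqueues either share VP_MSIX_VQ_VECTOR or, when per_vq_vectors is
 * set, take consecutive vectors starting at VP_MSIX_VQ_VECTOR, while
 * queues without a callback get VIRTIO_MSI_NO_VECTOR. Simplified here by
 * assuming every virtqueue has a callback.
 */
static inline u16 vp_example_vq_vector(bool per_vq_vectors, bool has_callback,
				       unsigned int vq_index)
{
	if (!has_callback)
		return VIRTIO_MSI_NO_VECTOR;
	return per_vq_vectors ? VP_MSIX_VQ_VECTOR + vq_index : VP_MSIX_VQ_VECTOR;
}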

/* Convert a generic virtio device to our structure */
static struct virtio_pci_device *to_vp_device(struct virtio_device *vdev)
{
	return container_of(vdev, struct virtio_pci_device, vdev);
}

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev);
/* the notify function used when creating a virt queue */
bool vp_notify(struct virtqueue *vq);
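/* (Illustrative note: vp_notify signals the device by writing the queue
 * index to the per-queue notify address stashed in vq->priv, roughly
 * iowrite16(vq->index, (void __iomem *)vq->priv); see virtio_pci_common.c.) */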
/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev);
/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
		struct virtqueue *vqs[],
		vq_callback_t *callbacks[],
		const char *names[]);
const char *vp_bus_name(struct virtio_device *vdev);

/* Set up the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
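
/*
 * Illustrative sketch only (not the in-tree tables): the legacy and modern
 * backends are expected to plug the common helpers above into their
 * struct virtio_config_ops, roughly as follows (device-specific
 * get/set/status/reset callbacks omitted):
 *
 *	static const struct virtio_config_ops example_config_ops = {
 *		.find_vqs	 = vp_find_vqs,
 *		.del_vqs	 = vp_del_vqs,
 *		.bus_name	 = vp_bus_name,
 *		.set_vq_affinity = vp_set_vq_affinity,
 *	};
 */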

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
int virtio_pci_legacy_probe(struct virtio_pci_device *);
void virtio_pci_legacy_remove(struct virtio_pci_device *);
#else
static inline int virtio_pci_legacy_probe(struct virtio_pci_device *vp_dev)
{
	return -ENODEV;
}
static inline void virtio_pci_legacy_remove(struct virtio_pci_device *vp_dev)
{
}
#endif
int virtio_pci_modern_probe(struct virtio_pci_device *);
void virtio_pci_modern_remove(struct virtio_pci_device *);
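
/*
 * Rough sketch of how the common probe path is expected to use the two
 * backends (illustrative, not the in-tree function): try the modern
 * interface first and fall back to legacy when the device exposes no
 * modern capabilities:
 *
 *	rc = virtio_pci_modern_probe(vp_dev);
 *	if (rc == -ENODEV)
 *		rc = virtio_pci_legacy_probe(vp_dev);
 */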

#endif