/*
 * Virtio PCI driver - common functionality for all device versions
 *
 * This module allows virtio devices to be used over a virtual PCI device.
 * This can be used with QEMU based VMMs like KVM or Xen.
 *
 * Copyright IBM Corp. 2007
 * Copyright Red Hat, Inc. 2014
 *
 * Authors:
 *  Anthony Liguori  <aliguori@us.ibm.com>
 *  Rusty Russell <rusty@rustcorp.com.au>
 *  Michael S. Tsirkin <mst@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "virtio_pci_common.h"

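/*
 * Transitional devices expose both the legacy and the virtio 1 ("modern")
 * interface.  Loading this module with force_legacy=1 makes such devices
 * bind through the legacy interface instead; the current value is readable
 * at /sys/module/virtio_pci/parameters/force_legacy.
 */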
static bool force_legacy = false;

#if IS_ENABLED(CONFIG_VIRTIO_PCI_LEGACY)
module_param(force_legacy, bool, 0444);
MODULE_PARM_DESC(force_legacy,
                 "Force legacy mode for transitional virtio 1 devices");
#endif

/* wait for pending irq handlers */
void vp_synchronize_vectors(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i;

        if (vp_dev->intx_enabled)
                synchronize_irq(vp_dev->pci_dev->irq);

        for (i = 0; i < vp_dev->msix_vectors; ++i)
                synchronize_irq(pci_irq_vector(vp_dev->pci_dev, i));
}

/* the notify function used when creating a virtqueue */
bool vp_notify(struct virtqueue *vq)
{
        /* we write the queue's selector into the notification register to
         * signal the other end */
        iowrite16(vq->index, (void __iomem *)vq->priv);
        return true;
}

/* Handle a configuration change: Tell driver if it wants to know. */
static irqreturn_t vp_config_changed(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;

        virtio_config_changed(&vp_dev->vdev);
        return IRQ_HANDLED;
}

/* Notify all virtqueues on an interrupt. */
static irqreturn_t vp_vring_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        struct virtio_pci_vq_info *info;
        irqreturn_t ret = IRQ_NONE;
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_for_each_entry(info, &vp_dev->virtqueues, node) {
                if (vring_interrupt(irq, info->vq) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
        }
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        return ret;
}

/* A small wrapper to also acknowledge the interrupt when it's handled.
 * I really need an EIO hook for the vring so I can ack the interrupt once we
 * know that we'll be handling the IRQ but before we invoke the callback since
 * the callback may notify the host which results in the host attempting to
 * raise an interrupt that we would then mask once we acknowledged the
 * interrupt. */
static irqreturn_t vp_interrupt(int irq, void *opaque)
{
        struct virtio_pci_device *vp_dev = opaque;
        u8 isr;

        /* reading the ISR has the effect of also clearing it so it's very
         * important to save off the value. */
        isr = ioread8(vp_dev->isr);

        /* It's definitely not us if the ISR was not high */
        if (!isr)
                return IRQ_NONE;

        /* Configuration change? Tell driver if it wants to know. */
        if (isr & VIRTIO_PCI_ISR_CONFIG)
                vp_config_changed(irq, opaque);

        return vp_vring_interrupt(irq, opaque);
}

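/*
 * Allocate MSI-X vectors and request the interrupts they carry.  The first
 * vector is always dedicated to configuration-change notifications; the
 * virtqueues then either share one extra vector (per_vq_vectors == false)
 * or get one vector per callback-bearing queue, requested later by the
 * caller.  Any failure is returned as-is; the caller cleans up through
 * vp_del_vqs().
 */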
static int vp_request_msix_vectors(struct virtio_device *vdev, int nvectors,
                                   bool per_vq_vectors, struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        const char *name = dev_name(&vp_dev->vdev.dev);
        unsigned i, v;
        int err = -ENOMEM;

        vp_dev->msix_vectors = nvectors;

        vp_dev->msix_names = kmalloc(nvectors * sizeof *vp_dev->msix_names,
                                     GFP_KERNEL);
        if (!vp_dev->msix_names)
                goto error;
        vp_dev->msix_affinity_masks
                = kzalloc(nvectors * sizeof *vp_dev->msix_affinity_masks,
                          GFP_KERNEL);
        if (!vp_dev->msix_affinity_masks)
                goto error;
        for (i = 0; i < nvectors; ++i)
                if (!alloc_cpumask_var(&vp_dev->msix_affinity_masks[i],
                                       GFP_KERNEL))
                        goto error;

        err = pci_alloc_irq_vectors_affinity(vp_dev->pci_dev, nvectors,
                                             nvectors, PCI_IRQ_MSIX |
                                             (desc ? PCI_IRQ_AFFINITY : 0),
                                             desc);
        if (err < 0)
                goto error;
        vp_dev->msix_enabled = 1;

        /* Set the vector used for configuration */
        v = vp_dev->msix_used_vectors;
        snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                 "%s-config", name);
        err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                          vp_config_changed, 0, vp_dev->msix_names[v],
                          vp_dev);
        if (err)
                goto error;
        ++vp_dev->msix_used_vectors;

        v = vp_dev->config_vector(vp_dev, v);
        /* Verify we had enough resources to assign the vector */
        if (v == VIRTIO_MSI_NO_VECTOR) {
                err = -EBUSY;
                goto error;
        }

        if (!per_vq_vectors) {
                /* Shared vector for all VQs */
                v = vp_dev->msix_used_vectors;
                snprintf(vp_dev->msix_names[v], sizeof *vp_dev->msix_names,
                         "%s-virtqueues", name);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, v),
                                  vp_vring_interrupt, 0, vp_dev->msix_names[v],
                                  vp_dev);
                if (err)
                        goto error;
                ++vp_dev->msix_used_vectors;
        }
        return 0;
error:
        return err;
}

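/*
 * Create one virtqueue through the version-specific setup_vq backend
 * (legacy or modern) and remember it in vp_dev->vqs[].  Queues with a
 * callback are also linked into the virtqueues list so that
 * vp_vring_interrupt() can reach them when a shared interrupt fires.
 */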
static struct virtqueue *vp_setup_vq(struct virtio_device *vdev, unsigned index,
                                     void (*callback)(struct virtqueue *vq),
                                     const char *name,
                                     u16 msix_vec)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = kmalloc(sizeof *info, GFP_KERNEL);
        struct virtqueue *vq;
        unsigned long flags;

        /* fill out our structure that represents an active queue */
        if (!info)
                return ERR_PTR(-ENOMEM);

        vq = vp_dev->setup_vq(vp_dev, info, index, callback, name,
                              msix_vec);
        if (IS_ERR(vq))
                goto out_info;

        info->vq = vq;
        if (callback) {
                spin_lock_irqsave(&vp_dev->lock, flags);
                list_add(&info->node, &vp_dev->virtqueues);
                spin_unlock_irqrestore(&vp_dev->lock, flags);
        } else {
                INIT_LIST_HEAD(&info->node);
        }

        vp_dev->vqs[index] = info;
        return vq;

out_info:
        kfree(info);
        return vq;
}

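/*
 * Counterpart of vp_setup_vq(): unlink the queue from the interrupt
 * lookup list under the lock, then let the version-specific del_vq
 * backend destroy it and free the tracking info.
 */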
static void vp_del_vq(struct virtqueue *vq)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vq->vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        unsigned long flags;

        spin_lock_irqsave(&vp_dev->lock, flags);
        list_del(&info->node);
        spin_unlock_irqrestore(&vp_dev->lock, flags);

        vp_dev->del_vq(info);
        kfree(info);
}

/* the config->del_vqs() implementation */
void vp_del_vqs(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtqueue *vq, *n;
        int i;

        list_for_each_entry_safe(vq, n, &vdev->vqs, list) {
                if (vp_dev->per_vq_vectors) {
                        int v = vp_dev->vqs[vq->index]->msix_vector;

                        if (v != VIRTIO_MSI_NO_VECTOR) {
                                int irq = pci_irq_vector(vp_dev->pci_dev, v);

                                irq_set_affinity_hint(irq, NULL);
                                free_irq(irq, vq);
                        }
                }
                vp_del_vq(vq);
        }
        vp_dev->per_vq_vectors = false;

        if (vp_dev->intx_enabled) {
                free_irq(vp_dev->pci_dev->irq, vp_dev);
                vp_dev->intx_enabled = 0;
        }

        for (i = 0; i < vp_dev->msix_used_vectors; ++i)
                free_irq(pci_irq_vector(vp_dev->pci_dev, i), vp_dev);

        for (i = 0; i < vp_dev->msix_vectors; i++)
                if (vp_dev->msix_affinity_masks[i])
                        free_cpumask_var(vp_dev->msix_affinity_masks[i]);

        if (vp_dev->msix_enabled) {
                /* Disable the vector used for configuration */
                vp_dev->config_vector(vp_dev, VIRTIO_MSI_NO_VECTOR);

                pci_free_irq_vectors(vp_dev->pci_dev);
                vp_dev->msix_enabled = 0;
        }

        vp_dev->msix_vectors = 0;
        vp_dev->msix_used_vectors = 0;
        kfree(vp_dev->msix_names);
        vp_dev->msix_names = NULL;
        kfree(vp_dev->msix_affinity_masks);
        vp_dev->msix_affinity_masks = NULL;
        kfree(vp_dev->vqs);
        vp_dev->vqs = NULL;
}

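/*
 * Set up all virtqueues using MSI-X.  With per_vq_vectors every
 * callback-bearing queue gets its own vector (named "<device>-<queue>")
 * and the optional irq_affinity descriptor lets the PCI core spread
 * those vectors across CPUs; otherwise all queues share
 * VP_MSIX_VQ_VECTOR and only two vectors are allocated in total.
 */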
static int vp_find_vqs_msix(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], bool per_vq_vectors,
                struct irq_affinity *desc)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        u16 msix_vec;
        int i, err, nvectors, allocated_vectors;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        if (per_vq_vectors) {
                /* Best option: one for change interrupt, one per vq. */
                nvectors = 1;
                for (i = 0; i < nvqs; ++i)
                        if (callbacks[i])
                                ++nvectors;
        } else {
                /* Second best: one for change, shared for all vqs. */
                nvectors = 2;
        }

        err = vp_request_msix_vectors(vdev, nvectors, per_vq_vectors,
                                      per_vq_vectors ? desc : NULL);
        if (err)
                goto error_find;

        vp_dev->per_vq_vectors = per_vq_vectors;
        allocated_vectors = vp_dev->msix_used_vectors;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }

                if (!callbacks[i])
                        msix_vec = VIRTIO_MSI_NO_VECTOR;
                else if (vp_dev->per_vq_vectors)
                        msix_vec = allocated_vectors++;
                else
                        msix_vec = VP_MSIX_VQ_VECTOR;
                vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
                                     msix_vec);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto error_find;
                }

                if (!vp_dev->per_vq_vectors || msix_vec == VIRTIO_MSI_NO_VECTOR)
                        continue;

                /* allocate per-vq irq if available and necessary */
                snprintf(vp_dev->msix_names[msix_vec],
                         sizeof *vp_dev->msix_names,
                         "%s-%s",
                         dev_name(&vp_dev->vdev.dev), names[i]);
                err = request_irq(pci_irq_vector(vp_dev->pci_dev, msix_vec),
                                  vring_interrupt, 0,
                                  vp_dev->msix_names[msix_vec],
                                  vqs[i]);
                if (err)
                        goto error_find;
        }
        return 0;

error_find:
        vp_del_vqs(vdev);
        return err;
}

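/*
 * Last-resort setup using a single shared INTx line.  vp_interrupt()
 * reads the ISR to tell configuration changes apart from vring activity,
 * so every queue is created with VIRTIO_MSI_NO_VECTOR.
 */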
static int vp_find_vqs_intx(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[])
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        int i, err;

        vp_dev->vqs = kcalloc(nvqs, sizeof(*vp_dev->vqs), GFP_KERNEL);
        if (!vp_dev->vqs)
                return -ENOMEM;

        err = request_irq(vp_dev->pci_dev->irq, vp_interrupt, IRQF_SHARED,
                          dev_name(&vdev->dev), vp_dev);
        if (err)
                goto out_del_vqs;

        vp_dev->intx_enabled = 1;
        vp_dev->per_vq_vectors = false;
        for (i = 0; i < nvqs; ++i) {
                if (!names[i]) {
                        vqs[i] = NULL;
                        continue;
                }
                vqs[i] = vp_setup_vq(vdev, i, callbacks[i], names[i],
                                     VIRTIO_MSI_NO_VECTOR);
                if (IS_ERR(vqs[i])) {
                        err = PTR_ERR(vqs[i]);
                        goto out_del_vqs;
                }
        }

        return 0;
out_del_vqs:
        vp_del_vqs(vdev);
        return err;
}

/* the config->find_vqs() implementation */
int vp_find_vqs(struct virtio_device *vdev, unsigned nvqs,
                struct virtqueue *vqs[], vq_callback_t *callbacks[],
                const char * const names[], struct irq_affinity *desc)
{
        int err;

        /* Try MSI-X with one vector per queue. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, true, desc);
        if (!err)
                return 0;
        /* Fallback: MSI-X with one vector for config, one shared for queues. */
        err = vp_find_vqs_msix(vdev, nvqs, vqs, callbacks, names, false, desc);
        if (!err)
                return 0;
        /* Finally fall back to regular interrupts. */
        return vp_find_vqs_intx(vdev, nvqs, vqs, callbacks, names);
}

const char *vp_bus_name(struct virtio_device *vdev)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        return pci_name(vp_dev->pci_dev);
}

/* Setup the affinity for a virtqueue:
 * - force the affinity for per vq vector
 * - OR over all affinities for shared MSI
 * - ignore the affinity request if we're using INTX
 */
int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
{
        struct virtio_device *vdev = vq->vdev;
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);
        struct virtio_pci_vq_info *info = vp_dev->vqs[vq->index];
        struct cpumask *mask;
        unsigned int irq;

        if (!vq->callback)
                return -EINVAL;

        if (vp_dev->msix_enabled) {
                mask = vp_dev->msix_affinity_masks[info->msix_vector];
                irq = pci_irq_vector(vp_dev->pci_dev, info->msix_vector);
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
                        cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        irq_set_affinity_hint(irq, mask);
                }
        }
        return 0;
}

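/* Report the affinity mask the PCI core computed for a queue's vector;
 * only meaningful when each virtqueue owns a dedicated MSI-X vector. */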
const struct cpumask *vp_get_vq_affinity(struct virtio_device *vdev, int index)
{
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        if (!vp_dev->per_vq_vectors ||
            vp_dev->vqs[index]->msix_vector == VIRTIO_MSI_NO_VECTOR)
                return NULL;

        return pci_irq_get_affinity(vp_dev->pci_dev,
                                    vp_dev->vqs[index]->msix_vector);
}

#ifdef CONFIG_PM_SLEEP
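/*
 * System sleep support: freeze lets the virtio core notify the driver so
 * the device can quiesce before the PCI device is disabled; restore
 * re-enables the device and bus mastering, then has the core bring the
 * device back up.
 */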
static int virtio_pci_freeze(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = virtio_device_freeze(&vp_dev->vdev);

        if (!ret)
                pci_disable_device(pci_dev);
        return ret;
}

static int virtio_pci_restore(struct device *dev)
{
        struct pci_dev *pci_dev = to_pci_dev(dev);
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        int ret;

        ret = pci_enable_device(pci_dev);
        if (ret)
                return ret;

        pci_set_master(pci_dev);
        return virtio_device_restore(&vp_dev->vdev);
}

static const struct dev_pm_ops virtio_pci_pm_ops = {
        SET_SYSTEM_SLEEP_PM_OPS(virtio_pci_freeze, virtio_pci_restore)
};
#endif

/* Qumranet donated their vendor ID for devices 0x1000 thru 0x10FF. */
static const struct pci_device_id virtio_pci_id_table[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_REDHAT_QUMRANET, PCI_ANY_ID) },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, virtio_pci_id_table);

static void virtio_pci_release_dev(struct device *_d)
{
        struct virtio_device *vdev = dev_to_virtio(_d);
        struct virtio_pci_device *vp_dev = to_vp_device(vdev);

        /* As struct device is a kobject, it's not safe to
         * free the memory (including the reference counter itself)
         * until its release callback. */
        kfree(vp_dev);
}

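/*
 * By default, probe the modern (virtio 1) interface first and fall back
 * to the legacy interface only if the device lacks modern capabilities
 * (-ENODEV).  With force_legacy the order is reversed, though modern
 * mode is still tried if BAR0 cannot be mapped.
 */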
static int virtio_pci_probe(struct pci_dev *pci_dev,
                            const struct pci_device_id *id)
{
        struct virtio_pci_device *vp_dev;
        int rc;

        /* allocate our structure and fill it out */
        vp_dev = kzalloc(sizeof(struct virtio_pci_device), GFP_KERNEL);
        if (!vp_dev)
                return -ENOMEM;

        pci_set_drvdata(pci_dev, vp_dev);
        vp_dev->vdev.dev.parent = &pci_dev->dev;
        vp_dev->vdev.dev.release = virtio_pci_release_dev;
        vp_dev->pci_dev = pci_dev;
        INIT_LIST_HEAD(&vp_dev->virtqueues);
        spin_lock_init(&vp_dev->lock);

        /* enable the device */
        rc = pci_enable_device(pci_dev);
        if (rc)
                goto err_enable_device;

        if (force_legacy) {
                rc = virtio_pci_legacy_probe(vp_dev);
                /* Also try modern mode if we can't map BAR0 (no IO space). */
                if (rc == -ENODEV || rc == -ENOMEM)
                        rc = virtio_pci_modern_probe(vp_dev);
                if (rc)
                        goto err_probe;
        } else {
                rc = virtio_pci_modern_probe(vp_dev);
                if (rc == -ENODEV)
                        rc = virtio_pci_legacy_probe(vp_dev);
                if (rc)
                        goto err_probe;
        }

        pci_set_master(pci_dev);

        rc = register_virtio_device(&vp_dev->vdev);
        if (rc)
                goto err_register;

        return 0;

err_register:
        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);
err_probe:
        pci_disable_device(pci_dev);
err_enable_device:
        kfree(vp_dev);
        return rc;
}

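/*
 * Hold an extra reference across unregistration so that vp_dev, which is
 * embedded in the virtio_device, stays valid for the legacy/modern
 * teardown; virtio_pci_release_dev() frees it once the final reference
 * is dropped by put_device().
 */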
static void virtio_pci_remove(struct pci_dev *pci_dev)
{
        struct virtio_pci_device *vp_dev = pci_get_drvdata(pci_dev);
        struct device *dev = get_device(&vp_dev->vdev.dev);

        unregister_virtio_device(&vp_dev->vdev);

        if (vp_dev->ioaddr)
                virtio_pci_legacy_remove(vp_dev);
        else
                virtio_pci_modern_remove(vp_dev);

        pci_disable_device(pci_dev);
        put_device(dev);
}

static struct pci_driver virtio_pci_driver = {
        .name           = "virtio-pci",
        .id_table       = virtio_pci_id_table,
        .probe          = virtio_pci_probe,
        .remove         = virtio_pci_remove,
#ifdef CONFIG_PM_SLEEP
        .driver.pm      = &virtio_pci_pm_ops,
#endif
};

module_pci_driver(virtio_pci_driver);

MODULE_AUTHOR("Anthony Liguori <aliguori@us.ibm.com>");
MODULE_DESCRIPTION("virtio-pci");
MODULE_LICENSE("GPL");
MODULE_VERSION("1");