/*
 * Copyright (C) 2012 Red Hat, Inc. All rights reserved.
 * Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc. All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/eventfd.h>
#include <linux/file.h>
#include <linux/interrupt.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "VFIO PCI - User Level meta-driver"

static char ids[1024] __initdata;
module_param_string(ids, ids, sizeof(ids), 0);
MODULE_PARM_DESC(ids, "Initial PCI IDs to add to the vfio driver, format is \"vendor:device[:subvendor[:subdevice[:class[:class_mask]]]]\" and multiple comma separated entries can be specified");

static bool nointxmask;
module_param_named(nointxmask, nointxmask, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nointxmask,
		  "Disable support for PCI 2.3 style INTx masking. If this resolves problems for specific devices, report lspci -vvvxxx to linux-pci@vger.kernel.org so the device can be fixed automatically via the broken_intx_masking flag.");

#ifdef CONFIG_VFIO_PCI_VGA
static bool disable_vga;
module_param(disable_vga, bool, S_IRUGO);
MODULE_PARM_DESC(disable_vga, "Disable VGA resource access through vfio-pci");
#endif

static DEFINE_MUTEX(driver_lock);

static inline bool vfio_vga_disabled(void)
{
#ifdef CONFIG_VFIO_PCI_VGA
	return disable_vga;
#else
	return true;
#endif
}

/*
 * Our VGA arbiter participation is limited since we don't know anything
 * about the device itself. However, if the device is the only VGA device
 * downstream of a bridge and VFIO VGA support is disabled, then we can
 * safely return legacy VGA IO and memory as not decoded since the user
 * has no way to get to it and routing can be disabled externally at the
 * bridge.
 */
static unsigned int vfio_pci_set_vga_decode(void *opaque, bool single_vga)
{
	struct vfio_pci_device *vdev = opaque;
	struct pci_dev *tmp = NULL, *pdev = vdev->pdev;
	unsigned char max_busnr;
	unsigned int decodes;

	if (single_vga || !vfio_vga_disabled() || pci_is_root_bus(pdev->bus))
		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
		       VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;

	max_busnr = pci_bus_max_busnr(pdev->bus);
	decodes = VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;

	while ((tmp = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, tmp)) != NULL) {
		if (tmp == pdev ||
		    pci_domain_nr(tmp->bus) != pci_domain_nr(pdev->bus) ||
		    pci_is_root_bus(tmp->bus))
			continue;

		if (tmp->bus->number >= pdev->bus->number &&
		    tmp->bus->number <= max_busnr) {
			pci_dev_put(tmp);
			decodes |= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
			break;
		}
	}

	return decodes;
}

static inline bool vfio_pci_is_vga(struct pci_dev *pdev)
{
	return (pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA;
}

static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev);

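/*
 * Enable the physical device on first open: record whether function reset
 * works, stash a pristine copy of config space for restore at disable time,
 * set up PCI 2.3 INTx masking, and cache the MSI-X table location so mmaps
 * of that BAR can avoid it.
 */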
static int vfio_pci_enable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int ret;
	u16 cmd;
	u8 msix_pos;

	/* Don't allow our initial saved state to include busmaster */
	pci_clear_master(pdev);

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	vdev->reset_works = (pci_reset_function(pdev) == 0);
	pci_save_state(pdev);
	vdev->pci_saved_state = pci_store_saved_state(pdev);
	if (!vdev->pci_saved_state)
		pr_debug("%s: Couldn't store %s saved state\n",
			 __func__, dev_name(&pdev->dev));

	ret = vfio_config_init(vdev);
	if (ret) {
		kfree(vdev->pci_saved_state);
		vdev->pci_saved_state = NULL;
		pci_disable_device(pdev);
		return ret;
	}

	if (likely(!nointxmask))
		vdev->pci_2_3 = pci_intx_mask_supported(pdev);

	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
	if (vdev->pci_2_3 && (cmd & PCI_COMMAND_INTX_DISABLE)) {
		cmd &= ~PCI_COMMAND_INTX_DISABLE;
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	msix_pos = pdev->msix_cap;
	if (msix_pos) {
		u16 flags;
		u32 table;

		pci_read_config_word(pdev, msix_pos + PCI_MSIX_FLAGS, &flags);
		pci_read_config_dword(pdev, msix_pos + PCI_MSIX_TABLE, &table);

		vdev->msix_bar = table & PCI_MSIX_TABLE_BIR;
		vdev->msix_offset = table & PCI_MSIX_TABLE_OFFSET;
		vdev->msix_size = ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) * 16;
	} else
		vdev->msix_bar = 0xFF;

	if (!vfio_vga_disabled() && vfio_pci_is_vga(pdev))
		vdev->has_vga = true;

	return 0;
}

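/*
 * Undo vfio_pci_enable() when the last user goes away: tear down user
 * interrupts, free the virtual config space, release BAR mappings, then
 * restore the saved state and try to reset the function so the device is
 * returned in a clean state.
 */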
static void vfio_pci_disable(struct vfio_pci_device *vdev)
{
	struct pci_dev *pdev = vdev->pdev;
	int bar;

	/* Stop the device from further DMA */
	pci_clear_master(pdev);

	vfio_pci_set_irqs_ioctl(vdev, VFIO_IRQ_SET_DATA_NONE |
				VFIO_IRQ_SET_ACTION_TRIGGER,
				vdev->irq_type, 0, 0, NULL);

	vdev->virq_disabled = false;

	vfio_config_free(vdev);

	for (bar = PCI_STD_RESOURCES; bar <= PCI_STD_RESOURCE_END; bar++) {
		if (!vdev->barmap[bar])
			continue;
		pci_iounmap(pdev, vdev->barmap[bar]);
		pci_release_selected_regions(pdev, 1 << bar);
		vdev->barmap[bar] = NULL;
	}

	vdev->needs_reset = true;

	/*
	 * If we have saved state, restore it. If we can reset the device,
	 * even better. Resetting with current state seems better than
	 * nothing, but saving and restoring current state without reset
	 * is just busy work.
	 */
	if (pci_load_and_free_saved_state(pdev, &vdev->pci_saved_state)) {
		pr_info("%s: Couldn't reload %s saved state\n",
			__func__, dev_name(&pdev->dev));

		if (!vdev->reset_works)
			goto out;

		pci_save_state(pdev);
	}

	/*
	 * Disable INTx and MSI, presumably to avoid spurious interrupts
	 * during reset. Stolen from pci_reset_function()
	 */
	pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	/*
	 * Try to reset the device. The success of this is dependent on
	 * being able to lock the device, which is not always possible.
	 */
	if (vdev->reset_works) {
		int ret = pci_try_reset_function(pdev);
		if (ret)
			pr_warn("%s: Failed to reset device %s (%d)\n",
				__func__, dev_name(&pdev->dev), ret);
		else
			vdev->needs_reset = false;
	}

	pci_restore_state(pdev);
out:
	pci_disable_device(pdev);

	vfio_pci_try_bus_reset(vdev);
}

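/*
 * Device file release: drop the reference taken at open and, for the last
 * user, disable the device. driver_lock serializes this against opens and
 * the bus reset path.
 */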
static void vfio_pci_release(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		vfio_spapr_pci_eeh_release(vdev->pdev);
		vfio_pci_disable(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(THIS_MODULE);
}

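/*
 * Device file open: the first opener enables the physical device and the
 * platform EEH hooks; subsequent opens only bump the reference count.
 */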
static int vfio_pci_open(void *device_data)
{
	struct vfio_pci_device *vdev = device_data;
	int ret = 0;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_pci_enable(vdev);
		if (ret)
			goto error;

		vfio_spapr_pci_eeh_open(vdev->pdev);
	}
	vdev->refcnt++;
error:
	mutex_unlock(&driver_lock);
	if (ret)
		module_put(THIS_MODULE);
	return ret;
}

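/*
 * Number of interrupts available for a given IRQ index: 1 for INTx when a
 * pin is routed, the vector count advertised by the MSI/MSI-X capability,
 * 1 for the error IRQ on PCIe devices, and 1 for the request IRQ.
 */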
static int vfio_pci_get_irq_count(struct vfio_pci_device *vdev, int irq_type)
{
	if (irq_type == VFIO_PCI_INTX_IRQ_INDEX) {
		u8 pin;
		pci_read_config_byte(vdev->pdev, PCI_INTERRUPT_PIN, &pin);
		if (IS_ENABLED(CONFIG_VFIO_PCI_INTX) && pin)
			return 1;

	} else if (irq_type == VFIO_PCI_MSI_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msi_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSI_FLAGS, &flags);
			return 1 << ((flags & PCI_MSI_FLAGS_QMASK) >> 1);
		}
	} else if (irq_type == VFIO_PCI_MSIX_IRQ_INDEX) {
		u8 pos;
		u16 flags;

		pos = vdev->pdev->msix_cap;
		if (pos) {
			pci_read_config_word(vdev->pdev,
					     pos + PCI_MSIX_FLAGS, &flags);

			return (flags & PCI_MSIX_FLAGS_QSIZE) + 1;
		}
	} else if (irq_type == VFIO_PCI_ERR_IRQ_INDEX) {
		if (pci_is_pcie(vdev->pdev))
			return 1;
	} else if (irq_type == VFIO_PCI_REQ_IRQ_INDEX) {
		return 1;
	}

	return 0;
}

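/* Callback for vfio_pci_for_each_slot_or_bus(): count affected devices */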
static int vfio_pci_count_devs(struct pci_dev *pdev, void *data)
{
	(*(int *)data)++;
	return 0;
}

struct vfio_pci_fill_info {
	int max;
	int cur;
	struct vfio_pci_dependent_device *devices;
};

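/*
 * Callback for vfio_pci_for_each_slot_or_bus(): record each affected
 * device's IOMMU group and PCI address for the hot reset info ioctl.
 */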
static int vfio_pci_fill_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_fill_info *fill = data;
	struct iommu_group *iommu_group;

	if (fill->cur == fill->max)
		return -EAGAIN; /* Something changed, try again */

	iommu_group = iommu_group_get(&pdev->dev);
	if (!iommu_group)
		return -EPERM; /* Cannot reset non-isolated devices */

	fill->devices[fill->cur].group_id = iommu_group_id(iommu_group);
	fill->devices[fill->cur].segment = pci_domain_nr(pdev->bus);
	fill->devices[fill->cur].bus = pdev->bus->number;
	fill->devices[fill->cur].devfn = pdev->devfn;
	fill->cur++;
	iommu_group_put(iommu_group);
	return 0;
}

struct vfio_pci_group_entry {
	struct vfio_group *group;
	int id;
};

struct vfio_pci_group_info {
	int count;
	struct vfio_pci_group_entry *groups;
};

static int vfio_pci_validate_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_group_info *info = data;
	struct iommu_group *group;
	int id, i;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EPERM;

	id = iommu_group_id(group);

	for (i = 0; i < info->count; i++)
		if (info->groups[i].id == id)
			break;

	iommu_group_put(group);

	return (i == info->count) ? -EINVAL : 0;
}

static bool vfio_pci_dev_below_slot(struct pci_dev *pdev, struct pci_slot *slot)
{
	for (; pdev; pdev = pdev->bus->self)
		if (pdev->bus == slot->bus)
			return (pdev->slot == slot);
	return false;
}

struct vfio_pci_walk_info {
	int (*fn)(struct pci_dev *, void *data);
	void *data;
	struct pci_dev *pdev;
	bool slot;
	int ret;
};

static int vfio_pci_walk_wrapper(struct pci_dev *pdev, void *data)
{
	struct vfio_pci_walk_info *walk = data;

	if (!walk->slot || vfio_pci_dev_below_slot(pdev, walk->pdev->slot))
		walk->ret = walk->fn(pdev, walk->data);

	return walk->ret;
}

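/*
 * Invoke fn() on every device affected by a reset of pdev's bus, or only
 * on devices below the same slot when slot is true. A non-zero return
 * from fn() terminates the walk and is propagated to the caller.
 */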
static int vfio_pci_for_each_slot_or_bus(struct pci_dev *pdev,
					 int (*fn)(struct pci_dev *,
						   void *data), void *data,
					 bool slot)
{
	struct vfio_pci_walk_info walk = {
		.fn = fn, .data = data, .pdev = pdev, .slot = slot, .ret = 0,
	};

	pci_walk_bus(pdev->bus, vfio_pci_walk_wrapper, &walk);

	return walk.ret;
}

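/*
 * Device ioctls: report device, region and interrupt capabilities,
 * configure interrupt signaling eventfds, and perform function or
 * bus/slot hot resets on behalf of the user.
 */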
static long vfio_pci_ioctl(void *device_data,
			   unsigned int cmd, unsigned long arg)
{
	struct vfio_pci_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = VFIO_DEVICE_FLAGS_PCI;

		if (vdev->reset_works)
			info.flags |= VFIO_DEVICE_FLAGS_RESET;

		info.num_regions = VFIO_PCI_NUM_REGIONS;
		info.num_irqs = VFIO_PCI_NUM_IRQS;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct pci_dev *pdev = vdev->pdev;
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_CONFIG_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pdev->cfg_size;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			break;
		case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size) {
				info.flags = 0;
				break;
			}

			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;
			if (IS_ENABLED(CONFIG_VFIO_PCI_MMAP) &&
			    pci_resource_flags(pdev, info.index) &
			    IORESOURCE_MEM && info.size >= PAGE_SIZE)
				info.flags |= VFIO_REGION_INFO_FLAG_MMAP;
			break;
		case VFIO_PCI_ROM_REGION_INDEX:
		{
			void __iomem *io;
			size_t size;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.flags = 0;

			/* Report the BAR size, not the ROM size */
			info.size = pci_resource_len(pdev, info.index);
			if (!info.size)
				break;

			/* Is it really there? */
			io = pci_map_rom(pdev, &size);
			if (!io || !size) {
				info.size = 0;
				break;
			}
			pci_unmap_rom(pdev, io);

			info.flags = VFIO_REGION_INFO_FLAG_READ;
			break;
		}
		case VFIO_PCI_VGA_REGION_INDEX:
			if (!vdev->has_vga)
				return -EINVAL;

			info.offset = VFIO_PCI_INDEX_TO_OFFSET(info.index);
			info.size = 0xc0000;
			info.flags = VFIO_REGION_INFO_FLAG_READ |
				     VFIO_REGION_INFO_FLAG_WRITE;

			break;
		default:
			return -EINVAL;
		}

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz || info.index >= VFIO_PCI_NUM_IRQS)
			return -EINVAL;

		switch (info.index) {
		case VFIO_PCI_INTX_IRQ_INDEX ... VFIO_PCI_MSIX_IRQ_INDEX:
		case VFIO_PCI_REQ_IRQ_INDEX:
			break;
		case VFIO_PCI_ERR_IRQ_INDEX:
			if (pci_is_pcie(vdev->pdev))
				break;
		/* pass thru to return error */
		default:
			return -EINVAL;
		}

		info.flags = VFIO_IRQ_INFO_EVENTFD;

		info.count = vfio_pci_get_irq_count(vdev, info.index);

		if (info.index == VFIO_PCI_INTX_IRQ_INDEX)
			info.flags |= (VFIO_IRQ_INFO_MASKABLE |
				       VFIO_IRQ_INFO_AUTOMASKED);
		else
			info.flags |= VFIO_IRQ_INFO_NORESIZE;

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.index >= VFIO_PCI_NUM_IRQS ||
		    hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;
			int max = vfio_pci_get_irq_count(vdev, hdr.index);

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < hdr.count * size ||
			    hdr.start >= max || hdr.start + hdr.count > max)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz),
					   hdr.count * size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_pci_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
					      hdr.start, hdr.count, data);

		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		return vdev->reset_works ?
			pci_try_reset_function(vdev->pdev) : -EINVAL;

	} else if (cmd == VFIO_DEVICE_GET_PCI_HOT_RESET_INFO) {
		struct vfio_pci_hot_reset_info hdr;
		struct vfio_pci_fill_info fill = { 0 };
		struct vfio_pci_dependent_device *devices = NULL;
		bool slot = false;
		int ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset_info, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		hdr.flags = 0;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/* How many devices are affected? */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &fill.max, slot);
		if (ret)
			return ret;

		WARN_ON(!fill.max); /* Should always be at least one */

		/*
		 * If there's enough space, fill it now, otherwise return
		 * -ENOSPC and the number of devices affected.
		 */
		if (hdr.argsz < sizeof(hdr) + (fill.max * sizeof(*devices))) {
			ret = -ENOSPC;
			hdr.count = fill.max;
			goto reset_info_exit;
		}

		devices = kcalloc(fill.max, sizeof(*devices), GFP_KERNEL);
		if (!devices)
			return -ENOMEM;

		fill.devices = devices;

		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_fill_devs,
						    &fill, slot);

		/*
		 * If a device was removed between counting and filling,
		 * we may come up short of fill.max. If a device was
		 * added, we'll have a return of -EAGAIN above.
		 */
		if (!ret)
			hdr.count = fill.cur;

reset_info_exit:
		if (copy_to_user((void __user *)arg, &hdr, minsz))
			ret = -EFAULT;

		if (!ret) {
			if (copy_to_user((void __user *)(arg + minsz), devices,
					 hdr.count * sizeof(*devices)))
				ret = -EFAULT;
		}

		kfree(devices);
		return ret;

	} else if (cmd == VFIO_DEVICE_PCI_HOT_RESET) {
		struct vfio_pci_hot_reset hdr;
		int32_t *group_fds;
		struct vfio_pci_group_entry *groups;
		struct vfio_pci_group_info info;
		bool slot = false;
		int i, count = 0, ret = 0;

		minsz = offsetofend(struct vfio_pci_hot_reset, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz || hdr.flags)
			return -EINVAL;

		/* Can we do a slot or bus reset or neither? */
		if (!pci_probe_reset_slot(vdev->pdev->slot))
			slot = true;
		else if (pci_probe_reset_bus(vdev->pdev->bus))
			return -ENODEV;

		/*
		 * We can't let userspace give us an arbitrarily large
		 * buffer to copy, so verify how many we think there
		 * could be. Note groups can have multiple devices so
		 * one group per device is the max.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_count_devs,
						    &count, slot);
		if (ret)
			return ret;

		/* Somewhere between 1 and count is OK */
		if (!hdr.count || hdr.count > count)
			return -EINVAL;

		group_fds = kcalloc(hdr.count, sizeof(*group_fds), GFP_KERNEL);
		groups = kcalloc(hdr.count, sizeof(*groups), GFP_KERNEL);
		if (!group_fds || !groups) {
			kfree(group_fds);
			kfree(groups);
			return -ENOMEM;
		}

		if (copy_from_user(group_fds, (void __user *)(arg + minsz),
				   hdr.count * sizeof(*group_fds))) {
			kfree(group_fds);
			kfree(groups);
			return -EFAULT;
		}

		/*
		 * For each group_fd, get the group through the vfio external
		 * user interface and store the group and iommu ID. This
		 * ensures the group is held across the reset.
		 */
		for (i = 0; i < hdr.count; i++) {
			struct vfio_group *group;
			struct fd f = fdget(group_fds[i]);
			if (!f.file) {
				ret = -EBADF;
				break;
			}

			group = vfio_group_get_external_user(f.file);
			fdput(f);
			if (IS_ERR(group)) {
				ret = PTR_ERR(group);
				break;
			}

			groups[i].group = group;
			groups[i].id = vfio_external_user_iommu_id(group);
		}

		kfree(group_fds);

		/* release reference to groups on error */
		if (ret)
			goto hot_reset_release;

		info.count = hdr.count;
		info.groups = groups;

		/*
		 * Test whether all the affected devices are contained
		 * by the set of groups provided by the user.
		 */
		ret = vfio_pci_for_each_slot_or_bus(vdev->pdev,
						    vfio_pci_validate_devs,
						    &info, slot);
		if (!ret)
			/* User has access, do the reset */
			ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
				     pci_try_reset_bus(vdev->pdev->bus);

hot_reset_release:
		for (i--; i >= 0; i--)
			vfio_group_put_external_user(groups[i].group);

		kfree(groups);
		return ret;
	}

	return -ENOTTY;
}

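/*
 * Dispatch a user read/write to the config, BAR, ROM or VGA backend based
 * on the region index encoded in the file offset.
 */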
static ssize_t vfio_pci_rw(void *device_data, char __user *buf,
			   size_t count, loff_t *ppos, bool iswrite)
{
	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	struct vfio_pci_device *vdev = device_data;

	if (index >= VFIO_PCI_NUM_REGIONS)
		return -EINVAL;

	switch (index) {
	case VFIO_PCI_CONFIG_REGION_INDEX:
		return vfio_pci_config_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_ROM_REGION_INDEX:
		if (iswrite)
			return -EINVAL;
		return vfio_pci_bar_rw(vdev, buf, count, ppos, false);

	case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
		return vfio_pci_bar_rw(vdev, buf, count, ppos, iswrite);

	case VFIO_PCI_VGA_REGION_INDEX:
		return vfio_pci_vga_rw(vdev, buf, count, ppos, iswrite);
	}

	return -EINVAL;
}

static ssize_t vfio_pci_read(void *device_data, char __user *buf,
			     size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, buf, count, ppos, false);
}

static ssize_t vfio_pci_write(void *device_data, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	if (!count)
		return 0;

	return vfio_pci_rw(device_data, (char __user *)buf, count, ppos, true);
}

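/*
 * mmap a memory BAR into the user's address space. Only shared mappings of
 * MMIO BARs at least a page in size are allowed, and ranges overlapping
 * the MSI-X table are rejected so the user must map around it.
 */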
static int vfio_pci_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_pci_device *vdev = device_data;
	struct pci_dev *pdev = vdev->pdev;
	unsigned int index;
	u64 phys_len, req_len, pgoff, req_start;
	int ret;

	index = vma->vm_pgoff >> (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;
	if (index >= VFIO_PCI_ROM_REGION_INDEX)
		return -EINVAL;
	if (!(pci_resource_flags(pdev, index) & IORESOURCE_MEM))
		return -EINVAL;

	phys_len = pci_resource_len(pdev, index);
	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (phys_len < PAGE_SIZE || req_start + req_len > phys_len)
		return -EINVAL;

	if (index == vdev->msix_bar) {
		/*
		 * Disallow mmaps overlapping the MSI-X table; users don't
		 * get to touch this directly. We could find somewhere
		 * else to map the overlap, but page granularity is only
		 * a recommendation, not a requirement, so the user needs
		 * to know which bits are real. Requiring them to mmap
		 * around the table makes that clear.
		 */

		/* If neither entirely above nor below, then it overlaps */
		if (!(req_start >= vdev->msix_offset + vdev->msix_size ||
		      req_start + req_len <= vdev->msix_offset))
			return -EINVAL;
	}

	/*
	 * Even though we don't make use of the barmap for the mmap,
	 * we need to request the region and the barmap tracks that.
	 */
	if (!vdev->barmap[index]) {
		ret = pci_request_selected_regions(pdev,
						   1 << index, "vfio-pci");
		if (ret)
			return ret;

		vdev->barmap[index] = pci_iomap(pdev, index, 0);
	}

	vma->vm_private_data = vdev;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (pci_resource_start(pdev, index) >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

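/*
 * Called by vfio-core when the host needs the device back (e.g. on driver
 * unbind); signal the user's request eventfd if one has been registered.
 */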
static void vfio_pci_request(void *device_data, unsigned int count)
{
	struct vfio_pci_device *vdev = device_data;

	mutex_lock(&vdev->igate);

	if (vdev->req_trigger) {
		dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
		eventfd_signal(vdev->req_trigger, 1);
	}

	mutex_unlock(&vdev->igate);
}

static const struct vfio_device_ops vfio_pci_ops = {
	.name		= "vfio-pci",
	.open		= vfio_pci_open,
	.release	= vfio_pci_release,
	.ioctl		= vfio_pci_ioctl,
	.read		= vfio_pci_read,
	.write		= vfio_pci_write,
	.mmap		= vfio_pci_mmap,
	.request	= vfio_pci_request,
};

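/*
 * Bind to a PCI function: only type 0 (normal) functions that belong to an
 * IOMMU group are accepted. VGA-class devices additionally register with
 * the VGA arbiter so legacy decoding can be dropped when VFIO VGA support
 * is disabled.
 */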
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct vfio_pci_device *vdev;
	struct iommu_group *group;
	int ret;

	if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
		return -EINVAL;

	group = iommu_group_get(&pdev->dev);
	if (!group)
		return -EINVAL;

	vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
	if (!vdev) {
		iommu_group_put(group);
		return -ENOMEM;
	}

	vdev->pdev = pdev;
	vdev->irq_type = VFIO_PCI_NUM_IRQS;
	mutex_init(&vdev->igate);
	spin_lock_init(&vdev->irqlock);

	ret = vfio_add_group_dev(&pdev->dev, &vfio_pci_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		kfree(vdev);
		return ret;
	}

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, vdev, NULL, vfio_pci_set_vga_decode);
		vga_set_legacy_decoding(pdev,
					vfio_pci_set_vga_decode(vdev, false));
	}

	return ret;
}

static void vfio_pci_remove(struct pci_dev *pdev)
{
	struct vfio_pci_device *vdev;

	vdev = vfio_del_group_dev(&pdev->dev);
	if (!vdev)
		return;

	iommu_group_put(pdev->dev.iommu_group);
	kfree(vdev);

	if (vfio_pci_is_vga(pdev)) {
		vga_client_register(pdev, NULL, NULL, NULL);
		vga_set_legacy_decoding(pdev,
				VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM |
				VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	}
}

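/*
 * AER error_detected handler: forward the error to userspace through the
 * error eventfd, if one is registered, and report that recovery may
 * proceed.
 */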
static pci_ers_result_t vfio_pci_aer_err_detected(struct pci_dev *pdev,
						  pci_channel_state_t state)
{
	struct vfio_pci_device *vdev;
	struct vfio_device *device;

	device = vfio_device_get_from_dev(&pdev->dev);
	if (device == NULL)
		return PCI_ERS_RESULT_DISCONNECT;

	vdev = vfio_device_data(device);
	if (vdev == NULL) {
		vfio_device_put(device);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	mutex_lock(&vdev->igate);

	if (vdev->err_trigger)
		eventfd_signal(vdev->err_trigger, 1);

	mutex_unlock(&vdev->igate);

	vfio_device_put(device);

	return PCI_ERS_RESULT_CAN_RECOVER;
}

static struct pci_error_handlers vfio_err_handlers = {
	.error_detected = vfio_pci_aer_err_detected,
};

static struct pci_driver vfio_pci_driver = {
	.name		= "vfio-pci",
	.id_table	= NULL, /* only dynamic ids */
	.probe		= vfio_pci_probe,
	.remove		= vfio_pci_remove,
	.err_handler	= &vfio_err_handlers,
};

struct vfio_devices {
	struct vfio_device **devices;
	int cur_index;
	int max_index;
};

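/*
 * Callback for vfio_pci_try_bus_reset(): take and hold a vfio_device
 * reference for each affected device, failing if any of them is not
 * bound to vfio-pci.
 */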
static int vfio_pci_get_devs(struct pci_dev *pdev, void *data)
{
	struct vfio_devices *devs = data;
	struct pci_driver *pci_drv = ACCESS_ONCE(pdev->driver);

	if (pci_drv != &vfio_pci_driver)
		return -EBUSY;

	if (devs->cur_index == devs->max_index)
		return -ENOSPC;

	devs->devices[devs->cur_index] = vfio_device_get_from_dev(&pdev->dev);
	if (!devs->devices[devs->cur_index])
		return -EINVAL;

	devs->cur_index++;
	return 0;
}

/*
 * Attempt to do a bus/slot reset if there are devices affected by a reset for
 * this device that are needs_reset and all of the affected devices are unused
 * (!refcnt). Callers are required to hold driver_lock when calling this to
 * prevent device opens and concurrent bus reset attempts. We prevent device
 * unbinds by acquiring and holding a reference to the vfio_device.
 *
 * NB: vfio-core considers a group to be viable even if some devices are
 * bound to drivers like pci-stub or pcieport. Here we require all devices
 * to be bound to vfio_pci since that's the only way we can be sure they
 * stay put.
 */
static void vfio_pci_try_bus_reset(struct vfio_pci_device *vdev)
{
	struct vfio_devices devs = { .cur_index = 0 };
	int i = 0, ret = -EINVAL;
	bool needs_reset = false, slot = false;
	struct vfio_pci_device *tmp;

	if (!pci_probe_reset_slot(vdev->pdev->slot))
		slot = true;
	else if (pci_probe_reset_bus(vdev->pdev->bus))
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev, vfio_pci_count_devs,
					  &i, slot) || !i)
		return;

	devs.max_index = i;
	devs.devices = kcalloc(i, sizeof(struct vfio_device *), GFP_KERNEL);
	if (!devs.devices)
		return;

	if (vfio_pci_for_each_slot_or_bus(vdev->pdev,
					  vfio_pci_get_devs, &devs, slot))
		goto put_devs;

	for (i = 0; i < devs.cur_index; i++) {
		tmp = vfio_device_data(devs.devices[i]);
		if (tmp->needs_reset)
			needs_reset = true;
		if (tmp->refcnt)
			goto put_devs;
	}

	if (needs_reset)
		ret = slot ? pci_try_reset_slot(vdev->pdev->slot) :
			     pci_try_reset_bus(vdev->pdev->bus);

put_devs:
	for (i = 0; i < devs.cur_index; i++) {
		if (!ret) {
			tmp = vfio_device_data(devs.devices[i]);
			tmp->needs_reset = false;
		}
		vfio_device_put(devs.devices[i]);
	}

	kfree(devs.devices);
}

static void __exit vfio_pci_cleanup(void)
{
	pci_unregister_driver(&vfio_pci_driver);
	vfio_pci_uninit_perm_bits();
}

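/*
 * Parse the "ids" module parameter and register each entry as a dynamic
 * PCI ID so vfio-pci claims matching devices as they are discovered.
 */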
static void __init vfio_pci_fill_ids(void)
{
	char *p, *id;
	int rc;

	/* no ids passed actually */
	if (ids[0] == '\0')
		return;

	/* add ids specified in the module parameter */
	p = ids;
	while ((id = strsep(&p, ","))) {
		unsigned int vendor, device, subvendor = PCI_ANY_ID,
			subdevice = PCI_ANY_ID, class = 0, class_mask = 0;
		int fields;

		if (!strlen(id))
			continue;

		fields = sscanf(id, "%x:%x:%x:%x:%x:%x",
				&vendor, &device, &subvendor, &subdevice,
				&class, &class_mask);

		if (fields < 2) {
			pr_warn("invalid id string \"%s\"\n", id);
			continue;
		}

		rc = pci_add_dynid(&vfio_pci_driver, vendor, device,
				   subvendor, subdevice, class, class_mask, 0);
		if (rc)
			pr_warn("failed to add dynamic id [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x (%d)\n",
				vendor, device, subvendor, subdevice,
				class, class_mask, rc);
		else
			pr_info("add [%04hx:%04hx[%04hx:%04hx]] class %#08x/%08x\n",
				vendor, device, subvendor, subdevice,
				class, class_mask);
	}
}

static int __init vfio_pci_init(void)
{
	int ret;

	/* Allocate shared config space permission data used by all devices */
	ret = vfio_pci_init_perm_bits();
	if (ret)
		return ret;

	/* Register and scan for devices */
	ret = pci_register_driver(&vfio_pci_driver);
	if (ret)
		goto out_driver;

	vfio_pci_fill_ids();

	return 0;

out_driver:
	vfio_pci_uninit_perm_bits();
	return ret;
}

module_init(vfio_pci_init);
module_exit(vfio_pci_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);