/*
 * File:	msi.c
 * Purpose:	PCI Message Signaled Interrupt (MSI)
 *
 * Copyright (C) 2003-2004 Intel
 * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "pci.h"

static int pci_msi_enable = 1;

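/*
 * The MSI-X Table Size field in Message Control is encoded as N-1, so the
 * number of table entries is the masked field value plus one.
 */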
#define msix_table_size(flags)	((flags & PCI_MSIX_FLAGS_QSIZE) + 1)


/* Arch hooks */
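/*
 * Each hook below can be overridden by the architecture (by providing its
 * own definition of the corresponding arch_* symbol or macro before this
 * point); when it is not overridden, the generic default defined here is
 * compiled in and used instead.
 */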

#ifndef arch_msi_check_device
int arch_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	return 0;
}
#endif

#ifndef arch_setup_msi_irqs
# define arch_setup_msi_irqs default_setup_msi_irqs
# define HAVE_DEFAULT_MSI_SETUP_IRQS
#endif

#ifdef HAVE_DEFAULT_MSI_SETUP_IRQS
int default_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	struct msi_desc *entry;
	int ret;

	/*
	 * If an architecture wants to support multiple MSI, it needs to
	 * override arch_setup_msi_irqs()
	 */
	if (type == PCI_CAP_ID_MSI && nvec > 1)
		return 1;

	list_for_each_entry(entry, &dev->msi_list, list) {
		ret = arch_setup_msi_irq(dev, entry);
		if (ret < 0)
			return ret;
		if (ret > 0)
			return -ENOSPC;
	}

	return 0;
}
#endif

#ifndef arch_teardown_msi_irqs
# define arch_teardown_msi_irqs default_teardown_msi_irqs
# define HAVE_DEFAULT_MSI_TEARDOWN_IRQS
#endif

#ifdef HAVE_DEFAULT_MSI_TEARDOWN_IRQS
void default_teardown_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int i, nvec;
		if (entry->irq == 0)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
		for (i = 0; i < nvec; i++)
			arch_teardown_msi_irq(entry->irq + i);
	}
}
#endif

#ifndef arch_restore_msi_irqs
# define arch_restore_msi_irqs default_restore_msi_irqs
# define HAVE_DEFAULT_MSI_RESTORE_IRQS
#endif

#ifdef HAVE_DEFAULT_MSI_RESTORE_IRQS
void default_restore_msi_irqs(struct pci_dev *dev, int irq)
{
	struct msi_desc *entry;

	entry = NULL;
	if (dev->msix_enabled) {
		list_for_each_entry(entry, &dev->msi_list, list) {
			if (irq == entry->irq)
				break;
		}
	} else if (dev->msi_enabled) {
		entry = irq_get_msi_desc(irq);
	}

	if (entry)
		write_msi_msg(irq, &entry->msg);
}
#endif

static void msi_set_enable(struct pci_dev *dev, int enable)
{
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	control &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void msix_set_enable(struct pci_dev *dev, int enable)
{
	u16 control;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);
	control &= ~PCI_MSIX_FLAGS_ENABLE;
	if (enable)
		control |= PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}

static inline __attribute_const__ u32 msi_mask(unsigned x)
{
	/* Don't shift by >= width of type */
	if (x >= 5)
		return 0xffffffff;
	return (1 << (1 << x)) - 1;
}

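/*
 * The two helpers below pull the Multiple Message Capable (bits 3:1) and
 * Multiple Message Enable (bits 6:4) fields out of the MSI Message Control
 * register and turn them into a per-vector mask via msi_mask().  For
 * example, a field value of 3 means 2^3 = 8 vectors, so msi_mask(3)
 * yields 0xff (one mask bit per vector).
 */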
static inline __attribute_const__ u32 msi_capable_mask(u16 control)
{
	return msi_mask((control >> 1) & 7);
}

static inline __attribute_const__ u32 msi_enabled_mask(u16 control)
{
	return msi_mask((control >> 4) & 7);
}

/*
 * PCI 2.3 does not specify mask bits for each MSI interrupt.  Attempting to
 * mask all MSI interrupts by clearing the MSI enable bit does not work
 * reliably as devices without an INTx disable bit will then generate a
 * level IRQ which will never be cleared.
 */
static u32 __msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	u32 mask_bits = desc->masked;

	if (!desc->msi_attrib.maskbit)
		return 0;

	mask_bits &= ~mask;
	mask_bits |= flag;
	pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits);

	return mask_bits;
}

static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag)
{
	desc->masked = __msi_mask_irq(desc, mask, flag);
}

/*
 * This internal function does not flush PCI writes to the device.
 * All users must ensure that they read from the device before either
 * assuming that the device state is up to date, or returning out of this
 * file.  This saves a few milliseconds when initialising devices with lots
 * of MSI-X interrupts.
 */
static u32 __msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	u32 mask_bits = desc->masked;
	unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;
	mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT;
	if (flag)
		mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT;
	writel(mask_bits, desc->mask_base + offset);

	return mask_bits;
}

static void msix_mask_irq(struct msi_desc *desc, u32 flag)
{
	desc->masked = __msix_mask_irq(desc, flag);
}

#ifdef CONFIG_GENERIC_HARDIRQS

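/*
 * For MSI-X the mask bit lives in the vector's own table entry; for
 * multi-vector MSI there is a single mask register, and the bit for this
 * vector is its offset from the device's base irq (dev->irq).
 */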
static void msi_set_mask_bit(struct irq_data *data, u32 flag)
{
	struct msi_desc *desc = irq_data_get_msi(data);

	if (desc->msi_attrib.is_msix) {
		msix_mask_irq(desc, flag);
		readl(desc->mask_base);		/* Flush write to device */
	} else {
		unsigned offset = data->irq - desc->dev->irq;
		msi_mask_irq(desc, 1 << offset, flag << offset);
	}
}

void mask_msi_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 1);
}

void unmask_msi_irq(struct irq_data *data)
{
	msi_set_mask_bit(data, 0);
}

#endif /* CONFIG_GENERIC_HARDIRQS */

void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	BUG_ON(entry->dev->current_state != PCI_D0);

	if (entry->msi_attrib.is_msix) {
		void __iomem *base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		msg->address_lo = readl(base + PCI_MSIX_ENTRY_LOWER_ADDR);
		msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR);
		msg->data = readl(base + PCI_MSIX_ENTRY_DATA);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = dev->msi_cap;
		u16 data;

		pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				      &msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					      &msg->address_hi);
			pci_read_config_word(dev, pos + PCI_MSI_DATA_64, &data);
		} else {
			msg->address_hi = 0;
			pci_read_config_word(dev, pos + PCI_MSI_DATA_32, &data);
		}
		msg->data = data;
	}
}

void read_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__read_msi_msg(entry, msg);
}

void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	/* Assert that the cache is valid, assuming that
	 * valid messages are not all-zeroes. */
	BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo |
		 entry->msg.data));

	*msg = entry->msg;
}

void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__get_cached_msi_msg(entry, msg);
}

void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg)
{
	if (entry->dev->current_state != PCI_D0) {
		/* Don't touch the hardware now */
	} else if (entry->msi_attrib.is_msix) {
		void __iomem *base;
		base = entry->mask_base +
			entry->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE;

		writel(msg->address_lo, base + PCI_MSIX_ENTRY_LOWER_ADDR);
		writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR);
		writel(msg->data, base + PCI_MSIX_ENTRY_DATA);
	} else {
		struct pci_dev *dev = entry->dev;
		int pos = dev->msi_cap;
		u16 msgctl;

		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &msgctl);
		msgctl &= ~PCI_MSI_FLAGS_QSIZE;
		msgctl |= entry->msi_attrib.multiple << 4;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, msgctl);

		pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
				       msg->address_lo);
		if (entry->msi_attrib.is_64) {
			pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
					       msg->address_hi);
			pci_write_config_word(dev, pos + PCI_MSI_DATA_64,
					      msg->data);
		} else {
			pci_write_config_word(dev, pos + PCI_MSI_DATA_32,
					      msg->data);
		}
	}
	entry->msg = *msg;
}

void write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
	struct msi_desc *entry = irq_get_msi_desc(irq);

	__write_msi_msg(entry, msg);
}

static void free_msi_irqs(struct pci_dev *dev)
{
	struct msi_desc *entry, *tmp;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int i, nvec;
		if (!entry->irq)
			continue;
		nvec = 1 << entry->msi_attrib.multiple;
#ifdef CONFIG_GENERIC_HARDIRQS
		for (i = 0; i < nvec; i++)
			BUG_ON(irq_has_action(entry->irq + i));
#endif
	}

	arch_teardown_msi_irqs(dev);

	list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
		if (entry->msi_attrib.is_msix) {
			if (list_is_last(&entry->list, &dev->msi_list))
				iounmap(entry->mask_base);
		}

		/*
		 * It's possible to get into this path when
		 * populate_msi_sysfs() fails, which means the entries
		 * were not registered with sysfs.  In that case don't
		 * unregister them.
		 */
		if (entry->kobj.parent) {
			kobject_del(&entry->kobj);
			kobject_put(&entry->kobj);
		}

		list_del(&entry->list);
		kfree(entry);
	}
}

static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
	struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->list);
	desc->dev = dev;

	return desc;
}

static void pci_intx_for_msi(struct pci_dev *dev, int enable)
{
	if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG))
		pci_intx(dev, enable);
}

static void __pci_restore_msi_state(struct pci_dev *dev)
{
	u16 control;
	struct msi_desc *entry;

	if (!dev->msi_enabled)
		return;

	entry = irq_get_msi_desc(dev->irq);

	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 0);
	arch_restore_msi_irqs(dev, dev->irq);

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	msi_mask_irq(entry, msi_capable_mask(control), entry->masked);
	control &= ~PCI_MSI_FLAGS_QSIZE;
	control |= (entry->msi_attrib.multiple << 4) | PCI_MSI_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void __pci_restore_msix_state(struct pci_dev *dev)
{
	struct msi_desc *entry;
	u16 control;

	if (!dev->msix_enabled)
		return;
	BUG_ON(list_empty(&dev->msi_list));
	entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);

	/* route the table */
	pci_intx_for_msi(dev, 0);
	control |= PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);

	list_for_each_entry(entry, &dev->msi_list, list) {
		arch_restore_msi_irqs(dev, entry->irq);
		msix_mask_irq(entry, entry->masked);
	}

	control &= ~PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);
}

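/*
 * pci_restore_msi_state() re-programs a device's MSI or MSI-X registers from
 * the state cached in its msi_desc entries.  It is typically invoked as part
 * of restoring PCI config space, e.g. when resuming from a low-power state.
 */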
void pci_restore_msi_state(struct pci_dev *dev)
{
	__pci_restore_msi_state(dev);
	__pci_restore_msix_state(dev);
}
EXPORT_SYMBOL_GPL(pci_restore_msi_state);


#define to_msi_attr(obj) container_of(obj, struct msi_attribute, attr)
#define to_msi_desc(obj) container_of(obj, struct msi_desc, kobj)

struct msi_attribute {
	struct attribute        attr;
	ssize_t (*show)(struct msi_desc *entry, struct msi_attribute *attr,
			char *buf);
	ssize_t (*store)(struct msi_desc *entry, struct msi_attribute *attr,
			 const char *buf, size_t count);
};

static ssize_t show_msi_mode(struct msi_desc *entry, struct msi_attribute *atr,
			     char *buf)
{
	return sprintf(buf, "%s\n", entry->msi_attrib.is_msix ? "msix" : "msi");
}

static ssize_t msi_irq_attr_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct msi_attribute *attribute = to_msi_attr(attr);
	struct msi_desc *entry = to_msi_desc(kobj);

	if (!attribute->show)
		return -EIO;

	return attribute->show(entry, attribute, buf);
}

static const struct sysfs_ops msi_irq_sysfs_ops = {
	.show = msi_irq_attr_show,
};

static struct msi_attribute mode_attribute =
	__ATTR(mode, S_IRUGO, show_msi_mode, NULL);


static struct attribute *msi_irq_default_attrs[] = {
	&mode_attribute.attr,
	NULL
};

static void msi_kobj_release(struct kobject *kobj)
{
	struct msi_desc *entry = to_msi_desc(kobj);

	pci_dev_put(entry->dev);
}

static struct kobj_type msi_irq_ktype = {
	.release = msi_kobj_release,
	.sysfs_ops = &msi_irq_sysfs_ops,
	.default_attrs = msi_irq_default_attrs,
};

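/*
 * populate_msi_sysfs() creates an "msi_irqs" kset under the device's sysfs
 * directory with one kobject per allocated vector, named after its Linux irq
 * number.  Each kobject exposes a "mode" attribute reporting "msi" or "msix",
 * so the layout is typically /sys/bus/pci/devices/<bdf>/msi_irqs/<irq>/mode.
 */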
static int populate_msi_sysfs(struct pci_dev *pdev)
{
	struct msi_desc *entry;
	struct kobject *kobj;
	int ret;
	int count = 0;

	pdev->msi_kset = kset_create_and_add("msi_irqs", NULL, &pdev->dev.kobj);
	if (!pdev->msi_kset)
		return -ENOMEM;

	list_for_each_entry(entry, &pdev->msi_list, list) {
		kobj = &entry->kobj;
		kobj->kset = pdev->msi_kset;
		pci_dev_get(pdev);
		ret = kobject_init_and_add(kobj, &msi_irq_ktype, NULL,
				     "%u", entry->irq);
		if (ret)
			goto out_unroll;

		count++;
	}

	return 0;

out_unroll:
	list_for_each_entry(entry, &pdev->msi_list, list) {
		if (!count)
			break;
		kobject_del(&entry->kobj);
		kobject_put(&entry->kobj);
		count--;
	}
	return ret;
}

/**
 * msi_capability_init - configure device's MSI capability structure
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: number of interrupts to allocate
 *
 * Setup the MSI capability structure of the device with the requested
 * number of interrupts.  A return value of zero indicates the successful
 * setup of an entry with the new MSI irq.  A negative return value indicates
 * an error, and a positive return value indicates the number of interrupts
 * which could have been allocated.
 */
static int msi_capability_init(struct pci_dev *dev, int nvec)
{
	struct msi_desc *entry;
	int ret;
	u16 control;
	unsigned mask;

	msi_set_enable(dev, 0);	/* Disable MSI during set up */

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
	/* MSI Entry Initialization */
	entry = alloc_msi_entry(dev);
	if (!entry)
		return -ENOMEM;

	entry->msi_attrib.is_msix	= 0;
	entry->msi_attrib.is_64		= !!(control & PCI_MSI_FLAGS_64BIT);
	entry->msi_attrib.entry_nr	= 0;
	entry->msi_attrib.maskbit	= !!(control & PCI_MSI_FLAGS_MASKBIT);
	entry->msi_attrib.default_irq	= dev->irq;	/* Save IOAPIC IRQ */
	entry->msi_attrib.pos		= dev->msi_cap;

	if (control & PCI_MSI_FLAGS_64BIT)
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64;
	else
		entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_32;
	/* All MSIs are unmasked by default; mask them all */
	if (entry->msi_attrib.maskbit)
		pci_read_config_dword(dev, entry->mask_pos, &entry->masked);
	mask = msi_capable_mask(control);
	msi_mask_irq(entry, mask, mask);

	list_add_tail(&entry->list, &dev->msi_list);

	/* Configure MSI capability structure */
	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	ret = populate_msi_sysfs(dev);
	if (ret) {
		msi_mask_irq(entry, mask, ~mask);
		free_msi_irqs(dev);
		return ret;
	}

	/* Set MSI enabled bits */
	pci_intx_for_msi(dev, 0);
	msi_set_enable(dev, 1);
	dev->msi_enabled = 1;

	dev->irq = entry->irq;
	return 0;
}

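/*
 * msix_map_region() locates the MSI-X table: the Table Offset/BIR register
 * gives the BAR number and offset of the table, which is then ioremapped
 * for nr_entries vectors.
 */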
static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries)
{
	resource_size_t phys_addr;
	u32 table_offset;
	u8 bir;

	pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE,
			      &table_offset);
	bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR);
	table_offset &= PCI_MSIX_TABLE_OFFSET;
	phys_addr = pci_resource_start(dev, bir) + table_offset;

	return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
}

static int msix_setup_entries(struct pci_dev *dev, void __iomem *base,
			      struct msix_entry *entries, int nvec)
{
	struct msi_desc *entry;
	int i;

	for (i = 0; i < nvec; i++) {
		entry = alloc_msi_entry(dev);
		if (!entry) {
			if (!i)
				iounmap(base);
			else
				free_msi_irqs(dev);
			/* Not enough memory. Don't try again */
			return -ENOMEM;
		}

		entry->msi_attrib.is_msix	= 1;
		entry->msi_attrib.is_64		= 1;
		entry->msi_attrib.entry_nr	= entries[i].entry;
		entry->msi_attrib.default_irq	= dev->irq;
		entry->msi_attrib.pos		= dev->msix_cap;
		entry->mask_base		= base;

		list_add_tail(&entry->list, &dev->msi_list);
	}

	return 0;
}

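/*
 * msix_program_entries() hands the allocated Linux irq numbers back to the
 * caller via entries[].vector, binds each irq to its msi_desc, and masks
 * every vector after caching its current mask-bit state.
 */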
static void msix_program_entries(struct pci_dev *dev,
				 struct msix_entry *entries)
{
	struct msi_desc *entry;
	int i = 0;

	list_for_each_entry(entry, &dev->msi_list, list) {
		int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
						PCI_MSIX_ENTRY_VECTOR_CTRL;

		entries[i].vector = entry->irq;
		irq_set_msi_desc(entry->irq, entry);
		entry->masked = readl(entry->mask_base + offset);
		msix_mask_irq(entry, 1);
		i++;
	}
}

/**
 * msix_capability_init - configure device's MSI-X capability
 * @dev: pointer to the pci_dev data structure of MSI-X device function
 * @entries: pointer to an array of struct msix_entry entries
 * @nvec: number of @entries
 *
 * Setup the MSI-X capability structure of the device function with the
 * requested number of MSI-X irqs. A return of zero indicates successful
 * setup of the requested MSI-X entries with allocated irqs; a non-zero
 * return indicates otherwise.
 **/
static int msix_capability_init(struct pci_dev *dev,
				struct msix_entry *entries, int nvec)
{
	int ret;
	u16 control;
	void __iomem *base;

	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control);

	/* Ensure MSI-X is disabled while it is set up */
	control &= ~PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);

	/* Request & Map MSI-X table region */
	base = msix_map_region(dev, msix_table_size(control));
	if (!base)
		return -ENOMEM;

	ret = msix_setup_entries(dev, base, entries, nvec);
	if (ret)
		return ret;

	ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
	if (ret)
		goto error;

	/*
	 * Some devices require MSI-X to be enabled before we can touch the
	 * MSI-X registers.  We need to mask all the vectors to prevent
	 * interrupts coming in before they're fully set up.
	 */
	control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);

	msix_program_entries(dev, entries);

	ret = populate_msi_sysfs(dev);
	if (ret) {
		ret = 0;
		goto error;
	}

	/* Set MSI-X enabled bits and unmask the function */
	pci_intx_for_msi(dev, 0);
	dev->msix_enabled = 1;

	control &= ~PCI_MSIX_FLAGS_MASKALL;
	pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, control);

	return 0;

error:
	if (ret < 0) {
		/*
		 * If we had some success, report the number of irqs
		 * we succeeded in setting up.
		 */
		struct msi_desc *entry;
		int avail = 0;

		list_for_each_entry(entry, &dev->msi_list, list) {
			if (entry->irq != 0)
				avail++;
		}
		if (avail != 0)
			ret = avail;
	}

	free_msi_irqs(dev);

	return ret;
}

/**
 * pci_msi_check_device - check whether MSI may be enabled on a device
 * @dev: pointer to the pci_dev data structure of MSI device function
 * @nvec: how many MSIs have been requested?
 * @type: are we checking for MSI or MSI-X?
 *
 * Look at global flags, the device itself, and its parent busses
 * to determine if MSI/-X are supported for the device. If MSI/-X is
 * supported return 0, else return an error code.
 **/
static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
	struct pci_bus *bus;
	int ret;

	/* MSI must be globally enabled and supported by the device */
	if (!pci_msi_enable || !dev || dev->no_msi)
		return -EINVAL;

	/*
	 * You can't ask to have 0 or less MSIs configured.
	 *  a) it's stupid ..
	 *  b) the list manipulation code assumes nvec >= 1.
	 */
	if (nvec < 1)
		return -ERANGE;

	/*
	 * Any bridge which does NOT route MSI transactions from its
	 * secondary bus to its primary bus must set NO_MSI flag on
	 * the secondary pci_bus.
	 * We expect only arch-specific PCI host bus controller driver
	 * or quirks for specific PCI bridges to be setting NO_MSI.
	 */
	for (bus = dev->bus; bus; bus = bus->parent)
		if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
			return -EINVAL;

	ret = arch_msi_check_device(dev, nvec, type);
	if (ret)
		return ret;

	return 0;
}

/**
 * pci_enable_msi_block - configure device's MSI capability structure
 * @dev: device to configure
 * @nvec: number of interrupts to configure
 *
 * Allocate IRQs for a device with the MSI capability.
 * This function returns a negative errno if an error occurs.  If it
 * is unable to allocate the number of interrupts requested, it returns
 * the number of interrupts it might be able to allocate.  If it successfully
 * allocates at least the number of interrupts requested, it returns 0 and
 * updates the @dev's irq member to the lowest new interrupt number; the
 * other interrupt numbers allocated to this device are consecutive.
 */
int pci_enable_msi_block(struct pci_dev *dev, unsigned int nvec)
{
	int status, maxvec;
	u16 msgctl;

	if (!dev->msi_cap)
		return -EINVAL;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl);
	maxvec = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1);
	if (nvec > maxvec)
		return maxvec;

	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSI);
	if (status)
		return status;

	WARN_ON(!!dev->msi_enabled);

	/* Check whether driver already requested MSI-X irqs */
	if (dev->msix_enabled) {
		dev_info(&dev->dev, "can't enable MSI "
			 "(MSI-X already enabled)\n");
		return -EINVAL;
	}

	status = msi_capability_init(dev, nvec);
	return status;
}
EXPORT_SYMBOL(pci_enable_msi_block);
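/*
 * Illustrative usage (not part of this file): per the return semantics
 * documented above, a driver that wants several MSI vectors can retry with
 * the count pci_enable_msi_block() reports as potentially available:
 *
 *	rc = pci_enable_msi_block(pdev, 4);
 *	if (rc > 0)
 *		rc = pci_enable_msi_block(pdev, rc);
 *	if (rc)
 *		... fall back to a single MSI or legacy INTx ...
 */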
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 836 |  | 
| Alexander Gordeev | 08261d8 | 2012-11-19 16:02:10 +0100 | [diff] [blame] | 837 | int pci_enable_msi_block_auto(struct pci_dev *dev, unsigned int *maxvec) | 
 | 838 | { | 
| Gavin Shan | f465136 | 2013-04-04 16:54:32 +0000 | [diff] [blame] | 839 | 	int ret, nvec; | 
| Alexander Gordeev | 08261d8 | 2012-11-19 16:02:10 +0100 | [diff] [blame] | 840 | 	u16 msgctl; | 
 | 841 |  | 
| Gavin Shan | f465136 | 2013-04-04 16:54:32 +0000 | [diff] [blame] | 842 | 	if (!dev->msi_cap) | 
| Alexander Gordeev | 08261d8 | 2012-11-19 16:02:10 +0100 | [diff] [blame] | 843 | 		return -EINVAL; | 
 | 844 |  | 
| Gavin Shan | f465136 | 2013-04-04 16:54:32 +0000 | [diff] [blame] | 845 | 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &msgctl); | 
| Alexander Gordeev | 08261d8 | 2012-11-19 16:02:10 +0100 | [diff] [blame] | 846 | 	ret = 1 << ((msgctl & PCI_MSI_FLAGS_QMASK) >> 1); | 
 | 847 |  | 
 | 848 | 	if (maxvec) | 
 | 849 | 		*maxvec = ret; | 
 | 850 |  | 
 | 851 | 	do { | 
 | 852 | 		nvec = ret; | 
 | 853 | 		ret = pci_enable_msi_block(dev, nvec); | 
 | 854 | 	} while (ret > 0); | 
 | 855 |  | 
 | 856 | 	if (ret < 0) | 
 | 857 | 		return ret; | 
 | 858 | 	return nvec; | 
 | 859 | } | 
 | 860 | EXPORT_SYMBOL(pci_enable_msi_block_auto); | 
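A companion sketch for pci_enable_msi_block_auto(), which sizes the request itself and returns the number of vectors obtained; the caller names are hypothetical.

#include <linux/pci.h>

/* Hypothetical caller: take as many MSI vectors as the device advertises. */
static int foo_enable_msi_auto(struct pci_dev *pdev)
{
	unsigned int maxvec;
	int nvec;

	nvec = pci_enable_msi_block_auto(pdev, &maxvec);
	if (nvec < 0)
		return nvec;	/* MSI could not be enabled at all */

	/* nvec vectors allocated: pdev->irq .. pdev->irq + nvec - 1 */
	dev_dbg(&pdev->dev, "got %d of up to %u MSI vectors\n", nvec, maxvec);
	return nvec;
}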
 | 861 |  | 
| Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 862 | void pci_msi_shutdown(struct pci_dev *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 863 | { | 
| Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 864 | 	struct msi_desc *desc; | 
 | 865 | 	u32 mask; | 
 | 866 | 	u16 ctrl; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 867 |  | 
| Michael Ellerman | 128bc5f | 2007-03-22 21:51:39 +1100 | [diff] [blame] | 868 | 	if (!pci_msi_enable || !dev || !dev->msi_enabled) | 
| Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 869 | 		return; | 
 | 870 |  | 
| Matthew Wilcox | 110828c | 2009-06-16 06:31:45 -0600 | [diff] [blame] | 871 | 	BUG_ON(list_empty(&dev->msi_list)); | 
 | 872 | 	desc = list_first_entry(&dev->msi_list, struct msi_desc, list); | 
| Matthew Wilcox | 110828c | 2009-06-16 06:31:45 -0600 | [diff] [blame] | 873 |  | 
| Gavin Shan | e375b56 | 2013-04-04 16:54:30 +0000 | [diff] [blame] | 874 | 	msi_set_enable(dev, 0); | 
| David Miller | ba698ad | 2007-10-25 01:16:30 -0700 | [diff] [blame] | 875 | 	pci_intx_for_msi(dev, 1); | 
| Eric W. Biederman | b1cbf4e | 2007-03-05 00:30:10 -0800 | [diff] [blame] | 876 | 	dev->msi_enabled = 0; | 
| Eric W. Biederman | 7bd007e | 2006-10-04 02:16:31 -0700 | [diff] [blame] | 877 |  | 
| Hidetoshi Seto | 12abb8b | 2009-06-24 12:08:09 +0900 | [diff] [blame] | 878 | 	/* Return the device to its initial state, with MSI unmasked */ | 
| Bjorn Helgaas | f532216 | 2013-04-17 17:34:36 -0600 | [diff] [blame] | 879 | 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &ctrl); | 
| Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 880 | 	mask = msi_capable_mask(ctrl); | 
| Hidetoshi Seto | 12abb8b | 2009-06-24 12:08:09 +0900 | [diff] [blame] | 881 | 	/* Keep cached state to be restored */ | 
 | 882 | 	__msi_mask_irq(desc, mask, ~mask); | 
| Michael Ellerman | e387b9e | 2007-03-22 21:51:27 +1100 | [diff] [blame] | 883 |  | 
 | 884 | 	/* Restore dev->irq to its default pin-assertion irq */ | 
| Matthew Wilcox | f2440d9 | 2009-03-17 08:54:09 -0400 | [diff] [blame] | 885 | 	dev->irq = desc->msi_attrib.default_irq; | 
| Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 886 | } | 
| Matthew Wilcox | 24d2755 | 2009-03-17 08:54:06 -0400 | [diff] [blame] | 887 |  | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 888 | void pci_disable_msi(struct pci_dev *dev) | 
| Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 889 | { | 
| Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 890 | 	if (!pci_msi_enable || !dev || !dev->msi_enabled) | 
 | 891 | 		return; | 
 | 892 |  | 
 | 893 | 	pci_msi_shutdown(dev); | 
| Hidetoshi Seto | f56e448 | 2009-08-06 11:32:51 +0900 | [diff] [blame] | 894 | 	free_msi_irqs(dev); | 
| Neil Horman | da8d1c8 | 2011-10-06 14:08:18 -0400 | [diff] [blame] | 895 | 	kset_unregister(dev->msi_kset); | 
 | 896 | 	dev->msi_kset = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 897 | } | 
| Michael Ellerman | 4cc086f | 2007-03-22 21:51:34 +1100 | [diff] [blame] | 898 | EXPORT_SYMBOL(pci_disable_msi); | 
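A teardown sketch, assuming the usual driver conventions: any handler registered on the MSI vector is freed with free_irq() before pci_disable_msi() is called; the function and private-data names are hypothetical.

#include <linux/interrupt.h>
#include <linux/pci.h>

/* Hypothetical ->remove() fragment: release the vector, then disable MSI. */
static void foo_teardown_msi(struct pci_dev *pdev, void *foo_priv)
{
	free_irq(pdev->irq, foo_priv);	/* undo the earlier request_irq() */
	pci_disable_msi(pdev);		/* dev->irq falls back to the INTx value */
}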
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 899 |  | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 900 | /** | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 901 |  * pci_msix_table_size - return the number of entries in the device's MSI-X table | 
 | 902 |  * @dev: pointer to the pci_dev data structure of MSI-X device function | 
 | 903 |  */ | 
 | 904 | int pci_msix_table_size(struct pci_dev *dev) | 
 | 905 | { | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 906 | 	u16 control; | 
 | 907 |  | 
| Gavin Shan | 520fe9d | 2013-04-04 16:54:33 +0000 | [diff] [blame] | 908 | 	if (!dev->msix_cap) | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 909 | 		return 0; | 
 | 910 |  | 
| Bjorn Helgaas | f84ecd2 | 2013-04-17 17:38:32 -0600 | [diff] [blame] | 911 | 	pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); | 
| Bjorn Helgaas | 527eee2 | 2013-04-17 17:44:48 -0600 | [diff] [blame] | 912 | 	return msix_table_size(control); | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 913 | } | 
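A sketch of using pci_msix_table_size() to size the msix_entry array handed to pci_enable_msix() below; the helper name is hypothetical.

#include <linux/pci.h>
#include <linux/slab.h>

/* Hypothetical helper: allocate one msix_entry per MSI-X table slot. */
static struct msix_entry *foo_alloc_msix_entries(struct pci_dev *pdev, int *nvec)
{
	struct msix_entry *entries;
	int i, n = pci_msix_table_size(pdev);

	if (!n)
		return NULL;	/* no MSI-X capability */

	entries = kcalloc(n, sizeof(*entries), GFP_KERNEL);
	if (!entries)
		return NULL;

	for (i = 0; i < n; i++)
		entries[i].entry = i;	/* request table slots 0 .. n - 1 */

	*nvec = n;
	return entries;
}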
 | 914 |  | 
 | 915 | /** | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 916 |  * pci_enable_msix - configure device's MSI-X capability structure | 
 | 917 |  * @dev: pointer to the pci_dev data structure of MSI-X device function | 
| Greg Kroah-Hartman | 70549ad | 2005-06-06 23:07:46 -0700 | [diff] [blame] | 918 |  * @entries: pointer to an array of MSI-X entries | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 919 |  * @nvec: number of MSI-X irqs requested for allocation by device driver | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 920 |  * | 
 | 921 |  * Set up the MSI-X capability structure of the device function with the | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 922 |  * number of irqs requested when its device driver calls to enable | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 923 |  * MSI-X mode on its hardware device function. A return of zero | 
 | 924 |  * indicates that the MSI-X capability structure was successfully | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 925 |  * configured with newly allocated MSI-X irqs. A return of < 0 indicates | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 926 |  * a failure. A return of > 0 indicates that the request exceeds the | 
| Michael S. Tsirkin | 57fbf52 | 2009-05-07 11:28:41 +0300 | [diff] [blame] | 927 |  * number of irqs or MSI-X vectors available; the driver should use the | 
 | 928 |  * returned value to re-send a smaller request. | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 929 |  **/ | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 930 | int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 931 | { | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 932 | 	int status, nr_entries; | 
| Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 933 | 	int i, j; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 934 |  | 
| Gavin Shan | cdf1fd4 | 2013-04-04 16:54:31 +0000 | [diff] [blame] | 935 | 	if (!entries || !dev->msix_cap) | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 936 | 		return -EINVAL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 937 |  | 
| Michael Ellerman | c9953a7 | 2007-04-05 17:19:08 +1000 | [diff] [blame] | 938 | 	status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX); | 
 | 939 | 	if (status) | 
 | 940 | 		return status; | 
 | 941 |  | 
| Rafael J. Wysocki | a52e2e3 | 2009-01-24 00:21:14 +0100 | [diff] [blame] | 942 | 	nr_entries = pci_msix_table_size(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 943 | 	if (nvec > nr_entries) | 
| Michael S. Tsirkin | 57fbf52 | 2009-05-07 11:28:41 +0300 | [diff] [blame] | 944 | 		return nr_entries; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 945 |  | 
 | 946 | 	/* Check for any invalid entries */ | 
 | 947 | 	for (i = 0; i < nvec; i++) { | 
 | 948 | 		if (entries[i].entry >= nr_entries) | 
 | 949 | 			return -EINVAL;		/* invalid entry */ | 
 | 950 | 		for (j = i + 1; j < nvec; j++) { | 
 | 951 | 			if (entries[i].entry == entries[j].entry) | 
 | 952 | 				return -EINVAL;	/* duplicate entry */ | 
 | 953 | 		} | 
 | 954 | 	} | 
| Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 955 | 	WARN_ON(!!dev->msix_enabled); | 
| Eric W. Biederman | 7bd007e | 2006-10-04 02:16:31 -0700 | [diff] [blame] | 956 |  | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 957 | 	/* Check whether the driver has already requested MSI irqs */ | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 958 | 	if (dev->msi_enabled) { | 
| Bjorn Helgaas | 80ccba1 | 2008-06-13 10:52:11 -0600 | [diff] [blame] | 959 | 		dev_info(&dev->dev, "can't enable MSI-X " | 
 | 960 | 		       "(MSI IRQ already assigned)\n"); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 961 | 		return -EINVAL; | 
 | 962 | 	} | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 963 | 	status = msix_capability_init(dev, entries, nvec); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 964 | 	return status; | 
 | 965 | } | 
| Michael Ellerman | 4cc086f | 2007-03-22 21:51:34 +1100 | [diff] [blame] | 966 | EXPORT_SYMBOL(pci_enable_msix); | 
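A usage sketch matching the return convention documented above: a positive return is the number of vectors actually available, so the caller retries with that count. The driver names and minimum are hypothetical, and entries[i].entry must be filled in beforehand (for instance by the helper sketched earlier).

#include <linux/errno.h>
#include <linux/pci.h>

#define FOO_MIN_MSIX	2	/* hypothetical driver minimum */

static int foo_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
			   int nvec)
{
	int rc;

	while (nvec >= FOO_MIN_MSIX) {
		rc = pci_enable_msix(pdev, entries, nvec);
		if (rc == 0)
			return nvec;	/* vectors are in entries[i].vector */
		if (rc < 0)
			return rc;	/* failure */
		nvec = rc;		/* only rc vectors available: retry */
	}
	return -ENOSPC;
}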
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 967 |  | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 968 | void pci_msix_shutdown(struct pci_dev *dev) | 
| Michael Ellerman | fc4afc7 | 2007-03-22 21:51:33 +1100 | [diff] [blame] | 969 | { | 
| Hidetoshi Seto | 12abb8b | 2009-06-24 12:08:09 +0900 | [diff] [blame] | 970 | 	struct msi_desc *entry; | 
 | 971 |  | 
| Michael Ellerman | 128bc5f | 2007-03-22 21:51:39 +1100 | [diff] [blame] | 972 | 	if (!pci_msi_enable || !dev || !dev->msix_enabled) | 
| Eric W. Biederman | ded86d8 | 2007-01-28 12:42:52 -0700 | [diff] [blame] | 973 | 		return; | 
 | 974 |  | 
| Hidetoshi Seto | 12abb8b | 2009-06-24 12:08:09 +0900 | [diff] [blame] | 975 | 	/* Return the device to its initial state, with MSI-X masked */ | 
 | 976 | 	list_for_each_entry(entry, &dev->msi_list, list) { | 
 | 977 | 		/* Keep the cached mask states so they can be restored later */ | 
 | 978 | 		__msix_mask_irq(entry, 1); | 
 | 979 | 	} | 
 | 980 |  | 
| Eric W. Biederman | b1cbf4e | 2007-03-05 00:30:10 -0800 | [diff] [blame] | 981 | 	msix_set_enable(dev, 0); | 
| David Miller | ba698ad | 2007-10-25 01:16:30 -0700 | [diff] [blame] | 982 | 	pci_intx_for_msi(dev, 1); | 
| Eric W. Biederman | b1cbf4e | 2007-03-05 00:30:10 -0800 | [diff] [blame] | 983 | 	dev->msix_enabled = 0; | 
| Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 984 | } | 
| Hidetoshi Seto | c901851 | 2009-08-06 11:31:27 +0900 | [diff] [blame] | 985 |  | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 986 | void pci_disable_msix(struct pci_dev *dev) | 
| Yinghai Lu | d52877c | 2008-04-23 14:58:09 -0700 | [diff] [blame] | 987 | { | 
 | 988 | 	if (!pci_msi_enable || !dev || !dev->msix_enabled) | 
 | 989 | 		return; | 
 | 990 |  | 
 | 991 | 	pci_msix_shutdown(dev); | 
| Hidetoshi Seto | f56e448 | 2009-08-06 11:32:51 +0900 | [diff] [blame] | 992 | 	free_msi_irqs(dev); | 
| Neil Horman | da8d1c8 | 2011-10-06 14:08:18 -0400 | [diff] [blame] | 993 | 	kset_unregister(dev->msi_kset); | 
 | 994 | 	dev->msi_kset = NULL; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 995 | } | 
| Michael Ellerman | 4cc086f | 2007-03-22 21:51:34 +1100 | [diff] [blame] | 996 | EXPORT_SYMBOL(pci_disable_msix); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 997 |  | 
 | 998 | /** | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 999 |  * msi_remove_pci_irq_vectors - reclaim MSI(X) irqs to unused state | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1000 |  * @dev: pointer to the pci_dev data structure of MSI(X) device function | 
 | 1001 |  * | 
| Steven Cole | eaae4b3 | 2005-05-03 18:38:30 -0600 | [diff] [blame] | 1002 |  * Called during hotplug removal, when the device function is being | 
| Eric W. Biederman | 1ce0337 | 2006-10-04 02:16:41 -0700 | [diff] [blame] | 1003 |  * hot-removed. Any MSI/MSI-X irqs previously assigned to this device | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1004 |  * function are reclaimed to an unused state so that they may be | 
 | 1005 |  * reused later on. | 
 | 1006 |  **/ | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 1007 | void msi_remove_pci_irq_vectors(struct pci_dev *dev) | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1008 | { | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1009 | 	if (!pci_msi_enable || !dev) | 
| Hidetoshi Seto | 500559a | 2009-08-10 10:14:15 +0900 | [diff] [blame] | 1010 | 		return; | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1011 |  | 
| Hidetoshi Seto | f56e448 | 2009-08-06 11:32:51 +0900 | [diff] [blame] | 1012 | 	if (dev->msi_enabled || dev->msix_enabled) | 
 | 1013 | 		free_msi_irqs(dev); | 
| Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1014 | } | 
 | 1015 |  | 
| Matthew Wilcox | 309e57d | 2006-03-05 22:33:34 -0700 | [diff] [blame] | 1016 | void pci_no_msi(void) | 
 | 1017 | { | 
 | 1018 | 	pci_msi_enable = 0; | 
 | 1019 | } | 
| Michael Ellerman | c9953a7 | 2007-04-05 17:19:08 +1000 | [diff] [blame] | 1020 |  | 
| Andrew Patterson | 07ae95f | 2008-11-10 15:31:05 -0700 | [diff] [blame] | 1021 | /** | 
 | 1022 |  * pci_msi_enabled - is MSI enabled? | 
 | 1023 |  * | 
 | 1024 |  * Returns true if MSI has not been disabled by the command-line option | 
 | 1025 |  * pci=nomsi. | 
 | 1026 |  **/ | 
 | 1027 | int pci_msi_enabled(void) | 
 | 1028 | { | 
 | 1029 | 	return pci_msi_enable; | 
 | 1030 | } | 
 | 1031 | EXPORT_SYMBOL(pci_msi_enabled); | 
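A small sketch of the intended use of pci_msi_enabled(): code that needs to know up front whether MSI can ever be used (for example, to pick a legacy-INTx fallback) can check it without touching any particular device; the function name is hypothetical.

#include <linux/pci.h>

/* Hypothetical policy check: prefer MSI unless it is globally disabled. */
static bool foo_want_msi(void)
{
	return pci_msi_enabled();	/* 0 when booted with pci=nomsi */
}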
 | 1032 |  | 
| Michael Ellerman | 4aa9bc9 | 2007-04-05 17:19:10 +1000 | [diff] [blame] | 1033 | void pci_msi_init_pci_dev(struct pci_dev *dev) | 
 | 1034 | { | 
 | 1035 | 	INIT_LIST_HEAD(&dev->msi_list); | 
| Eric W. Biederman | d5dea7d | 2011-10-17 11:46:06 -0700 | [diff] [blame] | 1036 |  | 
 | 1037 | 	/* Disable the MSI hardware to avoid screaming interrupts | 
 | 1038 | 	 * during boot.  This is the power-on reset default, so | 
 | 1039 | 	 * usually this should be a no-op. | 
 | 1040 | 	 */ | 
| Gavin Shan | e375b56 | 2013-04-04 16:54:30 +0000 | [diff] [blame] | 1041 | 	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); | 
 | 1042 | 	if (dev->msi_cap) | 
 | 1043 | 		msi_set_enable(dev, 0); | 
 | 1044 |  | 
 | 1045 | 	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); | 
 | 1046 | 	if (dev->msix_cap) | 
 | 1047 | 		msix_set_enable(dev, 0); | 
| Michael Ellerman | 4aa9bc9 | 2007-04-05 17:19:10 +1000 | [diff] [blame] | 1048 | } |