// SPDX-License-Identifier: GPL-2.0
/*
 * Intel MID PCI support
 *   Copyright (c) 2008 Intel Corporation
 *     Jesse Barnes <jesse.barnes@intel.com>
 *
 * Moorestown has an interesting PCI implementation:
 *   - configuration space is memory mapped (as defined by MCFG)
 *   - Lincroft devices also have a real, type 1 configuration space
 *   - Early Lincroft silicon has a type 1 access bug that will cause
 *     a hang if non-existent devices are accessed
 *   - some devices have the "fixed BAR" capability, which means
 *     they can't be relocated or modified; check for that during
 *     BAR sizing
 *
 * So, we use the MCFG space for all reads and writes, but also send
 * Lincroft writes to type 1 space.  But only read/write if the device
 * actually exists, otherwise return all 1s for reads and bit bucket
 * the writes.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/smp.h>

#include <asm/segment.h>
#include <asm/pci_x86.h>
#include <asm/hw_irq.h>
#include <asm/io_apic.h>
#include <asm/intel-mid.h>

#define PCIE_CAP_OFFSET	0x100

/* Quirks for the listed devices */
#define PCI_DEVICE_ID_INTEL_MRFLD_MMC	0x1190
#define PCI_DEVICE_ID_INTEL_MRFLD_HSU	0x1191

/* Fixed BAR fields */
#define PCIE_VNDR_CAP_ID_FIXED_BAR 0x00	/* Fixed BAR (TBD) */
#define PCI_FIXED_BAR_0_SIZE	0x04
#define PCI_FIXED_BAR_1_SIZE	0x08
#define PCI_FIXED_BAR_2_SIZE	0x0c
#define PCI_FIXED_BAR_3_SIZE	0x10
#define PCI_FIXED_BAR_4_SIZE	0x14
#define PCI_FIXED_BAR_5_SIZE	0x1c

static int pci_soc_mode;

/**
 * fixed_bar_cap - return the offset of the fixed BAR cap if found
 * @bus: PCI bus
 * @devfn: device in question
 *
 * Look for the fixed BAR cap on @bus and @devfn, returning its offset
 * if found or 0 otherwise.
 */
static int fixed_bar_cap(struct pci_bus *bus, unsigned int devfn)
{
	int pos;
	u32 pcie_cap = 0, cap_data;

	pos = PCIE_CAP_OFFSET;

	if (!raw_pci_ext_ops)
		return 0;

	while (pos) {
		if (raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
					  devfn, pos, 4, &pcie_cap))
			return 0;

		if (PCI_EXT_CAP_ID(pcie_cap) == 0x0000 ||
		    PCI_EXT_CAP_ID(pcie_cap) == 0xffff)
			break;

		if (PCI_EXT_CAP_ID(pcie_cap) == PCI_EXT_CAP_ID_VNDR) {
			raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
					      devfn, pos + 4, 4, &cap_data);
			if ((cap_data & 0xffff) == PCIE_VNDR_CAP_ID_FIXED_BAR)
				return pos;
		}

		pos = PCI_EXT_CAP_NEXT(pcie_cap);
	}

	return 0;
}

static int pci_device_update_fixed(struct pci_bus *bus, unsigned int devfn,
				   int reg, int len, u32 val, int offset)
{
	u32 size;
	unsigned int domain, busnum;
	int bar = (reg - PCI_BASE_ADDRESS_0) >> 2;

	domain = pci_domain_nr(bus);
	busnum = bus->number;

	if (val == ~0 && len == 4) {
		unsigned long decode;

		raw_pci_ext_ops->read(domain, busnum, devfn,
				      offset + 8 + (bar * 4), 4, &size);

		/* Turn the size into a decode pattern for the sizing code */
		if (size) {
			decode = size - 1;
			decode |= decode >> 1;
			decode |= decode >> 2;
			decode |= decode >> 4;
			decode |= decode >> 8;
			decode |= decode >> 16;
			decode++;
			decode = ~(decode - 1);
		} else {
			decode = 0;
		}

		/*
		 * If val is all ones, the core code is trying to size the reg,
		 * so update the mmconfig space with the real size.
		 *
		 * Note: this assumes the fixed size we got is a power of two.
		 */
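		/*
		 * Worked example (illustrative numbers, not taken from real
		 * hardware): a fixed BAR size of 0x1000 gives
		 * decode == ~(0x1000 - 1) == 0xfffff000, which the sizing
		 * code reads back as a 4 KiB BAR.
		 */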
		return raw_pci_ext_ops->write(domain, busnum, devfn, reg, 4,
					      decode);
	}

	/* This is some other kind of BAR write, so just do it. */
	return raw_pci_ext_ops->write(domain, busnum, devfn, reg, len, val);
}

/**
 * type1_access_ok - check whether to use type 1
 * @bus: bus number
 * @devfn: device & function in question
 * @reg: configuration register offset
 *
 * If the bus is on a Lincroft chip and it exists, or is not on a Lincroft at
 * all, then we can go ahead with any reads & writes.  If it's on a Lincroft,
 * but doesn't exist, avoid the access altogether to keep the chip from
 * hanging.
 */
static bool type1_access_ok(unsigned int bus, unsigned int devfn, int reg)
{
	/*
	 * This is a workaround for the A0 LNC bug where the PCI status
	 * register does not have the new CAP bit set; it cannot be written
	 * by SW either.
	 *
	 * The PCI header type in real LNC indicates a single function device,
	 * which would prevent probing other devices under the same function
	 * in the PCI shim.  Therefore, use the header type in the shim
	 * instead.
	 */
	if (reg >= 0x100 || reg == PCI_STATUS || reg == PCI_HEADER_TYPE)
		return false;
	if (bus == 0 && (devfn == PCI_DEVFN(2, 0)
				|| devfn == PCI_DEVFN(0, 0)
				|| devfn == PCI_DEVFN(3, 0)))
		return true;
	return false; /* Langwell on others */
}

static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	if (type1_access_ok(bus->number, devfn, where))
		return pci_direct_conf1.read(pci_domain_nr(bus), bus->number,
					     devfn, where, size, value);
	return raw_pci_ext_ops->read(pci_domain_nr(bus), bus->number,
				     devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	int offset;

	/*
	 * MRST has no PCI ROM BAR; a write here would make a subsequent
	 * read of the ROM BAR return 0 and then be ignored, so just drop
	 * ROM BAR writes.
	 */
	if (where == PCI_ROM_ADDRESS)
		return 0;

	/*
	 * Devices with fixed BARs need special handling:
	 *   - BAR sizing code will save, write ~0, read size, restore
	 *   - so writes to fixed BARs need special handling
	 *   - other writes to fixed BAR devices should go through mmconfig
	 */
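	/*
	 * For illustration (assumed core sizing behaviour, example values
	 * only): the core saves the BAR, writes 0xffffffff, reads back the
	 * decode mask produced by pci_device_update_fixed() (e.g. 0xfffff000
	 * for a 4 KiB fixed BAR), and then restores the original BAR value.
	 */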
	offset = fixed_bar_cap(bus, devfn);
	if (offset &&
	    (where >= PCI_BASE_ADDRESS_0 && where <= PCI_BASE_ADDRESS_5)) {
		return pci_device_update_fixed(bus, devfn, where, size, value,
					       offset);
	}

	/*
	 * On Moorestown update both real & mmconfig space
	 * Note: early Lincroft silicon can't handle type 1 accesses to
	 * non-existent devices, so just eat the write in that case.
	 */
	if (type1_access_ok(bus->number, devfn, where))
		return pci_direct_conf1.write(pci_domain_nr(bus), bus->number,
					      devfn, where, size, value);
	return raw_pci_ext_ops->write(pci_domain_nr(bus), bus->number, devfn,
				      where, size, value);
}

static int intel_mid_pci_irq_enable(struct pci_dev *dev)
{
	struct irq_alloc_info info;
	int polarity;
	int ret;
	u8 gsi;

	if (dev->irq_managed && dev->irq > 0)
		return 0;

	ret = pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &gsi);
	if (ret < 0) {
		dev_warn(&dev->dev, "Failed to read interrupt line: %d\n", ret);
		return ret;
	}

	switch (intel_mid_identify_cpu()) {
	case INTEL_MID_CPU_CHIP_TANGIER:
		polarity = IOAPIC_POL_HIGH;

		/* Special treatment for IRQ0 */
		if (gsi == 0) {
			/*
			 * Skip the HS UART common registers device since it
			 * has IRQ0 assigned but the kernel does not use it.
			 */
			if (dev->device == PCI_DEVICE_ID_INTEL_MRFLD_HSU)
				return -EBUSY;
			/*
			 * TNG has IRQ0 assigned to the eMMC controller, but
			 * there are also other devices with bogus PCI
			 * configuration that have IRQ0 assigned.  This check
			 * ensures that the eMMC gets it; the rest of the
			 * devices can still be enabled without an interrupt
			 * line being allocated.
			 */
			if (dev->device != PCI_DEVICE_ID_INTEL_MRFLD_MMC)
				return 0;
		}
		break;
	default:
		polarity = IOAPIC_POL_LOW;
		break;
	}

	ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);

	/*
	 * MRST only has an IOAPIC; the PCI IRQ lines are mapped 1:1 to
	 * IOAPIC RTE entries, so we just enable the RTE for the device.
	 */
	ret = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
	if (ret < 0)
		return ret;

	dev->irq = ret;
	dev->irq_managed = 1;

	return 0;
}

static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
	if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
	    dev->irq > 0) {
		mp_unmap_irq(dev->irq);
		dev->irq_managed = 0;
	}
}

static const struct pci_ops intel_mid_pci_ops __initconst = {
	.read = pci_read,
	.write = pci_write,
};

/**
 * intel_mid_pci_init - installs intel_mid_pci_ops
 *
 * Moorestown has an interesting PCI implementation (see above).
 * Called when the early platform detection installs it.
 */
int __init intel_mid_pci_init(void)
{
	pr_info("Intel MID platform detected, using MID PCI ops\n");
	pci_mmcfg_late_init();
	pcibios_enable_irq = intel_mid_pci_irq_enable;
	pcibios_disable_irq = intel_mid_pci_irq_disable;
	pci_root_ops = intel_mid_pci_ops;
	pci_soc_mode = 1;
	/* Continue with standard init */
	acpi_noirq_set();
	return 1;
}

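/*
 * Wiring sketch (an assumption for illustration, not defined in this file):
 * the MID platform setup code is expected to install the hook above during
 * early boot, roughly as
 *
 *	x86_init.pci.arch_init = intel_mid_pci_init;
 *
 * so that it runs ahead of the standard x86 PCI initialization; the
 * "Continue with standard init" return value of 1 then lets that standard
 * path proceed.
 */
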
/*
 * Langwell devices are not true PCI devices; they are not subject to the
 * 10 ms D3-to-D0 delay required by the PCI spec.
 */
static void pci_d3delay_fixup(struct pci_dev *dev)
{
	/*
	 * PCI fixups are effectively decided at compile time.  If we have a
	 * dual SoC/non-SoC kernel we don't want to mangle d3 on non-SoC
	 * devices.
	 */
	if (!pci_soc_mode)
		return;
	/*
	 * True PCI devices in Lincroft should allow type 1 access; the rest
	 * are Langwell fake PCI devices.
	 */
	if (type1_access_ok(dev->bus->number, dev->devfn, PCI_DEVICE_ID))
		return;
	dev->d3_delay = 0;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_d3delay_fixup);

static void mid_power_off_one_device(struct pci_dev *dev)
{
	u16 pmcsr;

	/*
	 * Update the current state first, otherwise the PCI core enforces
	 * PCI_D0 in pci_set_power_state() for devices whose state was
	 * PCI_UNKNOWN.
	 */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pci_power_t __force)(pmcsr & PCI_PM_CTRL_STATE_MASK);

	pci_set_power_state(dev, PCI_D3hot);
}

static void mid_power_off_devices(struct pci_dev *dev)
{
	int id;

	if (!pci_soc_mode)
		return;

	id = intel_mid_pwr_get_lss_id(dev);
	if (id < 0)
		return;

	/*
	 * This sets only PMCSR bits. The actual power off will happen in
	 * arch/x86/platform/intel-mid/pwr.c.
	 */
	mid_power_off_one_device(dev);
}

DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, mid_power_off_devices);

/*
 * Langwell devices reside at fixed offsets; don't try to move them.
 */
static void pci_fixed_bar_fixup(struct pci_dev *dev)
{
	unsigned long offset;
	u32 size;
	int i;

	if (!pci_soc_mode)
		return;

	/* Must have extended configuration space */
	if (dev->cfg_size < PCIE_CAP_OFFSET + 4)
		return;

	/* Fixup the BAR sizes for fixed BAR devices and make them unmoveable */
	offset = fixed_bar_cap(dev->bus, dev->devfn);
	if (!offset || PCI_DEVFN(2, 0) == dev->devfn ||
	    PCI_DEVFN(2, 2) == dev->devfn)
		return;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		pci_read_config_dword(dev, offset + 8 + (i * 4), &size);
		dev->resource[i].end = dev->resource[i].start + size - 1;
		dev->resource[i].flags |= IORESOURCE_PCI_FIXED;
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_fixed_bar_fixup);