/*
 * arch/arm/mach-tegra/pci.c
 *
 * PCIe host controller driver for TEGRA(2) SOCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clk.h>
#include <linux/delay.h>

#include <asm/sizes.h>
#include <asm/mach/pci.h>

#include <mach/pinmux.h>
#include <mach/iomap.h>
#include <mach/clk.h>
#include <mach/powergate.h>

#include "board.h"

/* register definitions */
#define AFI_OFFSET		0x3800
#define PADS_OFFSET		0x3000
#define RP0_OFFSET		0x0000
#define RP1_OFFSET		0x1000

#define AFI_AXI_BAR0_SZ		0x00
#define AFI_AXI_BAR1_SZ		0x04
#define AFI_AXI_BAR2_SZ		0x08
#define AFI_AXI_BAR3_SZ		0x0c
#define AFI_AXI_BAR4_SZ		0x10
#define AFI_AXI_BAR5_SZ		0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0		0x30
#define AFI_FPCI_BAR1		0x34
#define AFI_FPCI_BAR2		0x38
#define AFI_FPCI_BAR3		0x3c
#define AFI_FPCI_BAR4		0x40
#define AFI_FPCI_BAR5		0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ		0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_CONFIGURATION		0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK		0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE		0xb8
#define AFI_INTR_CODE_MASK	0xf
#define AFI_INTR_MASTER_ABORT	4
#define AFI_INTR_LEGACY	6

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_SM_INTR_ENABLE	0xc4

#define AFI_AFI_INTR_ENABLE		0xc8
#define AFI_INTR_EN_INI_SLVERR		(1 << 0)
#define AFI_INTR_EN_INI_DECERR		(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR		(1 << 2)
#define AFI_INTR_EN_TGT_DECERR		(1 << 3)
#define AFI_INTR_EN_TGT_WRERR		(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR		(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)

#define AFI_PCIE_CONFIG					0x0f8
#define AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE		(1 << 1)
#define AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE		(1 << 2)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)

#define AFI_FUSE			0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL			0x110
#define AFI_PEX1_CTRL			0x118
#define AFI_PEX_CTRL_RST		(1 << 0)
#define AFI_PEX_CTRL_REFCLK_EN		(1 << 3)

#define RP_VEND_XP		0x00000F00
#define RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS			0x00000090
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL		0x0000009C

#define PADS_CTL		0x000000A0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL				0x000000B8
#define PADS_PLL_CTL_RST_B4SM			(1 << 1)
#define PADS_PLL_CTL_LOCKDET			(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK		(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL		(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK		(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10		(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5		(1 << 20)

/* PMC access is required for PCIE xclk (un)clamping */
#define PMC_SCRATCH42		0x144
#define PMC_SCRATCH42_PCX_CLAMP	(1 << 0)

static void __iomem *reg_pmc_base = IO_ADDRESS(TEGRA_PMC_BASE);

#define pmc_writel(value, reg) \
	__raw_writel(value, reg_pmc_base + (reg))
#define pmc_readl(reg) \
	__raw_readl(reg_pmc_base + (reg))

/*
 * Tegra2 defines 1GB in the AXI address map for PCIe.
 *
 * That address space is split into different regions, with sizes and
 * offsets as follows:
 *
 * 0x80000000 - 0x80003fff - PCI controller registers
 * 0x80004000 - 0x80103fff - PCI configuration space
 * 0x80104000 - 0x80203fff - PCI extended configuration space
 * 0x80204000 - 0x803fffff - unused
 * 0x80400000 - 0x8040ffff - downstream IO
 * 0x80410000 - 0x8fffffff - unused
 * 0x90000000 - 0x9fffffff - non-prefetchable memory
 * 0xa0000000 - 0xbfffffff - prefetchable memory
 */
#define TEGRA_PCIE_BASE		0x80000000

#define PCIE_REGS_SZ		SZ_16K
#define PCIE_CFG_OFF		PCIE_REGS_SZ
#define PCIE_CFG_SZ		SZ_1M
#define PCIE_EXT_CFG_OFF	(PCIE_CFG_SZ + PCIE_CFG_OFF)
#define PCIE_EXT_CFG_SZ		SZ_1M
#define PCIE_IOMAP_SZ		(PCIE_REGS_SZ + PCIE_CFG_SZ + PCIE_EXT_CFG_SZ)

#define MMIO_BASE		(TEGRA_PCIE_BASE + SZ_4M)
#define MMIO_SIZE		SZ_64K
#define MEM_BASE_0		(TEGRA_PCIE_BASE + SZ_256M)
#define MEM_SIZE_0		SZ_128M
#define MEM_BASE_1		(MEM_BASE_0 + MEM_SIZE_0)
#define MEM_SIZE_1		SZ_128M
#define PREFETCH_MEM_BASE_0	(MEM_BASE_1 + MEM_SIZE_1)
#define PREFETCH_MEM_SIZE_0	SZ_128M
#define PREFETCH_MEM_BASE_1	(PREFETCH_MEM_BASE_0 + PREFETCH_MEM_SIZE_0)
#define PREFETCH_MEM_SIZE_1	SZ_128M

#define PCIE_CONF_BUS(b)	((b) << 16)
#define PCIE_CONF_DEV(d)	((d) << 11)
#define PCIE_CONF_FUNC(f)	((f) << 8)
#define PCIE_CONF_REG(r)	\
	(((r) & ~0x3) | (((r) < 256) ? PCIE_CFG_OFF : PCIE_EXT_CFG_OFF))
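
/*
 * Worked example (illustration only, derived from the macros above): a
 * config access to bus 1, device 0, function 0, register 0x08 goes through
 * the AXI aperture at offset
 *
 *	PCIE_CONF_BUS(1) + PCIE_CONF_DEV(0) + PCIE_CONF_FUNC(0) +
 *	PCIE_CONF_REG(0x08) = 0x10000 + 0x4008 = 0x14008
 *
 * from tegra_pcie.regs, i.e. inside the PCIE_CFG_OFF window; registers at
 * offset >= 256 are steered to the PCIE_EXT_CFG_OFF window instead.
 */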

struct tegra_pcie_port {
	int			index;
	u8			root_bus_nr;
	void __iomem		*base;

	bool			link_up;

	char			io_space_name[16];
	char			mem_space_name[16];
	char			prefetch_space_name[20];
	struct resource		res[3];
};

struct tegra_pcie_info {
	struct tegra_pcie_port	port[2];
	int			num_ports;

	void __iomem		*regs;
	struct resource		res_mmio;

	struct clk		*pex_clk;
	struct clk		*afi_clk;
	struct clk		*pcie_xclk;
	struct clk		*pll_e;
};

static struct tegra_pcie_info tegra_pcie = {
	.res_mmio = {
		.name = "PCI IO",
		.start = MMIO_BASE,
		.end = MMIO_BASE + MMIO_SIZE - 1,
		.flags = IORESOURCE_MEM,
	},
};

void __iomem *tegra_pcie_io_base;
EXPORT_SYMBOL(tegra_pcie_io_base);

static inline void afi_writel(u32 value, unsigned long offset)
{
	writel(value, offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline u32 afi_readl(unsigned long offset)
{
	return readl(offset + AFI_OFFSET + tegra_pcie.regs);
}

static inline void pads_writel(u32 value, unsigned long offset)
{
	writel(value, offset + PADS_OFFSET + tegra_pcie.regs);
}

static inline u32 pads_readl(unsigned long offset)
{
	return readl(offset + PADS_OFFSET + tegra_pcie.regs);
}

static struct tegra_pcie_port *bus_to_port(int bus)
{
	int i;

	for (i = tegra_pcie.num_ports - 1; i >= 0; i--) {
		int rbus = tegra_pcie.port[i].root_bus_nr;
		if (rbus != -1 && rbus == bus)
			break;
	}

	return i >= 0 ? tegra_pcie.port + i : NULL;
}

static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	if (pp) {
		if (devfn != 0) {
			*val = 0xffffffff;
			return PCIBIOS_DEVICE_NOT_FOUND;
		}

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	*val = readl(addr);

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 val)
{
	struct tegra_pcie_port *pp = bus_to_port(bus->number);
	void __iomem *addr;

	u32 mask;
	u32 tmp;

	if (pp) {
		if (devfn != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		addr = pp->base + (where & ~0x3);
	} else {
		addr = tegra_pcie.regs + (PCIE_CONF_BUS(bus->number) +
					  PCIE_CONF_DEV(PCI_SLOT(devfn)) +
					  PCIE_CONF_FUNC(PCI_FUNC(devfn)) +
					  PCIE_CONF_REG(where));
	}

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	if (size == 2)
		mask = ~(0xffff << ((where & 0x3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 0x3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}

static struct pci_ops tegra_pcie_ops = {
	.read	= tegra_pcie_read_conf,
	.write	= tegra_pcie_write_conf,
};
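
/*
 * Note: both accessors above special-case a root bus (only devfn 0 exists,
 * and its registers are read/written directly in the root-port register
 * block at pp->base); every other bus/device is reached through the
 * AXI-mapped configuration apertures programmed in
 * tegra_pcie_setup_translations() below.
 */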

static void __devinit tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void __devinit tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void __devinit tegra_pcie_relax_enable(struct pci_dev *dev)
{
	u16 val16;
	int pos = pci_find_capability(dev, PCI_CAP_ID_EXP);

	if (pos <= 0) {
		dev_err(&dev->dev, "skipping relaxed ordering fixup\n");
		return;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &val16);
	val16 |= PCI_EXP_DEVCTL_RELAX_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, val16);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return 0;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	/*
	 * IORESOURCE_IO
	 */
	snprintf(pp->io_space_name, sizeof(pp->io_space_name),
		 "PCIe %d I/O", pp->index);
	pp->io_space_name[sizeof(pp->io_space_name) - 1] = 0;
	pp->res[0].name = pp->io_space_name;
	if (pp->index == 0) {
		pp->res[0].start = PCIBIOS_MIN_IO;
		pp->res[0].end = pp->res[0].start + SZ_32K - 1;
	} else {
		pp->res[0].start = PCIBIOS_MIN_IO + SZ_32K;
		pp->res[0].end = IO_SPACE_LIMIT;
	}
	pp->res[0].flags = IORESOURCE_IO;
	if (request_resource(&ioport_resource, &pp->res[0]))
		panic("Request PCIe IO resource failed\n");
	sys->resource[0] = &pp->res[0];

	/*
	 * IORESOURCE_MEM
	 */
	snprintf(pp->mem_space_name, sizeof(pp->mem_space_name),
		 "PCIe %d MEM", pp->index);
	pp->mem_space_name[sizeof(pp->mem_space_name) - 1] = 0;
	pp->res[1].name = pp->mem_space_name;
	if (pp->index == 0) {
		pp->res[1].start = MEM_BASE_0;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_0 - 1;
	} else {
		pp->res[1].start = MEM_BASE_1;
		pp->res[1].end = pp->res[1].start + MEM_SIZE_1 - 1;
	}
	pp->res[1].flags = IORESOURCE_MEM;
	if (request_resource(&iomem_resource, &pp->res[1]))
		panic("Request PCIe Memory resource failed\n");
	sys->resource[1] = &pp->res[1];

	/*
	 * IORESOURCE_MEM | IORESOURCE_PREFETCH
	 */
	snprintf(pp->prefetch_space_name, sizeof(pp->prefetch_space_name),
		 "PCIe %d PREFETCH MEM", pp->index);
	pp->prefetch_space_name[sizeof(pp->prefetch_space_name) - 1] = 0;
	pp->res[2].name = pp->prefetch_space_name;
	if (pp->index == 0) {
		pp->res[2].start = PREFETCH_MEM_BASE_0;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_0 - 1;
	} else {
		pp->res[2].start = PREFETCH_MEM_BASE_1;
		pp->res[2].end = pp->res[2].start + PREFETCH_MEM_SIZE_1 - 1;
	}
	pp->res[2].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH;
	if (request_resource(&iomem_resource, &pp->res[2]))
		panic("Request PCIe Prefetch Memory resource failed\n");
	sys->resource[2] = &pp->res[2];

	return 1;
}

static int tegra_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
{
	return INT_PCIE_INTR;
}

static struct pci_bus __init *tegra_pcie_scan_bus(int nr,
						  struct pci_sys_data *sys)
{
	struct tegra_pcie_port *pp;

	if (nr >= tegra_pcie.num_ports)
		return 0;

	pp = tegra_pcie.port + nr;
	pp->root_bus_nr = sys->busnr;

	return pci_scan_bus(sys->busnr, &tegra_pcie_ops, sys);
}

static struct hw_pci tegra_pcie_hw __initdata = {
	.nr_controllers	= 2,
	.setup		= tegra_pcie_setup,
	.scan		= tegra_pcie_scan_bus,
	.swizzle	= pci_std_swizzle,
	.map_irq	= tegra_pcie_map_irq,
};
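
/*
 * pci_common_init() (called from tegra_pcie_init() at the bottom of this
 * file) walks nr_controllers, invoking ->setup() and then ->scan() once per
 * root port; ->map_irq() routes all legacy INTx interrupts to the single
 * shared INT_PCIE_INTR line.
 */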


static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
	};

	u32 code, signature;

	code = afi_readl(AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(AFI_INTR_SIGNATURE);
	afi_writel(0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		pr_debug("PCIE: %s, signature: %08x\n", err_msg[code], signature);
	else
		pr_err("PCIE: %s, signature: %08x\n", err_msg[code], signature);

	return IRQ_HANDLED;
}

static void tegra_pcie_setup_translations(void)
{
	u32 fpci_bar;
	u32 size;
	u32 axi_address;

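	/*
	 * Illustration (derived from the writes below, not from any TRM
	 * text): the AFI_AXI_BARn_SZ registers are programmed in 4K pages
	 * (size >> 12), and the memory-window FPCI BARs pack the FPCI page
	 * number into bits [31:4] with bit 0 set, e.g. for BAR 4:
	 *
	 *	fpci_bar = (((MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1
	 *		 = ((0x90000000 >> 12) << 4) | 0x1 = 0x900001
	 */
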
	/* Bar 0: config Bar */
	fpci_bar = ((u32)0xfdff << 16);
	size = PCIE_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR0_START);
	afi_writel(size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: extended config Bar */
	fpci_bar = ((u32)0xfe1 << 20);
	size = PCIE_EXT_CFG_SZ;
	axi_address = TEGRA_PCIE_BASE + PCIE_EXT_CFG_OFF;
	afi_writel(axi_address, AFI_AXI_BAR1_START);
	afi_writel(size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: downstream IO bar */
	fpci_bar = ((__u32)0xfdfc << 16);
	size = MMIO_SIZE;
	axi_address = MMIO_BASE;
	afi_writel(axi_address, AFI_AXI_BAR2_START);
	afi_writel(size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: prefetchable memory BAR */
	fpci_bar = (((PREFETCH_MEM_BASE_0 >> 12) & 0x0fffffff) << 4) | 0x1;
	size = PREFETCH_MEM_SIZE_0 + PREFETCH_MEM_SIZE_1;
	axi_address = PREFETCH_MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR3_START);
	afi_writel(size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR3);

	/* Bar 4: non prefetchable memory BAR */
	fpci_bar = (((MEM_BASE_0 >> 12) & 0x0FFFFFFF) << 4) | 0x1;
	size = MEM_SIZE_0 + MEM_SIZE_1;
	axi_address = MEM_BASE_0;
	afi_writel(axi_address, AFI_AXI_BAR4_START);
	afi_writel(size >> 12, AFI_AXI_BAR4_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR4);

	/* Bar 5: NULL out the remaining BAR as it is not used */
	fpci_bar = 0;
	size = 0;
	axi_address = 0;
	afi_writel(axi_address, AFI_AXI_BAR5_START);
	afi_writel(size >> 12, AFI_AXI_BAR5_SZ);
	afi_writel(fpci_bar, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(0, AFI_CACHE_BAR0_SZ);
	afi_writel(0, AFI_CACHE_BAR1_ST);
	afi_writel(0, AFI_CACHE_BAR1_SZ);

	/* No MSI */
	afi_writel(0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
	afi_writel(0, AFI_MSI_AXI_BAR_ST);
	afi_writel(0, AFI_MSI_BAR_SZ);
}

static void tegra_pcie_enable_controller(void)
{
	u32 val, reg;
	int i;

	/* Enable slot clock and pulse the reset signals */
	for (i = 0, reg = AFI_PEX0_CTRL; i < 2; i++, reg += 0x8) {
		val = afi_readl(reg) | AFI_PEX_CTRL_REFCLK_EN;
		afi_writel(val, reg);
		val &= ~AFI_PEX_CTRL_RST;
		afi_writel(val, reg);

		val = afi_readl(reg) | AFI_PEX_CTRL_RST;
		afi_writel(val, reg);
	}

	/* Enable dual controller and both ports */
	val = afi_readl(AFI_PCIE_CONFIG);
	val &= ~(AFI_PCIE_CONFIG_PCIEC0_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_PCIEC1_DISABLE_DEVICE |
		 AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK);
	val |= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
	afi_writel(val, AFI_PCIE_CONFIG);

	val = afi_readl(AFI_FUSE) & ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(val, AFI_FUSE);

	/* Initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	val = pads_readl(PADS_CTL) | PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/*
	 * Set up the PHY PLL inputs: select the PLLE output as the refclock
	 * and set the TX clock reference to div10 (not div5).
	 */
	val = pads_readl(PADS_PLL_CTL);
	val &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	val |= (PADS_PLL_CTL_REFCLK_INTERNAL_CML | PADS_PLL_CTL_TXCLKREF_DIV10);
	pads_writel(val, PADS_PLL_CTL);

	/* take PLL out of reset */
	val = pads_readl(PADS_PLL_CTL) | PADS_PLL_CTL_RST_B4SM;
	pads_writel(val, PADS_PLL_CTL);

	/*
	 * Hack, set the clock voltage to the DEFAULT provided by hw folks.
	 * This doesn't exist in the documentation
	 */
	pads_writel(0xfa5cfa5c, 0xc8);

	/* Wait for the PLL to lock */
	do {
		val = pads_readl(PADS_PLL_CTL);
	} while (!(val & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	val = pads_readl(PADS_CTL) & ~PADS_CTL_IDDQ_1L;
	pads_writel(val, PADS_CTL);

	/* enable TX/RX data */
	val = pads_readl(PADS_CTL);
	val |= (PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(val, PADS_CTL);

	/* Take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(tegra_pcie.pcie_xclk);

	/* Finally enable PCIe */
	val = afi_readl(AFI_CONFIGURATION) | AFI_CONFIGURATION_EN_FPCI;
	afi_writel(val, AFI_CONFIGURATION);

	val = (AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
	       AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
	       AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR);
	afi_writel(val, AFI_AFI_INTR_ENABLE);
	afi_writel(0xffffffff, AFI_SM_INTR_ENABLE);

	/* FIXME: No MSI for now, only INT */
	afi_writel(AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* Disable all exceptions */
	afi_writel(0, AFI_FPCI_ERROR_MASKS);

	return;
}

static void tegra_pcie_xclk_clamp(bool clamp)
{
	u32 reg;

	reg = pmc_readl(PMC_SCRATCH42) & ~PMC_SCRATCH42_PCX_CLAMP;

	if (clamp)
		reg |= PMC_SCRATCH42_PCX_CLAMP;

	pmc_writel(reg, PMC_SCRATCH42);
}

static void tegra_pcie_power_off(void)
{
	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_periph_reset_assert(tegra_pcie.afi_clk);
	tegra_periph_reset_assert(tegra_pcie.pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
	tegra_pcie_xclk_clamp(true);
}

static int tegra_pcie_power_regate(void)
{
	int err;

	tegra_pcie_power_off();

	tegra_pcie_xclk_clamp(true);

	tegra_periph_reset_assert(tegra_pcie.pcie_xclk);
	tegra_periph_reset_assert(tegra_pcie.afi_clk);

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						tegra_pcie.pex_clk);
	if (err) {
		pr_err("PCIE: powerup sequence failed: %d\n", err);
		return err;
	}

	tegra_periph_reset_deassert(tegra_pcie.afi_clk);

	tegra_pcie_xclk_clamp(false);

	clk_enable(tegra_pcie.afi_clk);
	clk_enable(tegra_pcie.pex_clk);
	return clk_enable(tegra_pcie.pll_e);
}

static int tegra_pcie_clocks_get(void)
{
	int err;

	tegra_pcie.pex_clk = clk_get(NULL, "pex");
	if (IS_ERR(tegra_pcie.pex_clk))
		return PTR_ERR(tegra_pcie.pex_clk);

	tegra_pcie.afi_clk = clk_get(NULL, "afi");
	if (IS_ERR(tegra_pcie.afi_clk)) {
		err = PTR_ERR(tegra_pcie.afi_clk);
		goto err_afi_clk;
	}

	tegra_pcie.pcie_xclk = clk_get(NULL, "pcie_xclk");
	if (IS_ERR(tegra_pcie.pcie_xclk)) {
		err = PTR_ERR(tegra_pcie.pcie_xclk);
		goto err_pcie_xclk;
	}

	tegra_pcie.pll_e = clk_get_sys(NULL, "pll_e");
	if (IS_ERR(tegra_pcie.pll_e)) {
		err = PTR_ERR(tegra_pcie.pll_e);
		goto err_pll_e;
	}

	return 0;

err_pll_e:
	clk_put(tegra_pcie.pcie_xclk);
err_pcie_xclk:
	clk_put(tegra_pcie.afi_clk);
err_afi_clk:
	clk_put(tegra_pcie.pex_clk);

	return err;
}

static void tegra_pcie_clocks_put(void)
{
	clk_put(tegra_pcie.pll_e);
	clk_put(tegra_pcie.pcie_xclk);
	clk_put(tegra_pcie.afi_clk);
	clk_put(tegra_pcie.pex_clk);
}

static int __init tegra_pcie_get_resources(void)
{
	struct resource *res_mmio = &tegra_pcie.res_mmio;
	int err;

	err = tegra_pcie_clocks_get();
	if (err) {
		pr_err("PCIE: failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_regate();
	if (err) {
		pr_err("PCIE: failed to power up: %d\n", err);
		goto err_pwr_on;
	}

	tegra_pcie.regs = ioremap_nocache(TEGRA_PCIE_BASE, PCIE_IOMAP_SZ);
	if (tegra_pcie.regs == NULL) {
		pr_err("PCIE: Failed to map PCI/AFI registers\n");
		err = -ENOMEM;
		goto err_map_reg;
	}

	err = request_resource(&iomem_resource, res_mmio);
	if (err) {
		pr_err("PCIE: Failed to request resources: %d\n", err);
		goto err_req_io;
	}

	tegra_pcie_io_base = ioremap_nocache(res_mmio->start,
					     resource_size(res_mmio));
	if (tegra_pcie_io_base == NULL) {
		pr_err("PCIE: Failed to map IO\n");
		err = -ENOMEM;
		goto err_map_io;
	}

	err = request_irq(INT_PCIE_INTR, tegra_pcie_isr,
			  IRQF_SHARED, "PCIE", &tegra_pcie);
	if (err) {
		pr_err("PCIE: Failed to register IRQ: %d\n", err);
		goto err_irq;
	}
	set_irq_flags(INT_PCIE_INTR, IRQF_VALID);

	return 0;

err_irq:
	iounmap(tegra_pcie_io_base);
err_map_io:
	release_resource(&tegra_pcie.res_mmio);
err_req_io:
	iounmap(tegra_pcie.regs);
err_map_reg:
	tegra_pcie_power_off();
err_pwr_on:
	tegra_pcie_clocks_put();

	return err;
}

/*
 * FIXME: If there are no PCIe cards attached, calling this function can
 * noticeably increase boot time because of its long timeout loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
static bool tegra_pcie_check_link(struct tegra_pcie_port *pp, int idx,
				  u32 reset_reg)
{
	u32 reg;
	int retries = 3;
	int timeout;

	do {
		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_VEND_XP);

			if (reg & RP_VEND_XP_DL_UP)
				break;

			mdelay(1);
			timeout--;
		}

		if (!timeout) {
			pr_err("PCIE: port %d: link down, retrying\n", idx);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
		while (timeout) {
			reg = readl(pp->base + RP_LINK_CONTROL_STATUS);

			if (reg & 0x20000000)
				return true;

			mdelay(1);
			timeout--;
		}

retry:
		/* Pulse the PEX reset */
		reg = afi_readl(reset_reg) | AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);
		mdelay(1);
		reg = afi_readl(reset_reg) & ~AFI_PEX_CTRL_RST;
		afi_writel(reg, reset_reg);

		retries--;
	} while (retries);

	return false;
}

static void __init tegra_pcie_add_port(int index, u32 offset, u32 reset_reg)
{
	struct tegra_pcie_port *pp;

	pp = tegra_pcie.port + tegra_pcie.num_ports;

	pp->index = -1;
	pp->base = tegra_pcie.regs + offset;
	pp->link_up = tegra_pcie_check_link(pp, index, reset_reg);

	if (!pp->link_up) {
		pp->base = NULL;
		printk(KERN_INFO "PCIE: port %d: link down, ignoring\n", index);
		return;
	}

	tegra_pcie.num_ports++;
	pp->index = index;
	pp->root_bus_nr = -1;
	memset(pp->res, 0, sizeof(pp->res));
}

int __init tegra_pcie_init(bool init_port0, bool init_port1)
{
	int err;

	if (!(init_port0 || init_port1))
		return -ENODEV;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources();
	if (err)
		return err;

	tegra_pcie_enable_controller();

	/* setup the AFI address translations */
	tegra_pcie_setup_translations();

	if (init_port0)
		tegra_pcie_add_port(0, RP0_OFFSET, AFI_PEX0_CTRL);

	if (init_port1)
		tegra_pcie_add_port(1, RP1_OFFSET, AFI_PEX1_CTRL);

	pci_common_init(&tegra_pcie_hw);

	return 0;
}
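
/*
 * Usage sketch (hypothetical board code, not part of this file): a board
 * that wires up only the first root port would typically call this from its
 * machine init, for example:
 *
 *	static void __init my_board_pcie_init(void)
 *	{
 *		int err = tegra_pcie_init(true, false);
 *		if (err)
 *			pr_err("my_board: PCIe init failed: %d\n", err);
 *	}
 */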