blob: a54a0113892737659ac710fcc1d501ad0a7fa707 [file] [log] [blame]
Thierry Redingd1523b52013-08-09 16:49:19 +02001/*
2 * PCIe host controller driver for TEGRA(2) SOCs
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27#include <linux/clk.h>
28#include <linux/clk/tegra.h>
29#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/irqdomain.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/msi.h>
37#include <linux/of_address.h>
38#include <linux/of_pci.h>
39#include <linux/of_platform.h>
40#include <linux/pci.h>
41#include <linux/platform_device.h>
42#include <linux/sizes.h>
43#include <linux/slab.h>
44#include <linux/tegra-powergate.h>
45#include <linux/vmalloc.h>
46#include <linux/regulator/consumer.h>
47
48#include <asm/mach/irq.h>
49#include <asm/mach/map.h>
50#include <asm/mach/pci.h>
51
52#define INT_PCI_MSI_NR (8 * 32)
53#define TEGRA_MAX_PORTS 2
54
55/* register definitions */
56
57#define AFI_AXI_BAR0_SZ 0x00
58#define AFI_AXI_BAR1_SZ 0x04
59#define AFI_AXI_BAR2_SZ 0x08
60#define AFI_AXI_BAR3_SZ 0x0c
61#define AFI_AXI_BAR4_SZ 0x10
62#define AFI_AXI_BAR5_SZ 0x14
63
64#define AFI_AXI_BAR0_START 0x18
65#define AFI_AXI_BAR1_START 0x1c
66#define AFI_AXI_BAR2_START 0x20
67#define AFI_AXI_BAR3_START 0x24
68#define AFI_AXI_BAR4_START 0x28
69#define AFI_AXI_BAR5_START 0x2c
70
71#define AFI_FPCI_BAR0 0x30
72#define AFI_FPCI_BAR1 0x34
73#define AFI_FPCI_BAR2 0x38
74#define AFI_FPCI_BAR3 0x3c
75#define AFI_FPCI_BAR4 0x40
76#define AFI_FPCI_BAR5 0x44
77
78#define AFI_CACHE_BAR0_SZ 0x48
79#define AFI_CACHE_BAR0_ST 0x4c
80#define AFI_CACHE_BAR1_SZ 0x50
81#define AFI_CACHE_BAR1_ST 0x54
82
83#define AFI_MSI_BAR_SZ 0x60
84#define AFI_MSI_FPCI_BAR_ST 0x64
85#define AFI_MSI_AXI_BAR_ST 0x68
86
87#define AFI_MSI_VEC0 0x6c
88#define AFI_MSI_VEC1 0x70
89#define AFI_MSI_VEC2 0x74
90#define AFI_MSI_VEC3 0x78
91#define AFI_MSI_VEC4 0x7c
92#define AFI_MSI_VEC5 0x80
93#define AFI_MSI_VEC6 0x84
94#define AFI_MSI_VEC7 0x88
95
96#define AFI_MSI_EN_VEC0 0x8c
97#define AFI_MSI_EN_VEC1 0x90
98#define AFI_MSI_EN_VEC2 0x94
99#define AFI_MSI_EN_VEC3 0x98
100#define AFI_MSI_EN_VEC4 0x9c
101#define AFI_MSI_EN_VEC5 0xa0
102#define AFI_MSI_EN_VEC6 0xa4
103#define AFI_MSI_EN_VEC7 0xa8
104
105#define AFI_CONFIGURATION 0xac
106#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
107
108#define AFI_FPCI_ERROR_MASKS 0xb0
109
110#define AFI_INTR_MASK 0xb4
111#define AFI_INTR_MASK_INT_MASK (1 << 0)
112#define AFI_INTR_MASK_MSI_MASK (1 << 8)
113
114#define AFI_INTR_CODE 0xb8
115#define AFI_INTR_CODE_MASK 0xf
116#define AFI_INTR_AXI_SLAVE_ERROR 1
117#define AFI_INTR_AXI_DECODE_ERROR 2
118#define AFI_INTR_TARGET_ABORT 3
119#define AFI_INTR_MASTER_ABORT 4
120#define AFI_INTR_INVALID_WRITE 5
121#define AFI_INTR_LEGACY 6
122#define AFI_INTR_FPCI_DECODE_ERROR 7
123
124#define AFI_INTR_SIGNATURE 0xbc
125#define AFI_UPPER_FPCI_ADDRESS 0xc0
126#define AFI_SM_INTR_ENABLE 0xc4
127#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
128#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
129#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
130#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
131#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
132#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
133#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
134#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
135
136#define AFI_AFI_INTR_ENABLE 0xc8
137#define AFI_INTR_EN_INI_SLVERR (1 << 0)
138#define AFI_INTR_EN_INI_DECERR (1 << 1)
139#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
140#define AFI_INTR_EN_TGT_DECERR (1 << 3)
141#define AFI_INTR_EN_TGT_WRERR (1 << 4)
142#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
143#define AFI_INTR_EN_AXI_DECERR (1 << 6)
144#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
145
146#define AFI_PCIE_CONFIG 0x0f8
147#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
148#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
149#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
150#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
151#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
152
153#define AFI_FUSE 0x104
154#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
155
156#define AFI_PEX0_CTRL 0x110
157#define AFI_PEX1_CTRL 0x118
158#define AFI_PEX_CTRL_RST (1 << 0)
159#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
160
161#define RP_VEND_XP 0x00000F00
162#define RP_VEND_XP_DL_UP (1 << 30)
163
164#define RP_LINK_CONTROL_STATUS 0x00000090
165#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
166#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
167
168#define PADS_CTL_SEL 0x0000009C
169
170#define PADS_CTL 0x000000A0
171#define PADS_CTL_IDDQ_1L (1 << 0)
172#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
173#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
174
175#define PADS_PLL_CTL 0x000000B8
176#define PADS_PLL_CTL_RST_B4SM (1 << 1)
177#define PADS_PLL_CTL_LOCKDET (1 << 8)
178#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
179#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
180#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
181#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
182#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
183#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
184#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
185
/*
 * Per-controller MSI state: a bitmap of in-use hardware MSI vectors plus
 * the IRQ domain that maps those hardware numbers to Linux virtual IRQs.
 */
struct tegra_msi {
	struct msi_chip chip;			/* generic MSI chip callbacks */
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;		/* hwirq -> virq mapping */
	unsigned long pages;			/* MSI target page (kernel virtual address) */
	struct mutex lock;			/* protects the 'used' bitmap */
	int irq;				/* parent interrupt delivering all MSIs */
};
194
/* Convert the generic msi_chip embedded in tegra_msi back to its container. */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
199
/* Driver-wide state for one Tegra PCIe controller instance. */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PHY pads register block */
	void __iomem *afi;	/* AFI (AXI-to-FPCI bridge) register block */
	int irq;		/* controller error/legacy interrupt */

	struct list_head busses;	/* tegra_pcie_bus config-space mappings */
	struct resource *cs;		/* configuration space aperture */

	/* apertures handed to the PCI core */
	struct resource io;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pcie_xclk;
	struct clk *pll_e;

	struct tegra_msi msi;

	struct list_head ports;		/* tegra_pcie_port root ports */
	unsigned int num_ports;
	u32 xbar_config;		/* lane crossbar setting for AFI_PCIE_CONFIG */

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
};
229
/* One PCIe root port of the controller. */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number (0 or 1) */
	unsigned int lanes;		/* number of lanes assigned to this port */
};
238
/* Cached 1 MiB virtual mapping of one bus's configuration space. */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* virtual area holding the stitched mapping */
	struct list_head list;	/* entry in tegra_pcie.busses */
	unsigned int nr;	/* bus number this mapping belongs to */
};
244
/* Retrieve the tegra_pcie instance stashed in the ARM pci_sys_data. */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
249
/* Write a 32-bit value to an AFI register at the given byte offset. */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
255
/* Read a 32-bit AFI register at the given byte offset. */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
260
/* Write a 32-bit value to a PHY pads register at the given byte offset. */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
266
/* Read a 32-bit PHY pads register at the given byte offset. */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
271
272/*
273 * The configuration space mapping on Tegra is somewhat similar to the ECAM
274 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
275 * register accesses are mapped:
276 *
277 * [27:24] extended register number
278 * [23:16] bus number
279 * [15:11] device number
280 * [10: 8] function number
281 * [ 7: 0] register number
282 *
283 * Mapping the whole extended configuration space would require 256 MiB of
284 * virtual address space, only a small part of which will actually be used.
 285 * To work around this, 1 MiB of virtual addresses is allocated per bus
 286 * when the bus is first accessed. When the physical range is mapped, the
 287 * bus number bits are hidden so that the extended register number bits
288 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
289 *
290 * [19:16] extended register number
291 * [15:11] device number
292 * [10: 8] function number
293 * [ 7: 0] register number
294 *
295 * This is achieved by stitching together 16 chunks of 64 KiB of physical
296 * address space via the MMU.
297 */
/*
 * Compute the offset into a per-bus virtual mapping for a configuration
 * access (see the layout description above): extended register bits land
 * at [19:16], device at [15:11], function at [10:8], dword-aligned
 * register at [7:2].
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register number */
	offset |= PCI_SLOT(devfn) << 11;	/* device number */
	offset |= PCI_FUNC(devfn) << 8;		/* function number */
	offset |= where & 0xfc;			/* register, dword aligned */

	return offset;
}
303
/*
 * Create the 1 MiB virtual mapping of configuration space for bus @busnr.
 *
 * The physical config space interleaves busses: each bus owns sixteen
 * 64 KiB chunks, spaced 1 MiB apart (one per extended-register value).
 * This stitches those chunks into one contiguous virtual area so the
 * offset computed by tegra_pcie_conf_offset() can be used directly.
 *
 * Returns the new tegra_pcie_bus on success or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* device-type, non-executable mapping (ARM page table bits) */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/* chunk i of this bus lives i MiB into the aperture */
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* tears down any chunks already mapped and releases the area */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
350
351/*
352 * Look up a virtual address mapping for the specified bus number. If no such
 353 * mapping exists, try to create one.
354 */
355static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
356 unsigned int busnr)
357{
358 struct tegra_pcie_bus *bus;
359
360 list_for_each_entry(bus, &pcie->busses, list)
361 if (bus->nr == busnr)
362 return bus->area->addr;
363
364 bus = tegra_pcie_bus_alloc(pcie, busnr);
365 if (IS_ERR(bus))
366 return NULL;
367
368 list_add_tail(&bus->list, &pcie->busses);
369
370 return bus->area->addr;
371}
372
/*
 * Resolve the virtual address to use for a configuration access.
 *
 * Accesses on the root bus (bus 0) target the root port registers
 * directly; slot N maps to port index N-1. Accesses on any other bus go
 * through the stitched per-bus config-space mapping. Returns NULL when
 * no matching port exists or the bus mapping cannot be created.
 */
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		/* slot numbers on the root bus are 1-based port indices */
		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
404
405static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
406 int where, int size, u32 *value)
407{
408 void __iomem *addr;
409
410 addr = tegra_pcie_conf_address(bus, devfn, where);
411 if (!addr) {
412 *value = 0xffffffff;
413 return PCIBIOS_DEVICE_NOT_FOUND;
414 }
415
416 *value = readl(addr);
417
418 if (size == 1)
419 *value = (*value >> (8 * (where & 3))) & 0xff;
420 else if (size == 2)
421 *value = (*value >> (8 * (where & 3))) & 0xffff;
422
423 return PCIBIOS_SUCCESSFUL;
424}
425
426static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
427 int where, int size, u32 value)
428{
429 void __iomem *addr;
430 u32 mask, tmp;
431
432 addr = tegra_pcie_conf_address(bus, devfn, where);
433 if (!addr)
434 return PCIBIOS_DEVICE_NOT_FOUND;
435
436 if (size == 4) {
437 writel(value, addr);
438 return PCIBIOS_SUCCESSFUL;
439 }
440
441 if (size == 2)
442 mask = ~(0xffff << ((where & 0x3) * 8));
443 else if (size == 1)
444 mask = ~(0xff << ((where & 0x3) * 8));
445 else
446 return PCIBIOS_BAD_REGISTER_NUMBER;
447
448 tmp = readl(addr) & mask;
449 tmp |= value << ((where & 0x3) * 8);
450 writel(tmp, addr);
451
452 return PCIBIOS_SUCCESSFUL;
453}
454
/* Configuration-space accessors handed to the PCI core. */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
459
460static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
461{
462 unsigned long ret = 0;
463
464 switch (port->index) {
465 case 0:
466 ret = AFI_PEX0_CTRL;
467 break;
468
469 case 1:
470 ret = AFI_PEX1_CTRL;
471 break;
472 }
473
474 return ret;
475}
476
/*
 * Pulse the root port's PCIe reset line: assert (active-low bit cleared),
 * hold for 1-2 ms, then deassert.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	/* release reset */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
493
/* Enable a root port: turn on its reference clock, then reset it. */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
506
/* Disable a root port: hold it in reset and gate its reference clock. */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset (active-low bit) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
522
/*
 * Release a root port's devm-managed resources and remove it from the
 * controller's port list.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
533
/*
 * Final fixup: make sure every bridge has I/O, memory, bus mastering and
 * SERR reporting enabled in its command register.
 */
static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
546
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the class to PCI-to-PCI bridge so the core treats it as one */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
554
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in the device control register */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
561
/*
 * hw_pci.setup callback: publish this controller's memory, prefetchable
 * and bus-number resources to the PCI core and map its I/O window.
 * Returns 1 to tell the ARM PCI code the controller should be used.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* map the I/O aperture into the ARM PCI I/O space for this bus */
	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}
575
/*
 * hw_pci.map_irq callback: all legacy INTx interrupts from all devices
 * are funnelled to the controller's single shared interrupt line.
 */
static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);

	return pcie->irq;
}
582
/*
 * pci_ops-adjacent hook: attach this controller's MSI chip to each newly
 * created bus so devices on it can allocate MSIs (only with CONFIG_PCI_MSI).
 */
static void tegra_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}
591
/*
 * hw_pci.scan callback: create the root bus with this driver's config
 * accessors and enumerate everything below it. Returns NULL on failure.
 */
static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}
606
607static irqreturn_t tegra_pcie_isr(int irq, void *arg)
608{
609 const char *err_msg[] = {
610 "Unknown",
611 "AXI slave error",
612 "AXI decode error",
613 "Target abort",
614 "Master abort",
615 "Invalid write",
616 "Response decoding error",
617 "AXI response decoding error",
618 "Transaction timeout",
619 };
620 struct tegra_pcie *pcie = arg;
621 u32 code, signature;
622
623 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
624 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
625 afi_writel(pcie, 0, AFI_INTR_CODE);
626
627 if (code == AFI_INTR_LEGACY)
628 return IRQ_NONE;
629
630 if (code >= ARRAY_SIZE(err_msg))
631 code = 0;
632
633 /*
634 * do not pollute kernel log with master abort reports since they
635 * happen a lot during enumeration
636 */
637 if (code == AFI_INTR_MASTER_ABORT)
638 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
639 signature);
640 else
641 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
642 signature);
643
644 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
645 code == AFI_INTR_FPCI_DECODE_ERROR) {
646 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
647 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
648
649 if (code == AFI_INTR_MASTER_ABORT)
650 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
651 else
652 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
653 }
654
655 return IRQ_HANDLED;
656}
657
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* size in 4K units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is written twice here — looks like the
	 * second write was meant for another register; verify against TRM */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
723
/*
 * Bring the PCIe controller out of reset and into an operational state:
 * configure the lane crossbar, initialize the PHY pads and wait for its
 * PLL to lock, then enable the FPCI bridge and error interrupts.
 *
 * Returns 0 on success or -EBUSY if the PHY PLL never locks.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable only the ports actually described in the device tree */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* allow Gen2 operation on port 0 */
	value = afi_readl(pcie, AFI_FUSE);
	value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, PADS_PLL_CTL);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML |
		 PADS_PLL_CTL_TXCLKREF_DIV10;
	pads_writel(pcie, value, PADS_PLL_CTL);

	/* take PLL out of reset */
	value = pads_readl(pcie, PADS_PLL_CTL);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, PADS_PLL_CTL);

	/*
	 * Hack, set the clock voltage to the DEFAULT provided by hw folks.
	 * This doesn't exist in the documentation.
	 */
	pads_writel(pcie, 0xfa5cfa5c, 0xc8);

	/* wait for the PLL to lock (up to ~300-600 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, PADS_PLL_CTL);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(pcie->pcie_xclk);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupt sources */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
816
/*
 * Power the controller down: assert all peripheral resets, gate the
 * power domain and disable the supplies. Failures to disable regulators
 * are logged but not propagated since this is a teardown path.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	tegra_periph_reset_assert(pcie->pcie_xclk);
	tegra_periph_reset_assert(pcie->afi_clk);
	tegra_periph_reset_assert(pcie->pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_err(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_err(pcie->dev, "failed to disable VDD regulator: %d\n",
			err);
}
839
840static int tegra_pcie_power_on(struct tegra_pcie *pcie)
841{
842 int err;
843
844 tegra_periph_reset_assert(pcie->pcie_xclk);
845 tegra_periph_reset_assert(pcie->afi_clk);
846 tegra_periph_reset_assert(pcie->pex_clk);
847
848 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
849
850 /* enable regulators */
851 err = regulator_enable(pcie->vdd_supply);
852 if (err < 0) {
853 dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
854 return err;
855 }
856
857 err = regulator_enable(pcie->pex_clk_supply);
858 if (err < 0) {
859 dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
860 err);
861 return err;
862 }
863
864 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
865 pcie->pex_clk);
866 if (err) {
867 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
868 return err;
869 }
870
871 tegra_periph_reset_deassert(pcie->afi_clk);
872
873 err = clk_prepare_enable(pcie->afi_clk);
874 if (err < 0) {
875 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
876 return err;
877 }
878
879 err = clk_prepare_enable(pcie->pll_e);
880 if (err < 0) {
881 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
882 return err;
883 }
884
885 return 0;
886}
887
888static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
889{
890 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
891 if (IS_ERR(pcie->pex_clk))
892 return PTR_ERR(pcie->pex_clk);
893
894 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
895 if (IS_ERR(pcie->afi_clk))
896 return PTR_ERR(pcie->afi_clk);
897
898 pcie->pcie_xclk = devm_clk_get(pcie->dev, "pcie_xclk");
899 if (IS_ERR(pcie->pcie_xclk))
900 return PTR_ERR(pcie->pcie_xclk);
901
902 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
903 if (IS_ERR(pcie->pll_e))
904 return PTR_ERR(pcie->pll_e);
905
906 return 0;
907}
908
/*
 * Acquire everything the controller needs from the platform device:
 * clocks, power, the "pads"/"afi" register windows, the "cs" config
 * space aperture and the "intr" interrupt. On any failure the
 * controller is powered back off before returning the error.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	/* request and remap controller registers */
	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	if (!pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	if (!afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->pads = devm_request_and_ioremap(&pdev->dev, pads);
	if (!pcie->pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->afi = devm_request_and_ioremap(&pdev->dev, afi);
	if (!pcie->afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request and remap configuration space */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/*
	 * only reserve the region here; the per-bus virtual mappings are
	 * created on demand by tegra_pcie_bus_alloc()
	 */
	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	/* shared because legacy INTx also arrives on this line */
	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
987
/*
 * Release the controller's interrupt and power it off. Counterpart to
 * tegra_pcie_get_resources(); always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}
996
997static int tegra_msi_alloc(struct tegra_msi *chip)
998{
999 int msi;
1000
1001 mutex_lock(&chip->lock);
1002
1003 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1004 if (msi < INT_PCI_MSI_NR)
1005 set_bit(msi, chip->used);
1006 else
1007 msi = -ENOSPC;
1008
1009 mutex_unlock(&chip->lock);
1010
1011 return msi;
1012}
1013
1014static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1015{
1016 struct device *dev = chip->chip.dev;
1017
1018 mutex_lock(&chip->lock);
1019
1020 if (!test_bit(irq, chip->used))
1021 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1022 else
1023 clear_bit(irq, chip->used);
1024
1025 mutex_unlock(&chip->lock);
1026}
1027
/*
 * MSI parent interrupt handler: scan all eight 32-bit pending-vector
 * registers, acknowledge each pending MSI and dispatch it to the virtual
 * IRQ mapped in the MSI domain. Returns IRQ_HANDLED if at least one MSI
 * was processed.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/* only dispatch vectors we actually handed out */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1068
1069static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1070 struct msi_desc *desc)
1071{
1072 struct tegra_msi *msi = to_tegra_msi(chip);
1073 struct msi_msg msg;
1074 unsigned int irq;
1075 int hwirq;
1076
1077 hwirq = tegra_msi_alloc(msi);
1078 if (hwirq < 0)
1079 return hwirq;
1080
1081 irq = irq_create_mapping(msi->domain, hwirq);
1082 if (!irq)
1083 return -EINVAL;
1084
1085 irq_set_msi_desc(irq, desc);
1086
1087 msg.address_lo = virt_to_phys((void *)msi->pages);
1088 /* 32 bit address only */
1089 msg.address_hi = 0;
1090 msg.data = hwirq;
1091
1092 write_msi_msg(irq, &msg);
1093
1094 return 0;
1095}
1096
/*
 * msi_chip.teardown_irq: return the hardware MSI vector backing this
 * virtual IRQ to the allocation bitmap.
 */
static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	tegra_msi_free(msi, d->hwirq);
}
1104
/* irq_chip for the per-MSI virtual IRQs; masking uses the generic MSI helpers. */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1112
/*
 * irq_domain map callback: wire a newly mapped virtual IRQ to the MSI
 * irq_chip with simple-IRQ flow handling and mark it usable.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
1122
/* Operations for the linear MSI IRQ domain. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1126
1127static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1128{
1129 struct platform_device *pdev = to_platform_device(pcie->dev);
1130 struct tegra_msi *msi = &pcie->msi;
1131 unsigned long base;
1132 int err;
1133 u32 reg;
1134
1135 mutex_init(&msi->lock);
1136
1137 msi->chip.dev = pcie->dev;
1138 msi->chip.setup_irq = tegra_msi_setup_irq;
1139 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1140
1141 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1142 &msi_domain_ops, &msi->chip);
1143 if (!msi->domain) {
1144 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1145 return -ENOMEM;
1146 }
1147
1148 err = platform_get_irq_byname(pdev, "msi");
1149 if (err < 0) {
1150 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1151 goto err;
1152 }
1153
1154 msi->irq = err;
1155
1156 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1157 tegra_msi_irq_chip.name, pcie);
1158 if (err < 0) {
1159 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1160 goto err;
1161 }
1162
1163 /* setup AFI/FPCI range */
1164 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1165 base = virt_to_phys((void *)msi->pages);
1166
1167 afi_writel(pcie, base, AFI_MSI_FPCI_BAR_ST);
1168 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1169 /* this register is in 4K increments */
1170 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1171
1172 /* enable all MSI vectors */
1173 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1174 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1175 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1176 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1177 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1178 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1179 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1180 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1181
1182 /* and unmask the MSI interrupt */
1183 reg = afi_readl(pcie, AFI_INTR_MASK);
1184 reg |= AFI_INTR_MASK_MSI_MASK;
1185 afi_writel(pcie, reg, AFI_INTR_MASK);
1186
1187 return 0;
1188
1189err:
1190 irq_domain_remove(msi->domain);
1191 return err;
1192}
1193
1194static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1195{
1196 struct tegra_msi *msi = &pcie->msi;
1197 unsigned int i, irq;
1198 u32 value;
1199
1200 /* mask the MSI interrupt */
1201 value = afi_readl(pcie, AFI_INTR_MASK);
1202 value &= ~AFI_INTR_MASK_MSI_MASK;
1203 afi_writel(pcie, value, AFI_INTR_MASK);
1204
1205 /* disable all MSI vectors */
1206 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1207 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1208 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1209 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1210 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1211 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1212 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1213 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1214
1215 free_pages(msi->pages, 0);
1216
1217 if (msi->irq > 0)
1218 free_irq(msi->irq, pcie);
1219
1220 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1221 irq = irq_find_mapping(msi->domain, i);
1222 if (irq > 0)
1223 irq_dispose_mapping(irq);
1224 }
1225
1226 irq_domain_remove(msi->domain);
1227
1228 return 0;
1229}
1230
1231static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1232 u32 *xbar)
1233{
1234 struct device_node *np = pcie->dev->of_node;
1235
1236 switch (lanes) {
1237 case 0x00000004:
1238 dev_info(pcie->dev, "single-mode configuration\n");
1239 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1240 return 0;
1241
1242 case 0x00000202:
1243 dev_info(pcie->dev, "dual-mode configuration\n");
1244 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1245 return 0;
1246 }
1247
1248 return -EINVAL;
1249}
1250
1251static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1252{
1253 struct device_node *np = pcie->dev->of_node, *port;
1254 struct of_pci_range_parser parser;
1255 struct of_pci_range range;
1256 struct resource res;
1257 u32 lanes = 0;
1258 int err;
1259
1260 if (of_pci_range_parser_init(&parser, np)) {
1261 dev_err(pcie->dev, "missing \"ranges\" property\n");
1262 return -EINVAL;
1263 }
1264
1265 pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
1266 if (IS_ERR(pcie->vdd_supply))
1267 return PTR_ERR(pcie->vdd_supply);
1268
1269 pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
1270 if (IS_ERR(pcie->pex_clk_supply))
1271 return PTR_ERR(pcie->pex_clk_supply);
1272
1273 for_each_of_pci_range(&parser, &range) {
1274 of_pci_range_to_resource(&range, np, &res);
1275
1276 switch (res.flags & IORESOURCE_TYPE_BITS) {
1277 case IORESOURCE_IO:
1278 memcpy(&pcie->io, &res, sizeof(res));
1279 pcie->io.name = "I/O";
1280 break;
1281
1282 case IORESOURCE_MEM:
1283 if (res.flags & IORESOURCE_PREFETCH) {
1284 memcpy(&pcie->prefetch, &res, sizeof(res));
1285 pcie->prefetch.name = "PREFETCH";
1286 } else {
1287 memcpy(&pcie->mem, &res, sizeof(res));
1288 pcie->mem.name = "MEM";
1289 }
1290 break;
1291 }
1292 }
1293
1294 err = of_pci_parse_bus_range(np, &pcie->busn);
1295 if (err < 0) {
1296 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1297 err);
1298 pcie->busn.name = np->name;
1299 pcie->busn.start = 0;
1300 pcie->busn.end = 0xff;
1301 pcie->busn.flags = IORESOURCE_BUS;
1302 }
1303
1304 /* parse root ports */
1305 for_each_child_of_node(np, port) {
1306 struct tegra_pcie_port *rp;
1307 unsigned int index;
1308 u32 value;
1309
1310 err = of_pci_get_devfn(port);
1311 if (err < 0) {
1312 dev_err(pcie->dev, "failed to parse address: %d\n",
1313 err);
1314 return err;
1315 }
1316
1317 index = PCI_SLOT(err);
1318
1319 if (index < 1 || index > TEGRA_MAX_PORTS) {
1320 dev_err(pcie->dev, "invalid port number: %d\n", index);
1321 return -EINVAL;
1322 }
1323
1324 index--;
1325
1326 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1327 if (err < 0) {
1328 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1329 err);
1330 return err;
1331 }
1332
1333 if (value > 16) {
1334 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1335 return -EINVAL;
1336 }
1337
1338 lanes |= value << (index << 3);
1339
1340 if (!of_device_is_available(port))
1341 continue;
1342
1343 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1344 if (!rp)
1345 return -ENOMEM;
1346
1347 err = of_address_to_resource(port, 0, &rp->regs);
1348 if (err < 0) {
1349 dev_err(pcie->dev, "failed to parse address: %d\n",
1350 err);
1351 return err;
1352 }
1353
1354 INIT_LIST_HEAD(&rp->list);
1355 rp->index = index;
1356 rp->lanes = value;
1357 rp->pcie = pcie;
1358
1359 rp->base = devm_request_and_ioremap(pcie->dev, &rp->regs);
1360 if (!rp->base)
1361 return -EADDRNOTAVAIL;
1362
1363 list_add_tail(&rp->list, &pcie->ports);
1364 }
1365
1366 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1367 if (err < 0) {
1368 dev_err(pcie->dev, "invalid lane configuration\n");
1369 return err;
1370 }
1371
1372 return 0;
1373}
1374
1375/*
1376 * FIXME: If there are no PCIe cards attached, then calling this function
1377 * can result in the increase of the bootup time as there are big timeout
1378 * loops.
1379 */
1380#define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
1381static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1382{
1383 unsigned int retries = 3;
1384 unsigned long value;
1385
1386 do {
1387 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1388
1389 do {
1390 value = readl(port->base + RP_VEND_XP);
1391
1392 if (value & RP_VEND_XP_DL_UP)
1393 break;
1394
1395 usleep_range(1000, 2000);
1396 } while (--timeout);
1397
1398 if (!timeout) {
1399 dev_err(port->pcie->dev, "link %u down, retrying\n",
1400 port->index);
1401 goto retry;
1402 }
1403
1404 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1405
1406 do {
1407 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1408
1409 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1410 return true;
1411
1412 usleep_range(1000, 2000);
1413 } while (--timeout);
1414
1415retry:
1416 tegra_pcie_port_reset(port);
1417 } while (--retries);
1418
1419 return false;
1420}
1421
1422static int tegra_pcie_enable(struct tegra_pcie *pcie)
1423{
1424 struct tegra_pcie_port *port, *tmp;
1425 struct hw_pci hw;
1426
1427 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1428 dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1429 port->index, port->lanes);
1430
1431 tegra_pcie_port_enable(port);
1432
1433 if (tegra_pcie_port_check_link(port))
1434 continue;
1435
1436 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1437
1438 tegra_pcie_port_disable(port);
1439 tegra_pcie_port_free(port);
1440 }
1441
1442 memset(&hw, 0, sizeof(hw));
1443
1444 hw.nr_controllers = 1;
1445 hw.private_data = (void **)&pcie;
1446 hw.setup = tegra_pcie_setup;
1447 hw.map_irq = tegra_pcie_map_irq;
1448 hw.add_bus = tegra_pcie_add_bus;
1449 hw.scan = tegra_pcie_scan_bus;
1450 hw.ops = &tegra_pcie_ops;
1451
1452 pci_common_init_dev(pcie->dev, &hw);
1453
1454 return 0;
1455}
1456
1457static int tegra_pcie_probe(struct platform_device *pdev)
1458{
1459 struct tegra_pcie *pcie;
1460 int err;
1461
1462 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
1463 if (!pcie)
1464 return -ENOMEM;
1465
1466 INIT_LIST_HEAD(&pcie->busses);
1467 INIT_LIST_HEAD(&pcie->ports);
1468 pcie->dev = &pdev->dev;
1469
1470 err = tegra_pcie_parse_dt(pcie);
1471 if (err < 0)
1472 return err;
1473
1474 pcibios_min_mem = 0;
1475
1476 err = tegra_pcie_get_resources(pcie);
1477 if (err < 0) {
1478 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
1479 return err;
1480 }
1481
1482 err = tegra_pcie_enable_controller(pcie);
1483 if (err)
1484 goto put_resources;
1485
1486 /* setup the AFI address translations */
1487 tegra_pcie_setup_translations(pcie);
1488
1489 if (IS_ENABLED(CONFIG_PCI_MSI)) {
1490 err = tegra_pcie_enable_msi(pcie);
1491 if (err < 0) {
1492 dev_err(&pdev->dev,
1493 "failed to enable MSI support: %d\n",
1494 err);
1495 goto put_resources;
1496 }
1497 }
1498
1499 err = tegra_pcie_enable(pcie);
1500 if (err < 0) {
1501 dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
1502 goto disable_msi;
1503 }
1504
1505 platform_set_drvdata(pdev, pcie);
1506 return 0;
1507
1508disable_msi:
1509 if (IS_ENABLED(CONFIG_PCI_MSI))
1510 tegra_pcie_disable_msi(pcie);
1511put_resources:
1512 tegra_pcie_put_resources(pcie);
1513 return err;
1514}
1515
/* device tree match table: this driver supports Tegra20 only */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra20-pcie", },
	{ }, /* sentinel */
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1521
/*
 * Platform driver definition. No .remove is provided, so unbinding via
 * sysfs is suppressed (suppress_bind_attrs) to avoid leaving the
 * controller in an inconsistent state.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
1532
1533MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1534MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1535MODULE_LICENSE("GPLv2");