/*
 * PCIe host controller driver for Tegra SoCs
 *
 * Copyright (c) 2010, CompuLab, Ltd.
 * Author: Mike Rapoport <mike@compulab.co.il>
 *
 * Based on NVIDIA PCIe driver
 * Copyright (c) 2008-2009, NVIDIA Corporation.
 *
 * Bits taken from arch/arm/mach-dove/pcie.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/clk.h>
#include <linux/clk/tegra.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/tegra-powergate.h>
#include <linux/vmalloc.h>
#include <linux/regulator/consumer.h>

#include <asm/mach/irq.h>
#include <asm/mach/map.h>
#include <asm/mach/pci.h>

#define INT_PCI_MSI_NR	(8 * 32)

/* register definitions */

#define AFI_AXI_BAR0_SZ	0x00
#define AFI_AXI_BAR1_SZ	0x04
#define AFI_AXI_BAR2_SZ	0x08
#define AFI_AXI_BAR3_SZ	0x0c
#define AFI_AXI_BAR4_SZ	0x10
#define AFI_AXI_BAR5_SZ	0x14

#define AFI_AXI_BAR0_START	0x18
#define AFI_AXI_BAR1_START	0x1c
#define AFI_AXI_BAR2_START	0x20
#define AFI_AXI_BAR3_START	0x24
#define AFI_AXI_BAR4_START	0x28
#define AFI_AXI_BAR5_START	0x2c

#define AFI_FPCI_BAR0	0x30
#define AFI_FPCI_BAR1	0x34
#define AFI_FPCI_BAR2	0x38
#define AFI_FPCI_BAR3	0x3c
#define AFI_FPCI_BAR4	0x40
#define AFI_FPCI_BAR5	0x44

#define AFI_CACHE_BAR0_SZ	0x48
#define AFI_CACHE_BAR0_ST	0x4c
#define AFI_CACHE_BAR1_SZ	0x50
#define AFI_CACHE_BAR1_ST	0x54

#define AFI_MSI_BAR_SZ	0x60
#define AFI_MSI_FPCI_BAR_ST	0x64
#define AFI_MSI_AXI_BAR_ST	0x68

#define AFI_MSI_VEC0	0x6c
#define AFI_MSI_VEC1	0x70
#define AFI_MSI_VEC2	0x74
#define AFI_MSI_VEC3	0x78
#define AFI_MSI_VEC4	0x7c
#define AFI_MSI_VEC5	0x80
#define AFI_MSI_VEC6	0x84
#define AFI_MSI_VEC7	0x88

#define AFI_MSI_EN_VEC0	0x8c
#define AFI_MSI_EN_VEC1	0x90
#define AFI_MSI_EN_VEC2	0x94
#define AFI_MSI_EN_VEC3	0x98
#define AFI_MSI_EN_VEC4	0x9c
#define AFI_MSI_EN_VEC5	0xa0
#define AFI_MSI_EN_VEC6	0xa4
#define AFI_MSI_EN_VEC7	0xa8

#define AFI_CONFIGURATION	0xac
#define AFI_CONFIGURATION_EN_FPCI	(1 << 0)

#define AFI_FPCI_ERROR_MASKS	0xb0

#define AFI_INTR_MASK	0xb4
#define AFI_INTR_MASK_INT_MASK	(1 << 0)
#define AFI_INTR_MASK_MSI_MASK	(1 << 8)

#define AFI_INTR_CODE	0xb8
#define AFI_INTR_CODE_MASK	0xf
#define AFI_INTR_AXI_SLAVE_ERROR	1
#define AFI_INTR_AXI_DECODE_ERROR	2
#define AFI_INTR_TARGET_ABORT	3
#define AFI_INTR_MASTER_ABORT	4
#define AFI_INTR_INVALID_WRITE	5
#define AFI_INTR_LEGACY	6
#define AFI_INTR_FPCI_DECODE_ERROR	7

#define AFI_INTR_SIGNATURE	0xbc
#define AFI_UPPER_FPCI_ADDRESS	0xc0
#define AFI_SM_INTR_ENABLE	0xc4
#define AFI_SM_INTR_INTA_ASSERT	(1 << 0)
#define AFI_SM_INTR_INTB_ASSERT	(1 << 1)
#define AFI_SM_INTR_INTC_ASSERT	(1 << 2)
#define AFI_SM_INTR_INTD_ASSERT	(1 << 3)
#define AFI_SM_INTR_INTA_DEASSERT	(1 << 4)
#define AFI_SM_INTR_INTB_DEASSERT	(1 << 5)
#define AFI_SM_INTR_INTC_DEASSERT	(1 << 6)
#define AFI_SM_INTR_INTD_DEASSERT	(1 << 7)

#define AFI_AFI_INTR_ENABLE	0xc8
#define AFI_INTR_EN_INI_SLVERR	(1 << 0)
#define AFI_INTR_EN_INI_DECERR	(1 << 1)
#define AFI_INTR_EN_TGT_SLVERR	(1 << 2)
#define AFI_INTR_EN_TGT_DECERR	(1 << 3)
#define AFI_INTR_EN_TGT_WRERR	(1 << 4)
#define AFI_INTR_EN_DFPCI_DECERR	(1 << 5)
#define AFI_INTR_EN_AXI_DECERR	(1 << 6)
#define AFI_INTR_EN_FPCI_TIMEOUT	(1 << 7)
#define AFI_INTR_EN_PRSNT_SENSE	(1 << 8)

#define AFI_PCIE_CONFIG	0x0f8
#define AFI_PCIE_CONFIG_PCIE_DISABLE(x)	(1 << ((x) + 1))
#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL	0xe
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)

#define AFI_FUSE	0x104
#define AFI_FUSE_PCIE_T0_GEN2_DIS	(1 << 2)

#define AFI_PEX0_CTRL	0x110
#define AFI_PEX1_CTRL	0x118
#define AFI_PEX2_CTRL	0x128
#define AFI_PEX_CTRL_RST	(1 << 0)
#define AFI_PEX_CTRL_CLKREQ_EN	(1 << 1)
#define AFI_PEX_CTRL_REFCLK_EN	(1 << 3)

#define AFI_PEXBIAS_CTRL_0	0x168

#define RP_VEND_XP	0x00000F00
#define RP_VEND_XP_DL_UP	(1 << 30)

#define RP_LINK_CONTROL_STATUS	0x00000090
#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000

#define PADS_CTL_SEL	0x0000009C

#define PADS_CTL	0x000000A0
#define PADS_CTL_IDDQ_1L	(1 << 0)
#define PADS_CTL_TX_DATA_EN_1L	(1 << 6)
#define PADS_CTL_RX_DATA_EN_1L	(1 << 10)

#define PADS_PLL_CTL_TEGRA20	0x000000B8
#define PADS_PLL_CTL_TEGRA30	0x000000B4
#define PADS_PLL_CTL_RST_B4SM	(1 << 1)
#define PADS_PLL_CTL_LOCKDET	(1 << 8)
#define PADS_PLL_CTL_REFCLK_MASK	(0x3 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CML	(0 << 16)
#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS	(1 << 16)
#define PADS_PLL_CTL_REFCLK_EXTERNAL	(2 << 16)
#define PADS_PLL_CTL_TXCLKREF_MASK	(0x1 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV10	(0 << 20)
#define PADS_PLL_CTL_TXCLKREF_DIV5	(1 << 20)
#define PADS_PLL_CTL_TXCLKREF_BUF_EN	(1 << 22)

#define PADS_REFCLK_CFG0	0x000000C8
#define PADS_REFCLK_CFG1	0x000000CC

/*
 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
 * entries, one entry per PCIe port. These field definitions and desired
 * values aren't in the TRM, but do come from NVIDIA.
 */
#define PADS_REFCLK_CFG_TERM_SHIFT	2	/* 6:2 */
#define PADS_REFCLK_CFG_E_TERM_SHIFT	7
#define PADS_REFCLK_CFG_PREDI_SHIFT	8	/* 11:8 */
#define PADS_REFCLK_CFG_DRVI_SHIFT	12	/* 15:12 */

/* Default value provided by HW engineering is 0xfa5c */
#define PADS_REFCLK_CFG_VALUE \
	( \
		(0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
		(0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
		(0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
		(0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
	)
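
/*
 * Quick arithmetic check of the value above:
 * (0xf << 12) | (0xa << 8) | (0 << 7) | (0x17 << 2) == 0xf000 | 0xa00 | 0x5c
 * == 0xfa5c, matching the default quoted by HW engineering.
 */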

struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	unsigned long pages;
	struct mutex lock;
	int irq;
};

/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;
	unsigned int msi_base_shift;
	u32 pads_pll_ctl;
	u32 tx_ref_sel;
	bool has_pex_clkreq_en;
	bool has_pex_bias_ctrl;
	bool has_intr_prsnt_sense;
	bool has_avdd_supply;
	bool has_cml_clk;
};

static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}

struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;
	void __iomem *afi;
	int irq;

	struct list_head busses;
	struct resource *cs;

	struct resource io;
	struct resource mem;
	struct resource prefetch;
	struct resource busn;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pcie_xclk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct tegra_msi msi;

	struct list_head ports;
	unsigned int num_ports;
	u32 xbar_config;

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
	struct regulator *avdd_supply;

	const struct tegra_pcie_soc_data *soc_data;
};

struct tegra_pcie_port {
	struct tegra_pcie *pcie;
	struct list_head list;
	struct resource regs;
	void __iomem *base;
	unsigned int index;
	unsigned int lanes;
};

struct tegra_pcie_bus {
	struct vm_struct *area;
	struct list_head list;
	unsigned int nr;
};

static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}

static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}

static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}

static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}

/*
 * The configuration space mapping on Tegra is somewhat similar to the ECAM
 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
 * register accesses are mapped:
 *
 *    [27:24] extended register number
 *    [23:16] bus number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * Mapping the whole extended configuration space would require 256 MiB of
 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual addresses is allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
 *
 *    [19:16] extended register number
 *    [15:11] device number
 *    [10: 8] function number
 *    [ 7: 0] register number
 *
 * This is achieved by stitching together 16 chunks of 64 KiB of physical
 * address space via the MMU.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	return ((where & 0xf00) << 8) | (PCI_SLOT(devfn) << 11) |
	       (PCI_FUNC(devfn) << 8) | (where & 0xfc);
}
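
/*
 * Worked example (hypothetical values): for device 2, function 1 and
 * register offset 0x104, tegra_pcie_conf_offset() returns
 * (0x100 << 8) | (2 << 11) | (1 << 8) | 0x04 = 0x11104, i.e. extended
 * register number 1 in bits [19:16], device 2 in [15:11], function 1 in
 * [10:8] and register 0x04 in [7:0], as described in the comment above.
 * The bus number is not part of the offset; it selects the per-bus mapping.
 */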

static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}

/*
 * Look up a virtual address mapping for the specified bus number. If no such
 * mapping exists, try to create one.
 */
static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
					unsigned int busnr)
{
	struct tegra_pcie_bus *bus;

	list_for_each_entry(bus, &pcie->busses, list)
		if (bus->nr == busnr)
			return bus->area->addr;

	bus = tegra_pcie_bus_alloc(pcie, busnr);
	if (IS_ERR(bus))
		return NULL;

	list_add_tail(&bus->list, &pcie->busses);

	return bus->area->addr;
}

static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}

static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 *value)
{
	void __iomem *addr;

	addr = tegra_pcie_conf_address(bus, devfn, where);
	if (!addr) {
		*value = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*value = readl(addr);

	if (size == 1)
		*value = (*value >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*value = (*value >> (8 * (where & 3))) & 0xffff;

	return PCIBIOS_SUCCESSFUL;
}

static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 value)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = tegra_pcie_conf_address(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(value, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	if (size == 2)
		mask = ~(0xffff << ((where & 0x3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 0x3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	tmp = readl(addr) & mask;
	tmp |= value << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
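
/*
 * Example of a sub-word write (hypothetical values): writing the byte 0x12
 * to offset 0x3e is turned into a read-modify-write of the aligned 32-bit
 * word at 0x3c. Here (where & 0x3) * 8 == 16, so mask == ~(0xff << 16);
 * the old bits [23:16] are cleared and replaced with 0x12 << 16.
 */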

static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};

static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
{
	unsigned long ret = 0;

	switch (port->index) {
	case 0:
		ret = AFI_PEX0_CTRL;
		break;

	case 1:
		ret = AFI_PEX1_CTRL;
		break;

	case 2:
		ret = AFI_PEX2_CTRL;
		break;
	}

	return ret;
}

static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}

static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}

static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}

static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
{
	u16 reg;

	if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
		pci_read_config_word(dev, PCI_COMMAND, &reg);
		reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
			PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
		pci_write_config_word(dev, PCI_COMMAND, reg);
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);

/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);

/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);

static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}

static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
{
	struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);

	return pcie->irq;
}

static void tegra_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}

static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	struct pci_bus *bus;

	bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
				  &sys->resources);
	if (!bus)
		return NULL;

	pci_scan_child_bus(bus);

	return bus;
}

static irqreturn_t tegra_pcie_isr(int irq, void *arg)
{
	const char *err_msg[] = {
		"Unknown",
		"AXI slave error",
		"AXI decode error",
		"Target abort",
		"Master abort",
		"Invalid write",
		"Response decoding error",
		"AXI response decoding error",
		"Transaction timeout",
	};
	struct tegra_pcie *pcie = arg;
	u32 code, signature;

	code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
	signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
	afi_writel(pcie, 0, AFI_INTR_CODE);

	if (code == AFI_INTR_LEGACY)
		return IRQ_NONE;

	if (code >= ARRAY_SIZE(err_msg))
		code = 0;

	/*
	 * do not pollute kernel log with master abort reports since they
	 * happen a lot during enumeration
	 */
	if (code == AFI_INTR_MASTER_ABORT)
		dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
			signature);
	else
		dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
			signature);

	if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
	    code == AFI_INTR_FPCI_DECODE_ERROR) {
		u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
		u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);

		if (code == AFI_INTR_MASTER_ABORT)
			dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
		else
			dev_err(pcie->dev, " FPCI address: %10llx\n", address);
	}

	return IRQ_HANDLED;
}

/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
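
/*
 * Note on units, for illustration: the AFI_AXI_BAR*_SZ registers are
 * programmed with size >> 12 above, i.e. in 4 KiB granules. With a
 * hypothetical 32 MiB non-prefetchable window starting at 0x28000000,
 * AFI_AXI_BAR3_START would be written with 0x28000000 and AFI_AXI_BAR3_SZ
 * with 0x02000000 >> 12 = 0x2000.
 */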

static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	value = afi_readl(pcie, AFI_FUSE);
	value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIe lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs, select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	tegra_periph_reset_deassert(pcie->pcie_xclk);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}

static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* TODO: disable and unprepare clocks? */

	tegra_periph_reset_assert(pcie->pcie_xclk);
	tegra_periph_reset_assert(pcie->afi_clk);
	tegra_periph_reset_assert(pcie->pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	if (soc->has_avdd_supply) {
		err = regulator_disable(pcie->avdd_supply);
		if (err < 0)
			dev_warn(pcie->dev,
				 "failed to disable AVDD regulator: %d\n",
				 err);
	}

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			 err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
			 err);
}

static int tegra_pcie_power_on(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	tegra_periph_reset_assert(pcie->pcie_xclk);
	tegra_periph_reset_assert(pcie->afi_clk);
	tegra_periph_reset_assert(pcie->pex_clk);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* enable regulators */
	err = regulator_enable(pcie->vdd_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
		return err;
	}

	err = regulator_enable(pcie->pex_clk_supply);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
			err);
		return err;
	}

	if (soc->has_avdd_supply) {
		err = regulator_enable(pcie->avdd_supply);
		if (err < 0) {
			dev_err(pcie->dev,
				"failed to enable AVDD regulator: %d\n",
				err);
			return err;
		}
	}

	err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
						pcie->pex_clk);
	if (err) {
		dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
		return err;
	}

	tegra_periph_reset_deassert(pcie->afi_clk);

	err = clk_prepare_enable(pcie->afi_clk);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
		return err;
	}

	if (soc->has_cml_clk) {
		err = clk_prepare_enable(pcie->cml_clk);
		if (err < 0) {
			dev_err(pcie->dev, "failed to enable CML clock: %d\n",
				err);
			return err;
		}
	}

	err = clk_prepare_enable(pcie->pll_e);
	if (err < 0) {
		dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
		return err;
	}

	return 0;
}

static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;

	pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
	if (IS_ERR(pcie->pex_clk))
		return PTR_ERR(pcie->pex_clk);

	pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
	if (IS_ERR(pcie->afi_clk))
		return PTR_ERR(pcie->afi_clk);

	pcie->pcie_xclk = devm_clk_get(pcie->dev, "pcie_xclk");
	if (IS_ERR(pcie->pcie_xclk))
		return PTR_ERR(pcie->pcie_xclk);

	pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
	if (IS_ERR(pcie->pll_e))
		return PTR_ERR(pcie->pll_e);

	if (soc->has_cml_clk) {
		pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
		if (IS_ERR(pcie->cml_clk))
			return PTR_ERR(pcie->cml_clk);
	}

	return 0;
}

static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	/* request and remap controller registers */
	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	if (!pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	if (!afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->pads = devm_request_and_ioremap(&pdev->dev, pads);
	if (!pcie->pads) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->afi = devm_request_and_ioremap(&pdev->dev, afi);
	if (!pcie->afi) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request and remap configuration space */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}

static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}

static int tegra_msi_alloc(struct tegra_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
{
	struct device *dev = chip->chip.dev;

	mutex_lock(&chip->lock);

	if (!test_bit(irq, chip->used))
		dev_err(dev, "trying to free unused MSI#%lu\n", irq);
	else
		clear_bit(irq, chip->used);

	mutex_unlock(&chip->lock);
}

static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
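
/*
 * Decoding example (hypothetical): MSI number 37 is reported in
 * AFI_MSI_VEC1 (i = 37 / 32 = 1) as bit 5 (offset = 37 % 32), so the
 * handler above reconstructs index = 1 * 32 + 5 = 37 and looks that up
 * in the MSI IRQ domain. Writing 1 << 5 back to AFI_MSI_VEC1 acknowledges
 * just that one vector.
 */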

static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
			       struct msi_desc *desc)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = tegra_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq)
		return -EINVAL;

	irq_set_msi_desc(irq, desc);

	msg.address_lo = virt_to_phys((void *)msi->pages);
	/* 32 bit address only */
	msg.address_hi = 0;
	msg.data = hwirq;

	write_msi_msg(irq, &msg);

	return 0;
}

static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	tegra_msi_free(msi, d->hwirq);
}

static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};

static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned long base;
	int err;
	u32 reg;

	mutex_init(&msi->lock);

	msi->chip.dev = pcie->dev;
	msi->chip.setup_irq = tegra_msi_setup_irq;
	msi->chip.teardown_irq = tegra_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	err = platform_get_irq_byname(pdev, "msi");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto err;
	}

	msi->irq = err;

	err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
			  tegra_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup AFI/FPCI range */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
	/* this register is in 4K increments */
	afi_writel(pcie, 1, AFI_MSI_BAR_SZ);

	/* enable all MSI vectors */
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);

	/* and unmask the MSI interrupt */
	reg = afi_readl(pcie, AFI_INTR_MASK);
	reg |= AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, reg, AFI_INTR_MASK);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}

static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device_node *np = pcie->dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(pcie->dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(pcie->dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(pcie->dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(pcie->dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
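
/*
 * The "lanes" argument is built in tegra_pcie_parse_dt() by packing each
 * root port's lane count into byte "index" (lanes |= value << (index << 3)).
 * For example, a hypothetical Tegra30 board with ports 0/1/2 using 4, 1 and
 * 1 lanes yields lanes = 0x00010104, which selects the "4x1, 1x2" crossbar
 * configuration above.
 */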

static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	u32 lanes = 0;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
	if (IS_ERR(pcie->vdd_supply))
		return PTR_ERR(pcie->vdd_supply);

	pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
	if (IS_ERR(pcie->pex_clk_supply))
		return PTR_ERR(pcie->pex_clk_supply);

	if (soc->has_avdd_supply) {
		pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
		if (IS_ERR(pcie->avdd_supply))
			return PTR_ERR(pcie->avdd_supply);
	}

	for_each_of_pci_range(&parser, &range) {
		of_pci_range_to_resource(&range, np, &res);

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		lanes |= value << (index << 3);

		if (!of_device_is_available(port))
			continue;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_request_and_ioremap(pcie->dev, &rp->regs);
		if (!rp->base)
			return -EADDRNOTAVAIL;

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	return 0;
}

/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}

static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}

static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};

static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};

static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);

static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->busses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}

static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
MODULE_LICENSE("GPL v2");