blob: 946935db62b6dcbb0ce407eb6bd2e00c24bd064e [file] [log] [blame]
Thierry Redingd1523b52013-08-09 16:49:19 +02001/*
Jay Agarwal94716cd2013-08-09 16:49:24 +02002 * PCIe host controller driver for Tegra SoCs
Thierry Redingd1523b52013-08-09 16:49:19 +02003 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27#include <linux/clk.h>
Thierry Reding2cb989f2014-07-22 12:30:46 -060028#include <linux/debugfs.h>
Thierry Redingd1523b52013-08-09 16:49:19 +020029#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/interrupt.h>
32#include <linux/irq.h>
33#include <linux/irqdomain.h>
34#include <linux/kernel.h>
35#include <linux/module.h>
36#include <linux/msi.h>
37#include <linux/of_address.h>
38#include <linux/of_pci.h>
39#include <linux/of_platform.h>
40#include <linux/pci.h>
41#include <linux/platform_device.h>
Stephen Warren3127a6b2013-11-06 15:56:58 -070042#include <linux/reset.h>
Thierry Redingd1523b52013-08-09 16:49:19 +020043#include <linux/sizes.h>
44#include <linux/slab.h>
Thierry Redingd1523b52013-08-09 16:49:19 +020045#include <linux/vmalloc.h>
46#include <linux/regulator/consumer.h>
47
Thierry Reding306a7f92014-07-17 13:17:24 +020048#include <soc/tegra/cpuidle.h>
Thierry Reding72323982014-07-11 13:19:06 +020049#include <soc/tegra/pmc.h>
Thierry Reding306a7f92014-07-17 13:17:24 +020050
Thierry Redingd1523b52013-08-09 16:49:19 +020051#include <asm/mach/irq.h>
52#include <asm/mach/map.h>
53#include <asm/mach/pci.h>
54
55#define INT_PCI_MSI_NR (8 * 32)
Thierry Redingd1523b52013-08-09 16:49:19 +020056
57/* register definitions */
58
59#define AFI_AXI_BAR0_SZ 0x00
60#define AFI_AXI_BAR1_SZ 0x04
61#define AFI_AXI_BAR2_SZ 0x08
62#define AFI_AXI_BAR3_SZ 0x0c
63#define AFI_AXI_BAR4_SZ 0x10
64#define AFI_AXI_BAR5_SZ 0x14
65
66#define AFI_AXI_BAR0_START 0x18
67#define AFI_AXI_BAR1_START 0x1c
68#define AFI_AXI_BAR2_START 0x20
69#define AFI_AXI_BAR3_START 0x24
70#define AFI_AXI_BAR4_START 0x28
71#define AFI_AXI_BAR5_START 0x2c
72
73#define AFI_FPCI_BAR0 0x30
74#define AFI_FPCI_BAR1 0x34
75#define AFI_FPCI_BAR2 0x38
76#define AFI_FPCI_BAR3 0x3c
77#define AFI_FPCI_BAR4 0x40
78#define AFI_FPCI_BAR5 0x44
79
80#define AFI_CACHE_BAR0_SZ 0x48
81#define AFI_CACHE_BAR0_ST 0x4c
82#define AFI_CACHE_BAR1_SZ 0x50
83#define AFI_CACHE_BAR1_ST 0x54
84
85#define AFI_MSI_BAR_SZ 0x60
86#define AFI_MSI_FPCI_BAR_ST 0x64
87#define AFI_MSI_AXI_BAR_ST 0x68
88
89#define AFI_MSI_VEC0 0x6c
90#define AFI_MSI_VEC1 0x70
91#define AFI_MSI_VEC2 0x74
92#define AFI_MSI_VEC3 0x78
93#define AFI_MSI_VEC4 0x7c
94#define AFI_MSI_VEC5 0x80
95#define AFI_MSI_VEC6 0x84
96#define AFI_MSI_VEC7 0x88
97
98#define AFI_MSI_EN_VEC0 0x8c
99#define AFI_MSI_EN_VEC1 0x90
100#define AFI_MSI_EN_VEC2 0x94
101#define AFI_MSI_EN_VEC3 0x98
102#define AFI_MSI_EN_VEC4 0x9c
103#define AFI_MSI_EN_VEC5 0xa0
104#define AFI_MSI_EN_VEC6 0xa4
105#define AFI_MSI_EN_VEC7 0xa8
106
107#define AFI_CONFIGURATION 0xac
108#define AFI_CONFIGURATION_EN_FPCI (1 << 0)
109
110#define AFI_FPCI_ERROR_MASKS 0xb0
111
112#define AFI_INTR_MASK 0xb4
113#define AFI_INTR_MASK_INT_MASK (1 << 0)
114#define AFI_INTR_MASK_MSI_MASK (1 << 8)
115
116#define AFI_INTR_CODE 0xb8
117#define AFI_INTR_CODE_MASK 0xf
118#define AFI_INTR_AXI_SLAVE_ERROR 1
119#define AFI_INTR_AXI_DECODE_ERROR 2
120#define AFI_INTR_TARGET_ABORT 3
121#define AFI_INTR_MASTER_ABORT 4
122#define AFI_INTR_INVALID_WRITE 5
123#define AFI_INTR_LEGACY 6
124#define AFI_INTR_FPCI_DECODE_ERROR 7
125
126#define AFI_INTR_SIGNATURE 0xbc
127#define AFI_UPPER_FPCI_ADDRESS 0xc0
128#define AFI_SM_INTR_ENABLE 0xc4
129#define AFI_SM_INTR_INTA_ASSERT (1 << 0)
130#define AFI_SM_INTR_INTB_ASSERT (1 << 1)
131#define AFI_SM_INTR_INTC_ASSERT (1 << 2)
132#define AFI_SM_INTR_INTD_ASSERT (1 << 3)
133#define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
134#define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
135#define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
136#define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
137
138#define AFI_AFI_INTR_ENABLE 0xc8
139#define AFI_INTR_EN_INI_SLVERR (1 << 0)
140#define AFI_INTR_EN_INI_DECERR (1 << 1)
141#define AFI_INTR_EN_TGT_SLVERR (1 << 2)
142#define AFI_INTR_EN_TGT_DECERR (1 << 3)
143#define AFI_INTR_EN_TGT_WRERR (1 << 4)
144#define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
145#define AFI_INTR_EN_AXI_DECERR (1 << 6)
146#define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
Jay Agarwal94716cd2013-08-09 16:49:24 +0200147#define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
Thierry Redingd1523b52013-08-09 16:49:19 +0200148
149#define AFI_PCIE_CONFIG 0x0f8
150#define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
151#define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
152#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
153#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
Jay Agarwal94716cd2013-08-09 16:49:24 +0200154#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
Thierry Redingd1523b52013-08-09 16:49:19 +0200155#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
Jay Agarwal94716cd2013-08-09 16:49:24 +0200156#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
157#define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
Thierry Redingd1523b52013-08-09 16:49:19 +0200158
159#define AFI_FUSE 0x104
160#define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
161
162#define AFI_PEX0_CTRL 0x110
163#define AFI_PEX1_CTRL 0x118
Jay Agarwal94716cd2013-08-09 16:49:24 +0200164#define AFI_PEX2_CTRL 0x128
Thierry Redingd1523b52013-08-09 16:49:19 +0200165#define AFI_PEX_CTRL_RST (1 << 0)
Jay Agarwal94716cd2013-08-09 16:49:24 +0200166#define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
Thierry Redingd1523b52013-08-09 16:49:19 +0200167#define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
168
Jay Agarwal94716cd2013-08-09 16:49:24 +0200169#define AFI_PEXBIAS_CTRL_0 0x168
170
Thierry Redingd1523b52013-08-09 16:49:19 +0200171#define RP_VEND_XP 0x00000F00
172#define RP_VEND_XP_DL_UP (1 << 30)
173
174#define RP_LINK_CONTROL_STATUS 0x00000090
175#define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
176#define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
177
178#define PADS_CTL_SEL 0x0000009C
179
180#define PADS_CTL 0x000000A0
181#define PADS_CTL_IDDQ_1L (1 << 0)
182#define PADS_CTL_TX_DATA_EN_1L (1 << 6)
183#define PADS_CTL_RX_DATA_EN_1L (1 << 10)
184
Jay Agarwal94716cd2013-08-09 16:49:24 +0200185#define PADS_PLL_CTL_TEGRA20 0x000000B8
186#define PADS_PLL_CTL_TEGRA30 0x000000B4
Thierry Redingd1523b52013-08-09 16:49:19 +0200187#define PADS_PLL_CTL_RST_B4SM (1 << 1)
188#define PADS_PLL_CTL_LOCKDET (1 << 8)
189#define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
190#define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
191#define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
192#define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
193#define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
194#define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
195#define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
Jay Agarwal94716cd2013-08-09 16:49:24 +0200196#define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
197
198#define PADS_REFCLK_CFG0 0x000000C8
199#define PADS_REFCLK_CFG1 0x000000CC
Thierry Redingd1523b52013-08-09 16:49:19 +0200200
Stephen Warrenb02b07a2013-08-09 16:49:25 +0200201/*
202 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
203 * entries, one entry per PCIe port. These field definitions and desired
204 * values aren't in the TRM, but do come from NVIDIA.
205 */
206#define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
207#define PADS_REFCLK_CFG_E_TERM_SHIFT 7
208#define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
209#define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
210
211/* Default value provided by HW engineering is 0xfa5c */
212#define PADS_REFCLK_CFG_VALUE \
213 ( \
214 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
215 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
216 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
217 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
218 )
219
/*
 * Per-controller MSI state: up to INT_PCI_MSI_NR (8 banks of 32) vectors,
 * allocation tracked in the 'used' bitmap.
 */
struct tegra_msi {
	struct msi_chip chip;			/* generic MSI chip callbacks */
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated vector numbers */
	struct irq_domain *domain;		/* IRQ domain backing the vectors */
	unsigned long pages;			/* page(s) targeted by MSI writes */
	struct mutex lock;			/* presumably serializes 'used' updates — verify at call sites */
	int irq;				/* parent interrupt carrying all MSIs */
};
228
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;	/* shift for MSI base — NOTE(review): used outside this chunk, verify semantics there */
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register (differs per SoC) */
	u32 tx_ref_sel;			/* PADS_PLL_CTL_TXCLKREF_* divider selection */
	bool has_pex_clkreq_en;		/* AFI_PEX_CTRL_CLKREQ_EN bit present */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register present */
	bool has_intr_prsnt_sense;	/* PRSNT_SENSE interrupt supported */
	bool has_cml_clk;		/* separate CML clock must be enabled */
};
240
Thierry Redingd1523b52013-08-09 16:49:19 +0200241static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
242{
243 return container_of(chip, struct tegra_msi, chip);
244}
245
/* Per-controller driver state. */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;		/* PADS (PHY) register window */
	void __iomem *afi;		/* AFI (AXI-to-FPCI bridge) register window */
	int irq;			/* controller error interrupt */

	struct list_head buses;		/* lazily created per-bus config mappings */
	struct resource *cs;		/* configuration space aperture */

	struct resource io;		/* downstream I/O window */
	struct resource mem;		/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;		/* only on SoCs with has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct tegra_msi msi;

	struct list_head ports;		/* probed root ports */
	unsigned int num_ports;
	u32 xbar_config;		/* lane crossbar configuration (AFI_PCIE_CONFIG) */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc_data *soc_data;
	struct dentry *debugfs;
};
282
/* One PCIe root port of the controller. */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* physical register region */
	void __iomem *base;		/* mapped root-port registers */
	unsigned int index;		/* port number (selects AFI_PEX<n>_CTRL) */
	unsigned int lanes;		/* number of lanes assigned to this port */
};
291
/* A lazily-created 1 MiB virtual mapping of one bus's config space. */
struct tegra_pcie_bus {
	struct vm_struct *area;		/* virtual area holding the mapping */
	struct list_head list;		/* entry in tegra_pcie.buses */
	unsigned int nr;		/* bus number this mapping serves */
};
297
298static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
299{
300 return sys->private_data;
301}
302
303static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
304 unsigned long offset)
305{
306 writel(value, pcie->afi + offset);
307}
308
309static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
310{
311 return readl(pcie->afi + offset);
312}
313
314static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
315 unsigned long offset)
316{
317 writel(value, pcie->pads + offset);
318}
319
320static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
321{
322 return readl(pcie->pads + offset);
323}
324
325/*
326 * The configuration space mapping on Tegra is somewhat similar to the ECAM
327 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
328 * register accesses are mapped:
329 *
330 * [27:24] extended register number
331 * [23:16] bus number
332 * [15:11] device number
333 * [10: 8] function number
334 * [ 7: 0] register number
335 *
336 * Mapping the whole extended configuration space would require 256 MiB of
337 * virtual address space, only a small part of which will actually be used.
 * To work around this, 1 MiB of virtual address space is allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
342 *
343 * [19:16] extended register number
344 * [15:11] device number
345 * [10: 8] function number
346 * [ 7: 0] register number
347 *
348 * This is achieved by stitching together 16 chunks of 64 KiB of physical
349 * address space via the MMU.
350 */
/*
 * Compute the offset into a bus's 1 MiB virtual config window for the
 * given device/function/register (see the mapping description above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register -> [19:16] */
	offset |= PCI_SLOT(devfn) << 11;	/* device number -> [15:11] */
	offset |= PCI_FUNC(devfn) << 8;		/* function number -> [10:8] */
	offset |= where & 0xfc;			/* dword-aligned register number */

	return offset;
}
356
/*
 * Create the 1 MiB virtual config-space window for bus 'busnr' by stitching
 * together 16 chunks of 64 KiB of physical config space through the MMU
 * (see the mapping description above). Returns the new tegra_pcie_bus or
 * an ERR_PTR; the caller owns the returned structure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* device-memory, non-executable page protection for the mapping */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/*
		 * chunk i lives 'i' MiB into the aperture; the bus number
		 * selects the 64 KiB slice within each 1 MiB chunk
		 */
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* vunmap tears down any partially-created mappings and the area */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
403
404/*
405 * Look up a virtual address mapping for the specified bus number. If no such
Bjorn Helgaasf7625982013-11-14 11:28:18 -0700406 * mapping exists, try to create one.
Thierry Redingd1523b52013-08-09 16:49:19 +0200407 */
408static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
409 unsigned int busnr)
410{
411 struct tegra_pcie_bus *bus;
412
Bjorn Helgaasf7625982013-11-14 11:28:18 -0700413 list_for_each_entry(bus, &pcie->buses, list)
Thierry Redingd1523b52013-08-09 16:49:19 +0200414 if (bus->nr == busnr)
Jingoo Han1e652492013-09-25 16:40:54 -0600415 return (void __iomem *)bus->area->addr;
Thierry Redingd1523b52013-08-09 16:49:19 +0200416
417 bus = tegra_pcie_bus_alloc(pcie, busnr);
418 if (IS_ERR(bus))
419 return NULL;
420
Bjorn Helgaasf7625982013-11-14 11:28:18 -0700421 list_add_tail(&bus->list, &pcie->buses);
Thierry Redingd1523b52013-08-09 16:49:19 +0200422
Jingoo Han1e652492013-09-25 16:40:54 -0600423 return (void __iomem *)bus->area->addr;
Thierry Redingd1523b52013-08-09 16:49:19 +0200424}
425
/*
 * Return the virtual address through which the config space of the given
 * device/function/register can be accessed, or NULL if it cannot be mapped.
 *
 * On the root bus (bus 0), slot N + 1 addresses root port N directly via
 * its own register window; unmatched slots yield NULL. For downstream
 * buses, the per-bus virtual window is (lazily) created and the ECAM-like
 * offset from tegra_pcie_conf_offset() applied.
 */
static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
					     unsigned int devfn,
					     int where)
{
	struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
	void __iomem *addr = NULL;

	if (bus->number == 0) {
		unsigned int slot = PCI_SLOT(devfn);
		struct tegra_pcie_port *port;

		/* root port registers are accessed directly, not via FPCI */
		list_for_each_entry(port, &pcie->ports, list) {
			if (port->index + 1 == slot) {
				addr = port->base + (where & ~3);
				break;
			}
		}
	} else {
		addr = tegra_pcie_bus_map(pcie, bus->number);
		if (!addr) {
			dev_err(pcie->dev,
				"failed to map cfg. space for bus %u\n",
				bus->number);
			return NULL;
		}

		addr += tegra_pcie_conf_offset(devfn, where);
	}

	return addr;
}
457
458static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
459 int where, int size, u32 *value)
460{
461 void __iomem *addr;
462
463 addr = tegra_pcie_conf_address(bus, devfn, where);
464 if (!addr) {
465 *value = 0xffffffff;
466 return PCIBIOS_DEVICE_NOT_FOUND;
467 }
468
469 *value = readl(addr);
470
471 if (size == 1)
472 *value = (*value >> (8 * (where & 3))) & 0xff;
473 else if (size == 2)
474 *value = (*value >> (8 * (where & 3))) & 0xffff;
475
476 return PCIBIOS_SUCCESSFUL;
477}
478
479static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
480 int where, int size, u32 value)
481{
482 void __iomem *addr;
483 u32 mask, tmp;
484
485 addr = tegra_pcie_conf_address(bus, devfn, where);
486 if (!addr)
487 return PCIBIOS_DEVICE_NOT_FOUND;
488
489 if (size == 4) {
490 writel(value, addr);
491 return PCIBIOS_SUCCESSFUL;
492 }
493
494 if (size == 2)
495 mask = ~(0xffff << ((where & 0x3) * 8));
496 else if (size == 1)
497 mask = ~(0xff << ((where & 0x3) * 8));
498 else
499 return PCIBIOS_BAD_REGISTER_NUMBER;
500
501 tmp = readl(addr) & mask;
502 tmp |= value << ((where & 0x3) * 8);
503 writel(tmp, addr);
504
505 return PCIBIOS_SUCCESSFUL;
506}
507
/* Config-space accessors used for every bus behind this host bridge. */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
512
513static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
514{
515 unsigned long ret = 0;
516
517 switch (port->index) {
518 case 0:
519 ret = AFI_PEX0_CTRL;
520 break;
521
522 case 1:
523 ret = AFI_PEX1_CTRL;
524 break;
Jay Agarwal94716cd2013-08-09 16:49:24 +0200525
526 case 2:
527 ret = AFI_PEX2_CTRL;
528 break;
Thierry Redingd1523b52013-08-09 16:49:19 +0200529 }
530
531 return ret;
532}
533
/*
 * Pulse the port's PEX reset: clearing AFI_PEX_CTRL_RST asserts reset,
 * which is held for 1-2 ms before being released again.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* keep reset asserted for at least 1 ms */
	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
550
/*
 * Bring a root port up: enable its reference clock (and CLKREQ gating on
 * SoCs that support it), then pulse the port reset.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	/* CLKREQ-based clock gating is only available on some SoCs */
	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
568
/*
 * Shut a root port down: assert its reset, then gate its reference clock.
 * The two register updates are deliberately separate read-modify-write
 * accesses so reset is asserted before the clock stops.
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset (AFI_PEX_CTRL_RST is asserted when cleared) */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
584
/*
 * Release a root port: unmap its register window, return its memory
 * region, unlink it from the controller's port list and free the
 * (devm-allocated) structure.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
595
596static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
597{
598 u16 reg;
599
600 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
601 pci_read_config_word(dev, PCI_COMMAND, &reg);
602 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
603 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
604 pci_write_config_word(dev, PCI_COMMAND, reg);
605 }
606}
607DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
608
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the class to PCI-to-PCI bridge so the core handles it as one */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
Thierry Redingd1523b52013-08-09 16:49:19 +0200618
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering Enable bit in Device Control */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
625
/*
 * hw_pci.setup hook: register the controller's memory, prefetchable and
 * bus-number resources with the PCI core and map the I/O port window.
 * Returns 1 so the core goes on to scan this root bus.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);
	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* map 64 KiB of I/O space per controller into the PCI I/O area */
	pci_ioremap_io(nr * SZ_64K, io_start);

	return 1;
}
640
641static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
642{
643 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
Lucas Stachf5d33522014-04-16 10:24:32 -0600644 int irq;
Thierry Redingd1523b52013-08-09 16:49:19 +0200645
Stephen Warrenb4f17372013-05-06 14:19:19 -0600646 tegra_cpuidle_pcie_irqs_in_use();
647
Lucas Stachf5d33522014-04-16 10:24:32 -0600648 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
649 if (!irq)
650 irq = pcie->irq;
651
652 return irq;
Thierry Redingd1523b52013-08-09 16:49:19 +0200653}
654
655static void tegra_pcie_add_bus(struct pci_bus *bus)
656{
657 if (IS_ENABLED(CONFIG_PCI_MSI)) {
658 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
659
660 bus->msi = &pcie->msi.chip;
661 }
662}
663
664static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
665{
666 struct tegra_pcie *pcie = sys_to_pcie(sys);
667 struct pci_bus *bus;
668
669 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
670 &sys->resources);
671 if (!bus)
672 return NULL;
673
674 pci_scan_child_bus(bus);
675
676 return bus;
677}
678
679static irqreturn_t tegra_pcie_isr(int irq, void *arg)
680{
681 const char *err_msg[] = {
682 "Unknown",
683 "AXI slave error",
684 "AXI decode error",
685 "Target abort",
686 "Master abort",
687 "Invalid write",
688 "Response decoding error",
689 "AXI response decoding error",
690 "Transaction timeout",
691 };
692 struct tegra_pcie *pcie = arg;
693 u32 code, signature;
694
695 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
696 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
697 afi_writel(pcie, 0, AFI_INTR_CODE);
698
699 if (code == AFI_INTR_LEGACY)
700 return IRQ_NONE;
701
702 if (code >= ARRAY_SIZE(err_msg))
703 code = 0;
704
705 /*
706 * do not pollute kernel log with master abort reports since they
707 * happen a lot during enumeration
708 */
709 if (code == AFI_INTR_MASTER_ABORT)
710 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
711 signature);
712 else
713 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
714 signature);
715
716 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
717 code == AFI_INTR_FPCI_DECODE_ERROR) {
718 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
719 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
720
721 if (code == AFI_INTR_MASTER_ABORT)
722 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
723 else
724 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
725 }
726
727 return IRQ_HANDLED;
728}
729
730/*
731 * FPCI map is as follows:
732 * - 0xfdfc000000: I/O space
733 * - 0xfdfe000000: type 0 configuration space
734 * - 0xfdff000000: type 1 configuration space
735 * - 0xfe00000000: type 0 extended configuration space
736 * - 0xfe10000000: type 1 extended configuration space
737 */
/*
 * Program the AFI's AXI-to-FPCI address translations: BAR0 covers the
 * extended configuration aperture, BAR1 the downstream I/O window, BAR2
 * the prefetchable and BAR3 the non-prefetchable memory windows. Unused
 * BARs and the MSI translation are cleared.
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;
	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);	/* size in 4 KiB units */
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = io_start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is cleared a second time here —
	 * harmless, but verify whether another register was intended */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
796
/*
 * One-time controller bring-up: select the lane crossbar configuration,
 * enable only the probed ports, initialize the PHY pads and PLL, release
 * the PCIe reset and enable FPCI plus the AFI error interrupts.
 * Returns 0 on success or -EBUSY if the pad PLL never locks.
 *
 * The register access order below follows the hardware bring-up sequence;
 * do not reorder.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable each port that was actually probed */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* NOTE(review): Gen2 is disabled unconditionally on port 0 here —
	 * confirm whether this should depend on the SoC generation */
	value = afi_readl(pcie, AFI_FUSE);
	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock (up to ~300-600 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	/* enable AFI error interrupt sources */
	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
897
/*
 * Power the controller down: assert all resets, power-gate the PCIE
 * partition and disable the supply regulators.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	int err;

	/* TODO: disable and unprepare clocks? */

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	/* best-effort: a regulator failure here is only worth a warning */
	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
}
Thierry Redingd1523b52013-08-09 16:49:19 +0200913}
914
915static int tegra_pcie_power_on(struct tegra_pcie *pcie)
916{
Jay Agarwal94716cd2013-08-09 16:49:24 +0200917 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
Thierry Redingd1523b52013-08-09 16:49:19 +0200918 int err;
919
Stephen Warren3127a6b2013-11-06 15:56:58 -0700920 reset_control_assert(pcie->pcie_xrst);
921 reset_control_assert(pcie->afi_rst);
922 reset_control_assert(pcie->pex_rst);
Thierry Redingd1523b52013-08-09 16:49:19 +0200923
924 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
925
926 /* enable regulators */
Thierry Reding077fb152014-05-28 16:49:13 +0200927 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
928 if (err < 0)
929 dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
Jay Agarwal94716cd2013-08-09 16:49:24 +0200930
Thierry Redingd1523b52013-08-09 16:49:19 +0200931 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
Stephen Warren80b28792013-11-06 15:45:46 -0700932 pcie->pex_clk,
933 pcie->pex_rst);
Thierry Redingd1523b52013-08-09 16:49:19 +0200934 if (err) {
935 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
936 return err;
937 }
938
Stephen Warren3127a6b2013-11-06 15:56:58 -0700939 reset_control_deassert(pcie->afi_rst);
Thierry Redingd1523b52013-08-09 16:49:19 +0200940
941 err = clk_prepare_enable(pcie->afi_clk);
942 if (err < 0) {
943 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
944 return err;
945 }
946
Jay Agarwal94716cd2013-08-09 16:49:24 +0200947 if (soc->has_cml_clk) {
948 err = clk_prepare_enable(pcie->cml_clk);
949 if (err < 0) {
950 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
951 err);
952 return err;
953 }
954 }
955
Thierry Redingd1523b52013-08-09 16:49:19 +0200956 err = clk_prepare_enable(pcie->pll_e);
957 if (err < 0) {
958 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
959 return err;
960 }
961
962 return 0;
963}
964
965static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
966{
Jay Agarwal94716cd2013-08-09 16:49:24 +0200967 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
968
Thierry Redingd1523b52013-08-09 16:49:19 +0200969 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
970 if (IS_ERR(pcie->pex_clk))
971 return PTR_ERR(pcie->pex_clk);
972
973 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
974 if (IS_ERR(pcie->afi_clk))
975 return PTR_ERR(pcie->afi_clk);
976
Thierry Redingd1523b52013-08-09 16:49:19 +0200977 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
978 if (IS_ERR(pcie->pll_e))
979 return PTR_ERR(pcie->pll_e);
980
Jay Agarwal94716cd2013-08-09 16:49:24 +0200981 if (soc->has_cml_clk) {
982 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
983 if (IS_ERR(pcie->cml_clk))
984 return PTR_ERR(pcie->cml_clk);
985 }
986
Thierry Redingd1523b52013-08-09 16:49:19 +0200987 return 0;
988}
989
Stephen Warren3127a6b2013-11-06 15:56:58 -0700990static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
991{
992 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
993 if (IS_ERR(pcie->pex_rst))
994 return PTR_ERR(pcie->pex_rst);
995
996 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
997 if (IS_ERR(pcie->afi_rst))
998 return PTR_ERR(pcie->afi_rst);
999
1000 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1001 if (IS_ERR(pcie->pcie_xrst))
1002 return PTR_ERR(pcie->pcie_xrst);
1003
1004 return 0;
1005}
1006
/*
 * Acquire every resource the controller needs: clocks, resets, power, the
 * "pads" and "afi" register windows, the configuration-space aperture and
 * the legacy interrupt. Anything that fails after the power-up step jumps
 * to the poweroff label so the block is powered back down.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	/* from here on, failures must undo the power-up */
	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1080
/*
 * Release the resources taken by tegra_pcie_get_resources(): free the legacy
 * interrupt (if one was requested) and power the block down. The memory
 * regions, clocks and resets are device-managed and released automatically.
 * Always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}
1089
1090static int tegra_msi_alloc(struct tegra_msi *chip)
1091{
1092 int msi;
1093
1094 mutex_lock(&chip->lock);
1095
1096 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1097 if (msi < INT_PCI_MSI_NR)
1098 set_bit(msi, chip->used);
1099 else
1100 msi = -ENOSPC;
1101
1102 mutex_unlock(&chip->lock);
1103
1104 return msi;
1105}
1106
1107static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1108{
1109 struct device *dev = chip->chip.dev;
1110
1111 mutex_lock(&chip->lock);
1112
1113 if (!test_bit(irq, chip->used))
1114 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1115 else
1116 clear_bit(irq, chip->used);
1117
1118 mutex_unlock(&chip->lock);
1119}
1120
/*
 * Interrupt handler for the controller's single MSI interrupt. The AFI
 * exposes 8 x 32-bit pending registers (AFI_MSI_VEC0..7); each set bit is
 * a pending MSI vector. Each vector is acknowledged in hardware before its
 * Linux IRQ is dispatched, and the pending register is re-read afterwards
 * to pick up vectors that arrived while handling.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/* only dispatch vectors we actually handed out */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1161
1162static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1163 struct msi_desc *desc)
1164{
1165 struct tegra_msi *msi = to_tegra_msi(chip);
1166 struct msi_msg msg;
1167 unsigned int irq;
1168 int hwirq;
1169
1170 hwirq = tegra_msi_alloc(msi);
1171 if (hwirq < 0)
1172 return hwirq;
1173
1174 irq = irq_create_mapping(msi->domain, hwirq);
1175 if (!irq)
1176 return -EINVAL;
1177
1178 irq_set_msi_desc(irq, desc);
1179
1180 msg.address_lo = virt_to_phys((void *)msi->pages);
1181 /* 32 bit address only */
1182 msg.address_hi = 0;
1183 msg.data = hwirq;
1184
1185 write_msi_msg(irq, &msg);
1186
1187 return 0;
1188}
1189
/*
 * msi_chip .teardown_irq callback: release the hardware MSI vector backing
 * a Linux IRQ. The IRQ mapping itself is disposed of in
 * tegra_pcie_disable_msi().
 */
static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct tegra_msi *msi = to_tegra_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	tegra_msi_free(msi, d->hwirq);
}
1197
/*
 * irq_chip for MSI vectors: mask/unmask simply forward to the generic MSI
 * helpers, which toggle the per-vector mask bit in the device's MSI
 * capability.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1205
/*
 * irq_domain .map callback: wire a newly created MSI mapping to the MSI
 * irq_chip and the simple-IRQ flow handler, and mark it valid so it can be
 * requested.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	/* tell cpuidle that PCIe interrupts are now in use (Tegra quirk) */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1217
/* Linear IRQ domain for MSI vectors; only .map is needed. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1221
1222static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1223{
1224 struct platform_device *pdev = to_platform_device(pcie->dev);
Jay Agarwal94716cd2013-08-09 16:49:24 +02001225 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
Thierry Redingd1523b52013-08-09 16:49:19 +02001226 struct tegra_msi *msi = &pcie->msi;
1227 unsigned long base;
1228 int err;
1229 u32 reg;
1230
1231 mutex_init(&msi->lock);
1232
1233 msi->chip.dev = pcie->dev;
1234 msi->chip.setup_irq = tegra_msi_setup_irq;
1235 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1236
1237 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1238 &msi_domain_ops, &msi->chip);
1239 if (!msi->domain) {
1240 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1241 return -ENOMEM;
1242 }
1243
1244 err = platform_get_irq_byname(pdev, "msi");
1245 if (err < 0) {
1246 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1247 goto err;
1248 }
1249
1250 msi->irq = err;
1251
1252 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1253 tegra_msi_irq_chip.name, pcie);
1254 if (err < 0) {
1255 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1256 goto err;
1257 }
1258
1259 /* setup AFI/FPCI range */
1260 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1261 base = virt_to_phys((void *)msi->pages);
1262
Jay Agarwal94716cd2013-08-09 16:49:24 +02001263 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
Thierry Redingd1523b52013-08-09 16:49:19 +02001264 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1265 /* this register is in 4K increments */
1266 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1267
1268 /* enable all MSI vectors */
1269 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1270 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1271 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1272 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1273 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1274 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1275 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1276 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1277
1278 /* and unmask the MSI interrupt */
1279 reg = afi_readl(pcie, AFI_INTR_MASK);
1280 reg |= AFI_INTR_MASK_MSI_MASK;
1281 afi_writel(pcie, reg, AFI_INTR_MASK);
1282
1283 return 0;
1284
1285err:
1286 irq_domain_remove(msi->domain);
1287 return err;
1288}
1289
/*
 * Tear down MSI support: mask and disable all vectors in the AFI, free the
 * MSI target page and the controller interrupt, then dispose of all IRQ
 * mappings and the domain. Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* drop every mapping before removing the domain */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1326
1327static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1328 u32 *xbar)
1329{
1330 struct device_node *np = pcie->dev->of_node;
1331
Jay Agarwal94716cd2013-08-09 16:49:24 +02001332 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1333 switch (lanes) {
1334 case 0x00000204:
1335 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1336 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1337 return 0;
Thierry Redingd1523b52013-08-09 16:49:19 +02001338
Jay Agarwal94716cd2013-08-09 16:49:24 +02001339 case 0x00020202:
1340 dev_info(pcie->dev, "2x3 configuration\n");
1341 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1342 return 0;
1343
1344 case 0x00010104:
1345 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1346 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1347 return 0;
1348 }
1349 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1350 switch (lanes) {
1351 case 0x00000004:
1352 dev_info(pcie->dev, "single-mode configuration\n");
1353 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1354 return 0;
1355
1356 case 0x00000202:
1357 dev_info(pcie->dev, "dual-mode configuration\n");
1358 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1359 return 0;
1360 }
Thierry Redingd1523b52013-08-09 16:49:19 +02001361 }
1362
1363 return -EINVAL;
1364}
1365
Thierry Reding077fb152014-05-28 16:49:13 +02001366/*
1367 * Check whether a given set of supplies is available in a device tree node.
1368 * This is used to check whether the new or the legacy device tree bindings
1369 * should be used.
1370 */
1371static bool of_regulator_bulk_available(struct device_node *np,
1372 struct regulator_bulk_data *supplies,
1373 unsigned int num_supplies)
1374{
1375 char property[32];
1376 unsigned int i;
1377
1378 for (i = 0; i < num_supplies; i++) {
1379 snprintf(property, 32, "%s-supply", supplies[i].supply);
1380
1381 if (of_find_property(np, property, NULL) == NULL)
1382 return false;
1383 }
1384
1385 return true;
1386}
1387
1388/*
1389 * Old versions of the device tree binding for this device used a set of power
1390 * supplies that didn't match the hardware inputs. This happened to work for a
1391 * number of cases but is not future proof. However to preserve backwards-
1392 * compatibility with old device trees, this function will try to use the old
1393 * set of supplies.
1394 */
1395static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1396{
1397 struct device_node *np = pcie->dev->of_node;
1398
1399 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1400 pcie->num_supplies = 3;
1401 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1402 pcie->num_supplies = 2;
1403
1404 if (pcie->num_supplies == 0) {
1405 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1406 np->full_name);
1407 return -ENODEV;
1408 }
1409
1410 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1411 sizeof(*pcie->supplies),
1412 GFP_KERNEL);
1413 if (!pcie->supplies)
1414 return -ENOMEM;
1415
1416 pcie->supplies[0].supply = "pex-clk";
1417 pcie->supplies[1].supply = "vdd";
1418
1419 if (pcie->num_supplies > 2)
1420 pcie->supplies[2].supply = "avdd";
1421
1422 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1423 pcie->supplies);
1424}
1425
/*
 * Obtains the list of regulators required for a particular generation of the
 * IP block.
 *
 * This would've been nice to do simply by providing static tables for use
 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
 * and either seems to be optional depending on which ports are being used.
 *
 * @lane_mask: bitmask of lanes used by enabled ports; lanes 0-3 are fed by
 * the PEXA pair and lanes 4-5 by the PEXB pair on Tegra30. Falls back to
 * the legacy binding when the new supply names aren't all present.
 */
static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
{
	struct device_node *np = pcie->dev->of_node;
	unsigned int i = 0;

	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		bool need_pexa = false, need_pexb = false;

		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
		if (lane_mask & 0x0f)
			need_pexa = true;

		/* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
		if (lane_mask & 0x30)
			need_pexb = true;

		/* 4 common supplies plus 2 per required PEX pair */
		pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
					 (need_pexb ? 2 : 0);

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[i++].supply = "avdd-pex-pll";
		pcie->supplies[i++].supply = "hvdd-pex";
		pcie->supplies[i++].supply = "vddio-pex-ctl";
		pcie->supplies[i++].supply = "avdd-plle";

		if (need_pexa) {
			pcie->supplies[i++].supply = "avdd-pexa";
			pcie->supplies[i++].supply = "vdd-pexa";
		}

		if (need_pexb) {
			pcie->supplies[i++].supply = "avdd-pexb";
			pcie->supplies[i++].supply = "vdd-pexb";
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		pcie->num_supplies = 5;

		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
					      sizeof(*pcie->supplies),
					      GFP_KERNEL);
		if (!pcie->supplies)
			return -ENOMEM;

		pcie->supplies[0].supply = "avdd-pex";
		pcie->supplies[1].supply = "vdd-pex";
		pcie->supplies[2].supply = "avdd-pex-pll";
		pcie->supplies[3].supply = "avdd-plle";
		pcie->supplies[4].supply = "vddio-pex-clk";
	}

	/* new-style binding: all named supplies must be present */
	if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
					pcie->num_supplies))
		return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
					       pcie->supplies);

	/*
	 * If not all regulators are available for this new scheme, assume
	 * that the device tree complies with an older version of the device
	 * tree binding.
	 */
	dev_info(pcie->dev, "using legacy DT binding for power supplies\n");

	devm_kfree(pcie->dev, pcie->supplies);
	pcie->num_supplies = 0;

	return tegra_pcie_get_legacy_regulators(pcie);
}
1507
/*
 * Parse the controller's device tree node: the "ranges" windows (I/O,
 * prefetchable and non-prefetchable memory), the bus range, and one child
 * node per root port. For each enabled port a tegra_pcie_port is allocated,
 * its register window mapped and the port added to pcie->ports. Also
 * derives the XBAR lane configuration and the required regulators from the
 * aggregated lane information.
 *
 * NOTE(review): the early returns inside for_each_child_of_node() appear to
 * leave the child node's refcount held (no of_node_put()) — confirm against
 * of_node iterator semantics.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	u32 lanes = 0, mask = 0;
	unsigned int lane = 0;
	struct resource res;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	/* sort the "ranges" entries into the I/O, prefetch and mem windows */
	for_each_of_pci_range(&parser, &range) {
		err = of_pci_range_to_resource(&range, np, &res);
		if (err < 0)
			return err;

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	/* fall back to the full 0x00-0xff bus range if none is given */
	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		/* the port number is encoded in the node's device address */
		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack this port's lane count into its byte of 'lanes' */
		lanes |= value << (index << 3);

		/* disabled ports still consume lanes, but get no supplies */
		if (!of_device_is_available(port)) {
			lane += value;
			continue;
		}

		/* mark the lanes used by this enabled port */
		mask |= ((1 << value) - 1) << lane;
		lane += value;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	/* request the regulators needed for the enabled lanes */
	err = tegra_pcie_get_regulators(pcie, mask);
	if (err < 0)
		return err;

	return 0;
}
1636
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT	200	/* up to 1.2 seconds */
/*
 * Wait for a root port's link to come up. Two stages per attempt: first
 * poll RP_VEND_XP for the data-link-up bit, then poll the link control/
 * status register for DL_LINK_ACTIVE. Each stage polls up to
 * TEGRA_PCIE_LINKUP_TIMEOUT iterations (1-2 ms apart); on failure the port
 * is reset and the whole sequence retried up to 3 times.
 *
 * Returns true if the link became active, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
1683
/*
 * Bring up all parsed root ports, drop the ones whose link never comes up,
 * then register the host with the ARM PCI core (pci_common_init_dev), which
 * triggers bus enumeration via the hw_pci callbacks. Always returns 0.
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	/* _safe iteration: ports with a dead link are freed from the list */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}
1718
/* Tegra20: two root ports, no MSI FPCI offset, no CML clock or the newer
 * pad/clock-request controls. */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_cml_clk = false,
};
1729
/* Tegra30: three root ports, MSI FPCI base shifted by 8, CML clock and the
 * additional PEX clkreq/bias and presence-sense controls present. */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = true,
};
1740
/* OF match table; .data selects the per-SoC feature set used throughout. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1747
Thierry Reding2cb989f2014-07-22 12:30:46 -06001748static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1749{
1750 struct tegra_pcie *pcie = s->private;
1751
1752 if (list_empty(&pcie->ports))
1753 return NULL;
1754
1755 seq_printf(s, "Index Status\n");
1756
1757 return seq_list_start(&pcie->ports, *pos);
1758}
1759
/* seq_file .next: advance to the next port in the list. */
static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct tegra_pcie *pcie = s->private;

	return seq_list_next(v, &pcie->ports, pos);
}
1766
/* seq_file .stop: nothing to release, the iterator holds no locks here. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
1770
1771static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1772{
1773 bool up = false, active = false;
1774 struct tegra_pcie_port *port;
1775 unsigned int value;
1776
1777 port = list_entry(v, struct tegra_pcie_port, list);
1778
1779 value = readl(port->base + RP_VEND_XP);
1780
1781 if (value & RP_VEND_XP_DL_UP)
1782 up = true;
1783
1784 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1785
1786 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1787 active = true;
1788
1789 seq_printf(s, "%2u ", port->index);
1790
1791 if (up)
1792 seq_printf(s, "up");
1793
1794 if (active) {
1795 if (up)
1796 seq_printf(s, ", ");
1797
1798 seq_printf(s, "active");
1799 }
1800
1801 seq_printf(s, "\n");
1802 return 0;
1803}
1804
/* Iterator over pcie->ports backing the debugfs "ports" file. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
1811
1812static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
1813{
1814 struct tegra_pcie *pcie = inode->i_private;
1815 struct seq_file *s;
1816 int err;
1817
1818 err = seq_open(file, &tegra_pcie_ports_seq_ops);
1819 if (err)
1820 return err;
1821
1822 s = file->private_data;
1823 s->private = pcie;
1824
1825 return 0;
1826}
1827
/* Read-only seq_file plumbing for the debugfs "ports" file. */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
1835
1836static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
1837{
1838 struct dentry *file;
1839
1840 pcie->debugfs = debugfs_create_dir("pcie", NULL);
1841 if (!pcie->debugfs)
1842 return -ENOMEM;
1843
1844 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
1845 pcie, &tegra_pcie_ports_ops);
1846 if (!file)
1847 goto remove;
1848
1849 return 0;
1850
1851remove:
1852 debugfs_remove_recursive(pcie->debugfs);
1853 pcie->debugfs = NULL;
1854 return -ENOMEM;
1855}
1856
/*
 * Platform probe: pick the SoC data from the match table, parse the DT,
 * acquire clocks/resets/power/regions, initialize the controller, set up
 * the AFI address translations, optionally enable MSI and debugfs, and
 * finally enumerate the ports. Failures unwind through the labels at the
 * bottom (most resources are device-managed).
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	/* PCI memory resources may start at address 0 on this hardware */
	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	/* debugfs is best-effort: a failure is logged but not fatal */
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
				err);
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
1928
/*
 * Platform driver registration. There is no .remove callback, so
 * suppress_bind_attrs prevents unbinding via sysfs.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);

MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
MODULE_LICENSE("GPL v2");