/*
 * PCIe driver for Renesas R-Car SoCs
 *  Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 *  Copyright (C) 2009 - 2011 Paul Mundt
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

#define DRV_NAME "rcar-pcie"

#define PCIECAR 0x000010
#define PCIECCTLR 0x000018
#define CONFIG_SEND_ENABLE (1 << 31)
#define TYPE0 (0 << 8)
#define TYPE1 (1 << 8)
#define PCIECDR 0x000020
#define PCIEMSR 0x000028
#define PCIEINTXR 0x000400
#define PCIEMSITXR 0x000840

/* Transfer control */
#define PCIETCTLR 0x02000
#define CFINIT 1
#define PCIETSTR 0x02004
#define DATA_LINK_ACTIVE 1
#define PCIEERRFR 0x02020
#define UNSUPPORTED_REQUEST (1 << 4)
#define PCIEMSIFR 0x02044
#define PCIEMSIALR 0x02048
#define MSIFE 1
#define PCIEMSIAUR 0x0204c
#define PCIEMSIIER 0x02050

/* root port address */
#define PCIEPRAR(x) (0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x) (0x02200 + ((x) * 0x20))
#define PCIELAMR(x) (0x02208 + ((x) * 0x20))
#define LAM_PREFETCH (1 << 3)
#define LAM_64BIT (1 << 2)
#define LAR_ENABLE (1 << 1)

/* PCIe address reg & mask */
#define PCIEPALR(x) (0x03400 + ((x) * 0x20))
#define PCIEPAUR(x) (0x03404 + ((x) * 0x20))
#define PCIEPAMR(x) (0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x) (0x0340c + ((x) * 0x20))
#define PAR_ENABLE (1 << 31)
#define IO_SPACE (1 << 8)

/* Configuration */
#define PCICONF(x) (0x010000 + ((x) * 0x4))
#define PMCAP(x) (0x010040 + ((x) * 0x4))
#define EXPCAP(x) (0x010070 + ((x) * 0x4))
#define VCCAP(x) (0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1 0x011004
#define TLCTLR 0x011048
#define MACSR 0x011054
#define MACCTLR 0x011058
#define SCRAMBLE_DISABLE (1 << 27)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR 0x04000c
#define WRITE_CMD (1 << 16)
#define PHY_ACK (1 << 24)
#define RATE_POS 12
#define LANE_POS 8
#define ADR_POS 0
#define H1_PCIEPHYDOUTR 0x040014
#define H1_PCIEPHYSR 0x040018

#define INT_PCI_MSI_NR 32

#define RCONF(x) (PCICONF(0)+(x))
#define RPMCAP(x) (PMCAP(0)+(x))
#define REXPCAP(x) (EXPCAP(0)+(x))
#define RVCCAP(x) (VCCAP(0)+(x))

#define PCIE_CONF_BUS(b) (((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d) (((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f) (((f) & 0x7) << 16)

#define RCAR_PCI_MAX_RESOURCES 4
#define MAX_NR_INBOUND_MAPS 6

struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_controller chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device *dev;
	void __iomem *base;
	struct list_head resources;
	int root_bus_nr;
	struct clk *clk;
	struct clk *bus_clk;
	struct rcar_msi msi;
};

static void rcar_pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
		unsigned long reg)
{
	writel(val, pcie->base + reg);
}

static unsigned long rcar_pci_read_reg(struct rcar_pcie *pcie,
		unsigned long reg)
{
	return readl(pcie->base + reg);
}

enum {
	RCAR_PCI_ACCESS_READ,
	RCAR_PCI_ACCESS_WRITE,
};

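/*
 * Read-modify-write helper: update only the field selected by 'mask' at
 * byte offset 'where' within the controller's PCI configuration registers,
 * leaving the rest of the aligned 32-bit word untouched.
 */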
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	val &= ~(mask << shift);
	val |= data << shift;
	rcar_pci_write_reg(pcie, val, where & ~3);
}

static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	int shift = 8 * (where & 3);
	u32 val = rcar_pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == RCAR_PCI_ACCESS_READ) {
			*data = rcar_pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			rcar_pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	rcar_pci_write_reg(pcie, rcar_pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	rcar_pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) |
		PCIE_CONF_DEV(dev) | PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/* Enable the configuration access */
	if (bus->parent->number == pcie->root_bus_nr)
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		rcar_pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (rcar_pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == RCAR_PCI_ACCESS_READ)
		*data = rcar_pci_read_reg(pcie, PCIECDR);
	else
		rcar_pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	rcar_pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
		int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int ret;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
			bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
		int where, int size, u32 val)
{
	struct rcar_pcie *pcie = bus->sysdata;
	int shift, ret;
	u32 data;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_READ,
			bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);

	if (size == 1) {
		shift = 8 * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = 8 * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(pcie, RCAR_PCI_ACCESS_WRITE,
			bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};

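/*
 * Program one outbound window: PCIEPAUR/PCIEPALR take the 64-bit CPU base
 * address, PCIEPAMR the size mask (in 128-byte units), and PCIEPTCTLR
 * enables the window and selects I/O versus memory space.
 */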
static void rcar_pcie_setup_window(int win, struct rcar_pcie *pcie,
		struct resource *res)
{
	/* Setup PCIe address space mappings for each resource */
	resource_size_t size;
	resource_size_t res_start;
	u32 mask;

	rcar_pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));

	/*
	 * The PAMR mask is calculated in units of 128 bytes, which
	 * keeps things pretty simple.
	 */
	size = resource_size(res);
	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
	rcar_pci_write_reg(pcie, mask << 7, PCIEPAMR(win));

	if (res->flags & IORESOURCE_IO)
		res_start = pci_pio_to_address(res->start);
	else
		res_start = res->start;

	rcar_pci_write_reg(pcie, upper_32_bits(res_start), PCIEPAUR(win));
	rcar_pci_write_reg(pcie, lower_32_bits(res_start) & ~0x7F,
			PCIEPALR(win));

	/* First resource is for IO */
	mask = PAR_ENABLE;
	if (res->flags & IORESOURCE_IO)
		mask |= IO_SPACE;

	rcar_pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}

static int rcar_pcie_setup(struct list_head *resource, struct rcar_pcie *pci)
{
	struct resource_entry *win;
	int i = 0;

	/* Setup PCI resources */
	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *res = win->res;

		if (!res->flags)
			continue;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
		case IORESOURCE_MEM:
			rcar_pcie_setup_window(i, pci, res);
			i++;
			break;
		case IORESOURCE_BUS:
			pci->root_bus_nr = res->start;
			break;
		default:
			continue;
		}

		pci_add_resource(resource, res);
	}

	return 1;
}

static int rcar_pcie_enable(struct rcar_pcie *pcie)
{
	struct pci_bus *bus, *child;
	LIST_HEAD(res);

	rcar_pcie_setup(&res, pcie);

	/* Do not reassign resources if probe only */
	if (!pci_has_flag(PCI_PROBE_ONLY))
		pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);

	if (IS_ENABLED(CONFIG_PCI_MSI))
		bus = pci_scan_root_bus_msi(pcie->dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res, &pcie->msi.chip);
	else
		bus = pci_scan_root_bus(pcie->dev, pcie->root_bus_nr,
				&rcar_pcie_ops, pcie, &res);

	if (!bus) {
		dev_err(pcie->dev, "Scanning rootbus failed");
		return -ENODEV;
	}

	pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci);

	if (!pci_has_flag(PCI_PROBE_ONLY)) {
		pci_bus_size_bridges(bus);
		pci_bus_assign_resources(bus);

		list_for_each_entry(child, &bus->children, node)
			pcie_bus_configure_settings(child);
	}

	pci_bus_add_devices(bus);

	return 0;
}

static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(pcie->dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}

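/*
 * Indirect write to the R-Car H1 PCIe PHY: the data word goes to
 * H1_PCIEPHYDOUTR, then a command with the rate, lane and register
 * address packed into H1_PCIEPHYADRR starts the transfer, which the
 * PHY acknowledges via the PHY_ACK bit.
 */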
static void phy_write_reg(struct rcar_pcie *pcie,
		unsigned int rate, unsigned int addr,
		unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	rcar_pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	rcar_pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}

static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	while (timeout--) {
		if ((rcar_pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
			return 0;

		msleep(5);
	}

	return -ETIMEDOUT;
}

static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	rcar_pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	rcar_pci_write_reg(pcie, 1, PCIEMSR);

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	rcar_pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR + 1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		rcar_pci_write_reg(pcie, 0x801f0000, PCIEMSITXR);

	/* Finish initialization - establish a PCI Express link */
	rcar_pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	while (timeout--) {
		if (rcar_pci_read_reg(pcie, H1_PCIEPHYSR))
			return rcar_pcie_hw_init(pcie);

		msleep(5);
	}

	return -ETIMEDOUT;
}

static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}

static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie *pcie = data;
	struct rcar_msi *msi = &pcie->msi;
	unsigned long reg;

	reg = rcar_pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int irq;

		/* clear the interrupt */
		rcar_pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		irq = irq_find_mapping(msi->domain, index);
		if (irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(irq);
			else
				dev_info(pcie->dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(pcie->dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = rcar_pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}

static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev,
		struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = rcar_pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	pci_write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
		irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};

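/*
 * MSI setup: a single page is allocated as the MSI data target and its
 * physical address is programmed into PCIEMSIALR/PCIEMSIAUR, so that
 * endpoint MSI writes are captured by the controller, reported through
 * PCIEMSIFR and serviced by rcar_pcie_msi_irq().
 */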
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct rcar_msi *msi = &pcie->msi;
	unsigned long base;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = pcie->dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
			&msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
			IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
			IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	rcar_pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
	rcar_pci_write_reg(pcie, 0, PCIEMSIAUR);

	/* enable all MSI interrupts */
	rcar_pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

static int rcar_pcie_get_resources(struct platform_device *pdev,
		struct rcar_pcie *pcie)
{
	struct resource res;
	int err, i;

	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
	if (err)
		return err;

	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(pcie->clk)) {
		dev_err(pcie->dev, "cannot get platform clock\n");
		return PTR_ERR(pcie->clk);
	}
	err = clk_prepare_enable(pcie->clk);
	if (err)
		goto fail_clk;

	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(pcie->dev, "cannot get pcie bus clock\n");
		err = PTR_ERR(pcie->bus_clk);
		goto fail_clk;
	}
	err = clk_prepare_enable(pcie->bus_clk);
	if (err)
		goto err_map_reg;

	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq2 = i;

	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
	if (IS_ERR(pcie->base)) {
		err = PTR_ERR(pcie->base);
		goto err_map_reg;
	}

	return 0;

err_map_reg:
	clk_disable_unprepare(pcie->bus_clk);
fail_clk:
	clk_disable_unprepare(pcie->clk);

	return err;
}

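/*
 * Program inbound (PCI -> CPU) windows for one dma-ranges entry. Each
 * 64-bit window occupies two consecutive PCIEPRAR/PCIELAR/PCIELAMR slots
 * (lower and upper halves), so large or poorly aligned ranges may need
 * several windows, bounded by MAX_NR_INBOUND_MAPS.
 */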
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
		struct of_pci_range *range,
		int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;

		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		rcar_pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		rcar_pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));

		rcar_pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
		rcar_pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
		rcar_pci_write_reg(pcie, 0, PCIELAMR(idx + 1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}

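/*
 * Minimal "dma-ranges" parser: a PCI address takes three cells and a size
 * two cells (na/ns below), while the parent (CPU) address width comes
 * from of_n_addr_cells().
 */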
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
		struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "dma-ranges", &rlen);
	if (!parser->range)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);
	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
		struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err;

	if (pci_dma_range_parser_init(&parser, np))
		return -EINVAL;

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;
		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			return err;
	}

	return 0;
}

static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
	{ .compatible = "renesas,pcie-rcar-gen2", .data = rcar_pcie_hw_init },
	{ .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
	{ .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
	{ .compatible = "renesas,pcie-r8a7795", .data = rcar_pcie_hw_init },
	{},
};
MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);

static void rcar_pcie_release_of_pci_ranges(struct rcar_pcie *pci)
{
	pci_free_resource_list(&pci->resources);
}

static int rcar_pcie_parse_request_of_pci_ranges(struct rcar_pcie *pci)
{
	int err;
	struct device *dev = pci->dev;
	struct device_node *np = dev->of_node;
	resource_size_t iobase;
	struct resource_entry *win;

	err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase);
	if (err)
		return err;

	resource_list_for_each_entry(win, &pci->resources) {
		struct resource *parent, *res = win->res;

		switch (resource_type(res)) {
		case IORESOURCE_IO:
			parent = &ioport_resource;
			err = pci_remap_iospace(res, iobase);
			if (err) {
				dev_warn(dev, "error %d: failed to map resource %pR\n",
					err, res);
				continue;
			}
			break;
		case IORESOURCE_MEM:
			parent = &iomem_resource;
			break;

		case IORESOURCE_BUS:
		default:
			continue;
		}

		err = devm_request_resource(dev, parent, res);
		if (err)
			goto out_release_res;
	}

	return 0;

out_release_res:
	rcar_pcie_release_of_pci_ranges(pci);
	return err;
}

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct rcar_pcie *pcie;
	unsigned int data;
	const struct of_device_id *of_id;
	int err;
	int (*hw_init_fn)(struct rcar_pcie *);

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;
	platform_set_drvdata(pdev, pcie);

	INIT_LIST_HEAD(&pcie->resources);

	rcar_pcie_parse_request_of_pci_ranges(pcie);

	err = rcar_pcie_get_resources(pdev, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
	if (err)
		return err;

	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
	if (!of_id || !of_id->data)
		return -EINVAL;
	hw_init_fn = of_id->data;

	pm_runtime_enable(pcie->dev);
	err = pm_runtime_get_sync(pcie->dev);
	if (err < 0) {
		dev_err(pcie->dev, "pm_runtime_get_sync failed\n");
		goto err_pm_disable;
	}

	/* Failure to get a link might just be that no cards are inserted */
	err = hw_init_fn(pcie);
	if (err) {
		dev_info(&pdev->dev, "PCIe link down\n");
		err = 0;
		goto err_pm_put;
	}

	data = rcar_pci_read_reg(pcie, MACSR);
	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto err_pm_put;
		}
	}

	err = rcar_pcie_enable(pcie);
	if (err)
		goto err_pm_put;

	return 0;

err_pm_put:
	pm_runtime_put(pcie->dev);

err_pm_disable:
	pm_runtime_disable(pcie->dev);
	return err;
}

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = rcar_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
module_platform_driver(rcar_pcie_driver);

MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
MODULE_LICENSE("GPL v2");