/*
 * PCIe driver for Renesas R-Car SoCs
 * Copyright (C) 2014 Renesas Electronics Europe Ltd
 *
 * Based on:
 *  arch/sh/drivers/pci/pcie-sh7786.c
 *  arch/sh/drivers/pci/ops-sh7786.c
 * Copyright (C) 2009 - 2011 Paul Mundt
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/msi.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define DRV_NAME "rcar-pcie"

#define PCIECAR			0x000010
#define PCIECCTLR		0x000018
#define  CONFIG_SEND_ENABLE	(1 << 31)
#define  TYPE0			(0 << 8)
#define  TYPE1			(1 << 8)
#define PCIECDR			0x000020
#define PCIEMSR			0x000028
#define PCIEINTXR		0x000400
#define PCIEMSITXR		0x000840

/* Transfer control */
#define PCIETCTLR		0x02000
#define  CFINIT			1
#define PCIETSTR		0x02004
#define  DATA_LINK_ACTIVE	1
#define PCIEERRFR		0x02020
#define  UNSUPPORTED_REQUEST	(1 << 4)
#define PCIEMSIFR		0x02044
#define PCIEMSIALR		0x02048
#define  MSIFE			1
#define PCIEMSIAUR		0x0204c
#define PCIEMSIIER		0x02050

/* root port address */
#define PCIEPRAR(x)		(0x02080 + ((x) * 0x4))

/* local address reg & mask */
#define PCIELAR(x)		(0x02200 + ((x) * 0x20))
#define PCIELAMR(x)		(0x02208 + ((x) * 0x20))
#define  LAM_PREFETCH		(1 << 3)
#define  LAM_64BIT		(1 << 2)
#define  LAR_ENABLE		(1 << 1)

/* PCIe address reg & mask */
#define PCIEPARL(x)		(0x03400 + ((x) * 0x20))
#define PCIEPARH(x)		(0x03404 + ((x) * 0x20))
#define PCIEPAMR(x)		(0x03408 + ((x) * 0x20))
#define PCIEPTCTLR(x)		(0x0340c + ((x) * 0x20))
#define  PAR_ENABLE		(1 << 31)
#define  IO_SPACE		(1 << 8)

/* Configuration */
#define PCICONF(x)		(0x010000 + ((x) * 0x4))
#define PMCAP(x)		(0x010040 + ((x) * 0x4))
#define EXPCAP(x)		(0x010070 + ((x) * 0x4))
#define VCCAP(x)		(0x010100 + ((x) * 0x4))

/* link layer */
#define IDSETR1			0x011004
#define TLCTLR			0x011048
#define MACSR			0x011054
#define MACCTLR			0x011058
#define  SCRAMBLE_DISABLE	(1 << 27)

/* R-Car H1 PHY */
#define H1_PCIEPHYADRR		0x04000c
#define  WRITE_CMD		(1 << 16)
#define  PHY_ACK		(1 << 24)
#define  RATE_POS		12
#define  LANE_POS		8
#define  ADR_POS		0
#define H1_PCIEPHYDOUTR		0x040014
#define H1_PCIEPHYSR		0x040018

#define INT_PCI_MSI_NR	32

#define RCONF(x)	(PCICONF(0)+(x))
#define RPMCAP(x)	(PMCAP(0)+(x))
#define REXPCAP(x)	(EXPCAP(0)+(x))
#define RVCCAP(x)	(VCCAP(0)+(x))

#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 24)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 19)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 16)

#define PCI_MAX_RESOURCES	4
#define MAX_NR_INBOUND_MAPS	6

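/*
 * Per-controller MSI state: a bitmap of the 32 available vectors, the IRQ
 * domain used to map them to Linux interrupts, and the page whose physical
 * address is programmed into the controller as the MSI data target.
 */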
struct rcar_msi {
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);
	struct irq_domain *domain;
	struct msi_chip chip;
	unsigned long pages;
	struct mutex lock;
	int irq1;
	int irq2;
};

static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip)
{
	return container_of(chip, struct rcar_msi, chip);
}

/* Structure representing the PCIe interface */
struct rcar_pcie {
	struct device		*dev;
	void __iomem		*base;
	struct resource		res[PCI_MAX_RESOURCES];
	struct resource		busn;
	int			root_bus_nr;
	struct clk		*clk;
	struct clk		*bus_clk;
	struct rcar_msi		msi;
};

static inline struct rcar_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

static void pci_write_reg(struct rcar_pcie *pcie, unsigned long val,
			  unsigned long reg)
{
	writel(val, pcie->base + reg);
}

static unsigned long pci_read_reg(struct rcar_pcie *pcie, unsigned long reg)
{
	return readl(pcie->base + reg);
}

enum {
	PCI_ACCESS_READ,
	PCI_ACCESS_WRITE,
};

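/*
 * Read-modify-write a byte- or word-sized field within an aligned 32-bit
 * configuration register: 'where' may be unaligned, the containing word
 * is updated in place.
 */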
static void rcar_rmw32(struct rcar_pcie *pcie, int where, u32 mask, u32 data)
{
	int shift = 8 * (where & 3);
	u32 val = pci_read_reg(pcie, where & ~3);

	val &= ~(mask << shift);
	val |= data << shift;
	pci_write_reg(pcie, val, where & ~3);
}

static u32 rcar_read_conf(struct rcar_pcie *pcie, int where)
{
	int shift = 8 * (where & 3);
	u32 val = pci_read_reg(pcie, where & ~3);

	return val >> shift;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_config_access(struct rcar_pcie *pcie,
		unsigned char access_type, struct pci_bus *bus,
		unsigned int devfn, int where, u32 *data)
{
	int dev, func, reg, index;

	dev = PCI_SLOT(devfn);
	func = PCI_FUNC(devfn);
	reg = where & ~3;
	index = reg / 4;

	/*
	 * While each channel has its own memory-mapped extended config
	 * space, it's generally only accessible when in endpoint mode.
	 * When in root complex mode, the controller is unable to target
	 * itself with either type 0 or type 1 accesses, and indeed, any
	 * controller-initiated target transfer to its own config space
	 * results in a completer abort.
	 *
	 * Each channel effectively only supports a single device, but as
	 * the same channel <-> device access works for any PCI_SLOT()
	 * value, we cheat a bit here and bind the controller's config
	 * space to devfn 0 in order to enable self-enumeration. In this
	 * case the regular ECAR/ECDR path is sidelined and the mangled
	 * config access itself is initiated as an internal bus transaction.
	 */
	if (pci_is_root_bus(bus)) {
		if (dev != 0)
			return PCIBIOS_DEVICE_NOT_FOUND;

		if (access_type == PCI_ACCESS_READ) {
			*data = pci_read_reg(pcie, PCICONF(index));
		} else {
			/* Keep an eye out for changes to the root bus number */
			if (pci_is_root_bus(bus) && (reg == PCI_PRIMARY_BUS))
				pcie->root_bus_nr = *data & 0xff;

			pci_write_reg(pcie, *data, PCICONF(index));
		}

		return PCIBIOS_SUCCESSFUL;
	}

	if (pcie->root_bus_nr < 0)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Clear errors */
	pci_write_reg(pcie, pci_read_reg(pcie, PCIEERRFR), PCIEERRFR);

	/* Set the PIO address */
	pci_write_reg(pcie, PCIE_CONF_BUS(bus->number) | PCIE_CONF_DEV(dev) |
				PCIE_CONF_FUNC(func) | reg, PCIECAR);

	/*
	 * Enable the configuration access: Type 0 for devices on the bus
	 * directly below the root port, Type 1 for everything further out.
	 */
	if (bus->parent->number == pcie->root_bus_nr)
		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE0, PCIECCTLR);
	else
		pci_write_reg(pcie, CONFIG_SEND_ENABLE | TYPE1, PCIECCTLR);

	/* Check for errors */
	if (pci_read_reg(pcie, PCIEERRFR) & UNSUPPORTED_REQUEST)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Check for master and target aborts */
	if (rcar_read_conf(pcie, RCONF(PCI_STATUS)) &
		(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_REC_TARGET_ABORT))
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (access_type == PCI_ACCESS_READ)
		*data = pci_read_reg(pcie, PCIECDR);
	else
		pci_write_reg(pcie, *data, PCIECDR);

	/* Disable the configuration access */
	pci_write_reg(pcie, 0, PCIECCTLR);

	return PCIBIOS_SUCCESSFUL;
}

static int rcar_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 *val)
{
	struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
	int ret;

	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
				      bus, devfn, where, val);
	if (ret != PCIBIOS_SUCCESSFUL) {
		*val = 0xffffffff;
		return ret;
	}

	if (size == 1)
		*val = (*val >> (8 * (where & 3))) & 0xff;
	else if (size == 2)
		*val = (*val >> (8 * (where & 2))) & 0xffff;

	dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)*val);

	return ret;
}

/* Serialization is provided by 'pci_lock' in drivers/pci/access.c */
static int rcar_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
				int where, int size, u32 val)
{
	struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);
	int shift, ret;
	u32 data;

	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_READ,
				      bus, devfn, where, &data);
	if (ret != PCIBIOS_SUCCESSFUL)
		return ret;

	dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x where=0x%04x size=%d val=0x%08lx\n",
		bus->number, devfn, where, size, (unsigned long)val);

	if (size == 1) {
		shift = 8 * (where & 3);
		data &= ~(0xff << shift);
		data |= ((val & 0xff) << shift);
	} else if (size == 2) {
		shift = 8 * (where & 2);
		data &= ~(0xffff << shift);
		data |= ((val & 0xffff) << shift);
	} else
		data = val;

	ret = rcar_pcie_config_access(pcie, PCI_ACCESS_WRITE,
				      bus, devfn, where, &data);

	return ret;
}

static struct pci_ops rcar_pcie_ops = {
	.read	= rcar_pcie_read_conf,
	.write	= rcar_pcie_write_conf,
};

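/*
 * Program one outbound window: PCIEPARL/PCIEPARH take the PCIe bus address,
 * PCIEPAMR the size mask (in 128-byte units), and PCIEPTCTLR enables the
 * window and selects memory vs. I/O space.
 */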
static void rcar_pcie_setup_window(int win, struct resource *res,
				   struct rcar_pcie *pcie)
{
	/* Setup PCIe address space mappings for each resource */
	resource_size_t size;
	u32 mask;

	pci_write_reg(pcie, 0x00000000, PCIEPTCTLR(win));

	/*
	 * The PAMR mask is calculated in units of 128Bytes, which
	 * keeps things pretty simple.
	 */
	size = resource_size(res);
	mask = (roundup_pow_of_two(size) / SZ_128) - 1;
	pci_write_reg(pcie, mask << 7, PCIEPAMR(win));

	pci_write_reg(pcie, upper_32_bits(res->start), PCIEPARH(win));
	pci_write_reg(pcie, lower_32_bits(res->start), PCIEPARL(win));

	/* First resource is for IO */
	mask = PAR_ENABLE;
	if (res->flags & IORESOURCE_IO)
		mask |= IO_SPACE;

	pci_write_reg(pcie, mask, PCIEPTCTLR(win));
}

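/*
 * pci_common_init_dev() setup callback: program the outbound windows, map
 * the I/O window and hand the memory and bus-number resources to the PCI
 * core.
 */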
static int rcar_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct rcar_pcie *pcie = sys_to_pcie(sys);
	struct resource *res;
	int i;

	pcie->root_bus_nr = -1;

	/* Setup PCI resources */
	for (i = 0; i < PCI_MAX_RESOURCES; i++) {

		res = &pcie->res[i];
		if (!res->flags)
			continue;

		rcar_pcie_setup_window(i, res, pcie);

		if (res->flags & IORESOURCE_IO)
			pci_ioremap_io(nr * SZ_64K, res->start);
		else
			pci_add_resource(&sys->resources, res);
	}
	pci_add_resource(&sys->resources, &pcie->busn);

	return 1;
}

static void rcar_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct rcar_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}

struct hw_pci rcar_pci = {
	.setup          = rcar_pcie_setup,
	.map_irq        = of_irq_parse_and_map_pci,
	.ops            = &rcar_pcie_ops,
	.add_bus        = rcar_pcie_add_bus,
};

static void rcar_pcie_enable(struct rcar_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);

	rcar_pci.nr_controllers = 1;
	rcar_pci.private_data = (void **)&pcie;

	pci_common_init_dev(&pdev->dev, &rcar_pci);
#ifdef CONFIG_PCI_DOMAINS
	rcar_pci.domain++;
#endif
}

static int phy_wait_for_ack(struct rcar_pcie *pcie)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (pci_read_reg(pcie, H1_PCIEPHYADRR) & PHY_ACK)
			return 0;

		udelay(100);
	}

	dev_err(pcie->dev, "Access to PCIe phy timed out\n");

	return -ETIMEDOUT;
}

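/*
 * Indirect write to the R-Car H1 PCIe PHY: the data word goes to
 * H1_PCIEPHYDOUTR, then the command (rate/lane/register address plus
 * WRITE_CMD) to H1_PCIEPHYADRR. The write completes when the PHY raises
 * PHY_ACK, after which both registers are cleared.
 */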
static void phy_write_reg(struct rcar_pcie *pcie,
				 unsigned int rate, unsigned int addr,
				 unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = WRITE_CMD |
		((rate & 1) << RATE_POS) |
		((lane & 0xf) << LANE_POS) |
		((addr & 0xff) << ADR_POS);

	/* Set write data */
	pci_write_reg(pcie, data, H1_PCIEPHYDOUTR);
	pci_write_reg(pcie, phyaddr, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);

	/* Clear command */
	pci_write_reg(pcie, 0, H1_PCIEPHYDOUTR);
	pci_write_reg(pcie, 0, H1_PCIEPHYADRR);

	/* Ignore errors as they will be dealt with if the data link is down */
	phy_wait_for_ack(pcie);
}

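/* Poll PCIETSTR for roughly 50 ms for the data link layer to come up */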
static int rcar_pcie_wait_for_dl(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	while (timeout--) {
		if ((pci_read_reg(pcie, PCIETSTR) & DATA_LINK_ACTIVE))
			return 0;

		msleep(5);
	}

	return -ETIMEDOUT;
}

static int rcar_pcie_hw_init(struct rcar_pcie *pcie)
{
	int err;

	/* Begin initialization */
	pci_write_reg(pcie, 0, PCIETCTLR);

	/* Set mode */
	pci_write_reg(pcie, 1, PCIEMSR);

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	pci_write_reg(pcie, PCI_CLASS_BRIDGE_PCI << 16, IDSETR1);

	/*
	 * Setup Secondary Bus Number & Subordinate Bus Number, even though
	 * they aren't used, to avoid bridge being detected as broken.
	 */
	rcar_rmw32(pcie, RCONF(PCI_SECONDARY_BUS), 0xff, 1);
	rcar_rmw32(pcie, RCONF(PCI_SUBORDINATE_BUS), 0xff, 1);

	/* Initialize default capabilities. */
	rcar_rmw32(pcie, REXPCAP(0), 0xff, PCI_CAP_ID_EXP);
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_FLAGS),
		PCI_EXP_FLAGS_TYPE, PCI_EXP_TYPE_ROOT_PORT << 4);
	rcar_rmw32(pcie, RCONF(PCI_HEADER_TYPE), 0x7f,
		PCI_HEADER_TYPE_BRIDGE);

	/* Enable data link layer active state reporting */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_LNKCAP), PCI_EXP_LNKCAP_DLLLARC,
		PCI_EXP_LNKCAP_DLLLARC);

	/* Write out the physical slot number = 0 */
	rcar_rmw32(pcie, REXPCAP(PCI_EXP_SLTCAP), PCI_EXP_SLTCAP_PSN, 0);

	/* Set the completion timer timeout to the maximum 50ms. */
	rcar_rmw32(pcie, TLCTLR+1, 0x3f, 50);

	/* Terminate list of capabilities (Next Capability Offset=0) */
	rcar_rmw32(pcie, RVCCAP(0), 0xfff00000, 0);

	/* Enable MSI */
	if (IS_ENABLED(CONFIG_PCI_MSI))
		pci_write_reg(pcie, 0x101f0000, PCIEMSITXR);

	/* Finish initialization - establish a PCI Express link */
	pci_write_reg(pcie, CFINIT, PCIETCTLR);

	/* This will timeout if we don't have a link. */
	err = rcar_pcie_wait_for_dl(pcie);
	if (err)
		return err;

	/* Enable INTx interrupts */
	rcar_rmw32(pcie, PCIEINTXR, 0, 0xF << 8);

	wmb();

	return 0;
}

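/* R-Car H1 (r8a7779) needs its PCIe PHY brought up before the common init */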
static int rcar_pcie_hw_init_h1(struct rcar_pcie *pcie)
{
	unsigned int timeout = 10;

	/* Initialize the phy */
	phy_write_reg(pcie, 0, 0x42, 0x1, 0x0EC34191);
	phy_write_reg(pcie, 1, 0x42, 0x1, 0x0EC34180);
	phy_write_reg(pcie, 0, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 1, 0x43, 0x1, 0x00210188);
	phy_write_reg(pcie, 0, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x44, 0x1, 0x015C0014);
	phy_write_reg(pcie, 1, 0x4C, 0x1, 0x786174A0);
	phy_write_reg(pcie, 1, 0x4D, 0x1, 0x048000BB);
	phy_write_reg(pcie, 0, 0x51, 0x1, 0x079EC062);
	phy_write_reg(pcie, 0, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x52, 0x1, 0x20000000);
	phy_write_reg(pcie, 1, 0x56, 0x1, 0x00003806);

	phy_write_reg(pcie, 0, 0x60, 0x1, 0x004B03A5);
	phy_write_reg(pcie, 0, 0x64, 0x1, 0x3F0F1F0F);
	phy_write_reg(pcie, 0, 0x66, 0x1, 0x00008000);

	while (timeout--) {
		if (pci_read_reg(pcie, H1_PCIEPHYSR))
			return rcar_pcie_hw_init(pcie);

		msleep(5);
	}

	return -ETIMEDOUT;
}

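/* Allocate the lowest free MSI vector from the 32-entry bitmap */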
static int rcar_msi_alloc(struct rcar_msi *chip)
{
	int msi;

	mutex_lock(&chip->lock);

	msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
	if (msi < INT_PCI_MSI_NR)
		set_bit(msi, chip->used);
	else
		msi = -ENOSPC;

	mutex_unlock(&chip->lock);

	return msi;
}

static void rcar_msi_free(struct rcar_msi *chip, unsigned long irq)
{
	mutex_lock(&chip->lock);
	clear_bit(irq, chip->used);
	mutex_unlock(&chip->lock);
}

static irqreturn_t rcar_pcie_msi_irq(int irq, void *data)
{
	struct rcar_pcie *pcie = data;
	struct rcar_msi *msi = &pcie->msi;
	unsigned long reg;

	reg = pci_read_reg(pcie, PCIEMSIFR);

	/* MSI & INTx share an interrupt - we only handle MSI here */
	if (!reg)
		return IRQ_NONE;

	while (reg) {
		unsigned int index = find_first_bit(&reg, 32);
		unsigned int irq;

		/* clear the interrupt */
		pci_write_reg(pcie, 1 << index, PCIEMSIFR);

		irq = irq_find_mapping(msi->domain, index);
		if (irq) {
			if (test_bit(index, msi->used))
				generic_handle_irq(irq);
			else
				dev_info(pcie->dev, "unhandled MSI\n");
		} else {
			/* Unknown MSI, just clear it */
			dev_dbg(pcie->dev, "unexpected MSI\n");
		}

		/* see if there's any more pending in this vector */
		reg = pci_read_reg(pcie, PCIEMSIFR);
	}

	return IRQ_HANDLED;
}

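/*
 * Compose the MSI message for a device: the address is the data target
 * programmed into PCIEMSIALR/PCIEMSIAUR and the payload is the hardware
 * vector number, which the controller reports back through PCIEMSIFR.
 */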
static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
			      struct msi_desc *desc)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct rcar_pcie *pcie = container_of(chip, struct rcar_pcie, msi.chip);
	struct msi_msg msg;
	unsigned int irq;
	int hwirq;

	hwirq = rcar_msi_alloc(msi);
	if (hwirq < 0)
		return hwirq;

	irq = irq_create_mapping(msi->domain, hwirq);
	if (!irq) {
		rcar_msi_free(msi, hwirq);
		return -EINVAL;
	}

	irq_set_msi_desc(irq, desc);

	msg.address_lo = pci_read_reg(pcie, PCIEMSIALR) & ~MSIFE;
	msg.address_hi = pci_read_reg(pcie, PCIEMSIAUR);
	msg.data = hwirq;

	write_msi_msg(irq, &msg);

	return 0;
}

static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
{
	struct rcar_msi *msi = to_rcar_msi(chip);
	struct irq_data *d = irq_get_irq_data(irq);

	rcar_msi_free(msi, d->hwirq);
}

static struct irq_chip rcar_msi_irq_chip = {
	.name = "R-Car PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

static int rcar_msi_map(struct irq_domain *domain, unsigned int irq,
			irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}

static const struct irq_domain_ops msi_domain_ops = {
	.map = rcar_msi_map,
};

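/*
 * Set up the MSI machinery: create the IRQ domain, install the (shared)
 * MSI/INTx interrupt handlers and point the controller's MSI data target
 * at a freshly allocated page. The page itself is never touched by the
 * CPU; it only reserves a valid physical address for inbound MSI writes,
 * which the controller captures and reports through PCIEMSIFR.
 */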
static int rcar_pcie_enable_msi(struct rcar_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct rcar_msi *msi = &pcie->msi;
	unsigned long base;
	int err;

	mutex_init(&msi->lock);

	msi->chip.dev = pcie->dev;
	msi->chip.setup_irq = rcar_msi_setup_irq;
	msi->chip.teardown_irq = rcar_msi_teardown_irq;

	msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
					    &msi_domain_ops, &msi->chip);
	if (!msi->domain) {
		dev_err(&pdev->dev, "failed to create IRQ domain\n");
		return -ENOMEM;
	}

	/* Two irqs are for MSI, but they are also used for non-MSI irqs */
	err = devm_request_irq(&pdev->dev, msi->irq1, rcar_pcie_msi_irq,
			       IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	err = devm_request_irq(&pdev->dev, msi->irq2, rcar_pcie_msi_irq,
			       IRQF_SHARED, rcar_msi_irq_chip.name, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
		goto err;
	}

	/* setup MSI data target */
	msi->pages = __get_free_pages(GFP_KERNEL, 0);
	base = virt_to_phys((void *)msi->pages);

	pci_write_reg(pcie, base | MSIFE, PCIEMSIALR);
	pci_write_reg(pcie, 0, PCIEMSIAUR);

	/* enable all MSI interrupts */
	pci_write_reg(pcie, 0xffffffff, PCIEMSIIER);

	return 0;

err:
	irq_domain_remove(msi->domain);
	return err;
}

static int rcar_pcie_get_resources(struct platform_device *pdev,
				   struct rcar_pcie *pcie)
{
	struct resource res;
	int err, i;

	err = of_address_to_resource(pdev->dev.of_node, 0, &res);
	if (err)
		return err;

	pcie->clk = devm_clk_get(&pdev->dev, "pcie");
	if (IS_ERR(pcie->clk)) {
		dev_err(pcie->dev, "cannot get platform clock\n");
		return PTR_ERR(pcie->clk);
	}
	err = clk_prepare_enable(pcie->clk);
	if (err)
		goto fail_clk;

	pcie->bus_clk = devm_clk_get(&pdev->dev, "pcie_bus");
	if (IS_ERR(pcie->bus_clk)) {
		dev_err(pcie->dev, "cannot get pcie bus clock\n");
		err = PTR_ERR(pcie->bus_clk);
		goto fail_clk;
	}
	err = clk_prepare_enable(pcie->bus_clk);
	if (err)
		goto err_map_reg;

	/* irq_of_parse_and_map() returns 0, not a negative value, on failure */
	i = irq_of_parse_and_map(pdev->dev.of_node, 0);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq1 = i;

	i = irq_of_parse_and_map(pdev->dev.of_node, 1);
	if (!i) {
		dev_err(pcie->dev, "cannot get platform resources for msi interrupt\n");
		err = -ENOENT;
		goto err_map_reg;
	}
	pcie->msi.irq2 = i;

	pcie->base = devm_ioremap_resource(&pdev->dev, &res);
	if (IS_ERR(pcie->base)) {
		err = PTR_ERR(pcie->base);
		goto err_map_reg;
	}

	return 0;

err_map_reg:
	clk_disable_unprepare(pcie->bus_clk);
fail_clk:
	clk_disable_unprepare(pcie->clk);

	return err;
}

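/*
 * Program the inbound (PCIe -> CPU) translation windows for one dma-ranges
 * entry. Ranges that are larger than the alignment of their start address,
 * or larger than 4 GiB, are split across several PRAR/LAR/LAMR pairs.
 */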
static int rcar_pcie_inbound_ranges(struct rcar_pcie *pcie,
				    struct of_pci_range *range,
				    int *index)
{
	u64 restype = range->flags;
	u64 cpu_addr = range->cpu_addr;
	u64 cpu_end = range->cpu_addr + range->size;
	u64 pci_addr = range->pci_addr;
	u32 flags = LAM_64BIT | LAR_ENABLE;
	u64 mask;
	u64 size;
	int idx = *index;

	if (restype & IORESOURCE_PREFETCH)
		flags |= LAM_PREFETCH;

	/*
	 * If the size of the range is larger than the alignment of the start
	 * address, we have to use multiple entries to perform the mapping.
	 */
	if (cpu_addr > 0) {
		unsigned long nr_zeros = __ffs64(cpu_addr);
		u64 alignment = 1ULL << nr_zeros;
		size = min(range->size, alignment);
	} else {
		size = range->size;
	}
	/* Hardware supports max 4GiB inbound region */
	size = min(size, 1ULL << 32);

	mask = roundup_pow_of_two(size) - 1;
	mask &= ~0xf;

	while (cpu_addr < cpu_end) {
		/*
		 * Set up 64-bit inbound regions as the range parser doesn't
		 * distinguish between 32 and 64-bit types.
		 */
		pci_write_reg(pcie, lower_32_bits(pci_addr), PCIEPRAR(idx));
		pci_write_reg(pcie, lower_32_bits(cpu_addr), PCIELAR(idx));
		pci_write_reg(pcie, lower_32_bits(mask) | flags, PCIELAMR(idx));

		pci_write_reg(pcie, upper_32_bits(pci_addr), PCIEPRAR(idx+1));
		pci_write_reg(pcie, upper_32_bits(cpu_addr), PCIELAR(idx+1));
		pci_write_reg(pcie, 0, PCIELAMR(idx+1));

		pci_addr += size;
		cpu_addr += size;
		idx += 2;

		if (idx > MAX_NR_INBOUND_MAPS) {
			dev_err(pcie->dev, "Failed to map inbound regions!\n");
			return -EINVAL;
		}
	}
	*index = idx;

	return 0;
}

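/*
 * Local copy of of_pci_range_parser_init() that walks the "dma-ranges"
 * property instead of "ranges"; na/ns are the PCI address and size cell
 * counts of each entry.
 */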
static int pci_dma_range_parser_init(struct of_pci_range_parser *parser,
				     struct device_node *node)
{
	const int na = 3, ns = 2;
	int rlen;

	parser->node = node;
	parser->pna = of_n_addr_cells(node);
	parser->np = parser->pna + na + ns;

	parser->range = of_get_property(node, "dma-ranges", &rlen);
	if (!parser->range)
		return -ENOENT;

	parser->end = parser->range + rlen / sizeof(__be32);
	return 0;
}

static int rcar_pcie_parse_map_dma_ranges(struct rcar_pcie *pcie,
					  struct device_node *np)
{
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	int index = 0;
	int err;

	if (pci_dma_range_parser_init(&parser, np))
		return -EINVAL;

	/* Get the dma-ranges from DT */
	for_each_of_pci_range(&parser, &range) {
		u64 end = range.cpu_addr + range.size - 1;
		dev_dbg(pcie->dev, "0x%08x 0x%016llx..0x%016llx -> 0x%016llx\n",
			range.flags, range.cpu_addr, end, range.pci_addr);

		err = rcar_pcie_inbound_ranges(pcie, &range, &index);
		if (err)
			return err;
	}

	return 0;
}

static const struct of_device_id rcar_pcie_of_match[] = {
	{ .compatible = "renesas,pcie-r8a7779", .data = rcar_pcie_hw_init_h1 },
	{ .compatible = "renesas,pcie-r8a7790", .data = rcar_pcie_hw_init },
	{ .compatible = "renesas,pcie-r8a7791", .data = rcar_pcie_hw_init },
	{},
};
MODULE_DEVICE_TABLE(of, rcar_pcie_of_match);

static int rcar_pcie_probe(struct platform_device *pdev)
{
	struct rcar_pcie *pcie;
	unsigned int data;
	struct of_pci_range range;
	struct of_pci_range_parser parser;
	const struct of_device_id *of_id;
	int err, win = 0;
	int (*hw_init_fn)(struct rcar_pcie *);

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->dev = &pdev->dev;
	platform_set_drvdata(pdev, pcie);

	/* Get the bus range */
	if (of_pci_parse_bus_range(pdev->dev.of_node, &pcie->busn)) {
		dev_err(&pdev->dev, "failed to parse bus-range property\n");
		return -EINVAL;
	}

	if (of_pci_range_parser_init(&parser, pdev->dev.of_node)) {
		dev_err(&pdev->dev, "missing ranges property\n");
		return -EINVAL;
	}

	err = rcar_pcie_get_resources(pdev, pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	for_each_of_pci_range(&parser, &range) {
		/* Stop before overrunning the fixed-size resource array */
		if (win >= PCI_MAX_RESOURCES)
			break;

		of_pci_range_to_resource(&range, pdev->dev.of_node,
					 &pcie->res[win++]);
	}

	err = rcar_pcie_parse_map_dma_ranges(pcie, pdev->dev.of_node);
	if (err)
		return err;

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = rcar_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			return err;
		}
	}

	of_id = of_match_device(rcar_pcie_of_match, pcie->dev);
	if (!of_id || !of_id->data)
		return -EINVAL;
	hw_init_fn = of_id->data;

	/* Failure to get a link might just be that no cards are inserted */
	err = hw_init_fn(pcie);
	if (err) {
		dev_info(&pdev->dev, "PCIe link down\n");
		return 0;
	}

	data = pci_read_reg(pcie, MACSR);
	dev_info(&pdev->dev, "PCIe x%d: link up\n", (data >> 20) & 0x3f);

	rcar_pcie_enable(pcie);

	return 0;
}

static struct platform_driver rcar_pcie_driver = {
	.driver = {
		.name = DRV_NAME,
		.owner = THIS_MODULE,
		.of_match_table = rcar_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = rcar_pcie_probe,
};
module_platform_driver(rcar_pcie_driver);

MODULE_AUTHOR("Phil Edworthy <phil.edworthy@renesas.com>");
MODULE_DESCRIPTION("Renesas R-Car PCIe driver");
MODULE_LICENSE("GPL v2");