/*
 * Low-Level PCI Express Support for the SH7786
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/sh_clk.h>
#include "pcie-sh7786.h"
#include <asm/sizes.h>
#include <asm/clock.h>

struct sh7786_pcie_port {
	struct pci_channel	*hose;
	struct clk		*fclk, phy_clk;
	unsigned int		index;
	int			endpoint;
	int			link;
};

static struct sh7786_pcie_port *sh7786_pcie_ports;
static unsigned int nr_ports;

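/*
 * Indirection for the core and per-port bring-up routines, so that other
 * parts in the family could slot in alternate implementations; only the
 * 65nm SH7786 ops are provided below.
 */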
static struct sh7786_pcie_hwops {
	int (*core_init)(void);
	int (*port_init_hw)(struct sh7786_pcie_port *port);
} *sh7786_pcie_hwops;

static struct resource sh7786_pci0_resources[] = {
	{
		.name	= "PCIe0 IO",
		.start	= 0xfd000000,
		.end	= 0xfd000000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCIe0 MEM 0",
		.start	= 0xc0000000,
		.end	= 0xc0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe0 MEM 1",
		.start	= 0x10000000,
		.end	= 0x10000000 + SZ_64M - 1,
		.flags	= IORESOURCE_MEM,
	}, {
		.name	= "PCIe0 MEM 2",
		.start	= 0xfe100000,
		.end	= 0xfe100000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sh7786_pci1_resources[] = {
	{
		.name	= "PCIe1 IO",
		.start	= 0xfd800000,
		.end	= 0xfd800000 + SZ_8M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCIe1 MEM 0",
		.start	= 0xa0000000,
		.end	= 0xa0000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe1 MEM 1",
		.start	= 0x30000000,
		.end	= 0x30000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe1 MEM 2",
		.start	= 0xfe300000,
		.end	= 0xfe300000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

static struct resource sh7786_pci2_resources[] = {
	{
		.name	= "PCIe2 IO",
		.start	= 0xfc800000,
		.end	= 0xfc800000 + SZ_4M - 1,
		.flags	= IORESOURCE_IO,
	}, {
		.name	= "PCIe2 MEM 0",
		.start	= 0x80000000,
		.end	= 0x80000000 + SZ_512M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe2 MEM 1",
		.start	= 0x20000000,
		.end	= 0x20000000 + SZ_256M - 1,
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name	= "PCIe2 MEM 2",
		.start	= 0xfcd00000,
		.end	= 0xfcd00000 + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
};

extern struct pci_ops sh7786_pci_ops;

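/*
 * Each controller's pci_channel is built from its register block base and
 * the matching sh7786_pciN_resources[] array defined above.
 */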
#define DEFINE_CONTROLLER(start, idx)					\
{									\
	.pci_ops	= &sh7786_pci_ops,				\
	.resources	= sh7786_pci##idx##_resources,			\
	.nr_resources	= ARRAY_SIZE(sh7786_pci##idx##_resources),	\
	.reg_base	= start,					\
	.mem_offset	= 0,						\
	.io_offset	= 0,						\
}

static struct pci_channel sh7786_pci_channels[] = {
	DEFINE_CONTROLLER(0xfe000000, 0),
	DEFINE_CONTROLLER(0xfe200000, 1),
	DEFINE_CONTROLLER(0xfcc00000, 2),
};

static struct clk fixed_pciexclkp = {
	.rate = 100000000,	/* 100 MHz reference clock */
};

static void __devinit sh7786_pci_fixup(struct pci_dev *dev)
{
	/*
	 * Prevent enumeration of root complex resources.
	 */
	if (pci_is_root_bus(dev->bus) && dev->devfn == 0) {
		int i;

		for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
			dev->resource[i].start	= 0;
			dev->resource[i].end	= 0;
			dev->resource[i].flags	= 0;
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_SH7786,
			 sh7786_pci_fixup);

static int __init phy_wait_for_ack(struct pci_channel *chan)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

static int __init pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

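/*
 * PHY registers are accessed indirectly: the data word is staged in
 * PCIEPHYDOUTR, the lane/address/command word is written to PCIEPHYADRR,
 * and completion is signalled through the ACK bit.
 */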
static void __init phy_write_reg(struct pci_channel *chan, unsigned int addr,
				 unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
			((addr & 0xff) << BITS_ADR);

	/* Set write data */
	pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Clear command */
	pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);
}

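/*
 * Each port needs two clocks: its function clock ("pcieN_fck", looked up
 * from the SoC clock framework) and a PHY clock, modelled as an MSTP-style
 * gate (BITS_CKE in PCIEPHYCTLR) fed by the fixed 100MHz reference clock.
 */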
static int __init pcie_clk_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	struct clk *clk;
	char fclk_name[16];
	int ret;

	/*
	 * First register the fixed clock
	 */
	ret = clk_register(&fixed_pciexclkp);
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Grab the port's function clock, which the PHY clock depends
	 * on. clock lookups don't help us much at this point, since no
	 * dev_id is available this early. Lame.
	 */
	snprintf(fclk_name, sizeof(fclk_name), "pcie%d_fck", port->index);

	port->fclk = clk_get(NULL, fclk_name);
	if (IS_ERR(port->fclk)) {
		ret = PTR_ERR(port->fclk);
		goto err_fclk;
	}

	clk_enable(port->fclk);

	/*
	 * And now, set up the PHY clock
	 */
	clk = &port->phy_clk;

	memset(clk, 0, sizeof(struct clk));

	clk->parent = &fixed_pciexclkp;
	clk->enable_reg = (void __iomem *)(chan->reg_base + SH4A_PCIEPHYCTLR);
	clk->enable_bit = BITS_CKE;

	ret = sh_clk_mstp32_register(clk, 1);
	if (unlikely(ret < 0))
		goto err_phy;

	return 0;

err_phy:
	clk_disable(port->fclk);
	clk_put(port->fclk);
err_fclk:
	clk_unregister(&fixed_pciexclkp);

	return ret;
}

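/*
 * The PHY parameter values written below are opaque magic numbers,
 * presumably taken from the SH7786 hardware documentation; they are
 * broadcast to all lanes (lane mask 0xf) apart from the standby release.
 */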
static int __init phy_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	unsigned int timeout = 100;

	clk_enable(&port->phy_clk);

	/* Initialize the phy */
	phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
	phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
	phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
	phy_write_reg(chan, 0x65, 0xf, 0x09070907);
	phy_write_reg(chan, 0x66, 0xf, 0x00000010);
	phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
	phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
	phy_write_reg(chan, 0xb0, 0xf, 0x00000610);

	/* Deassert Standby */
	phy_write_reg(chan, 0x67, 0x1, 0x00000400);

	/* Disable clock */
	clk_disable(&port->phy_clk);

	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYSR))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

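/*
 * Force the port back to its power-on state: pulse the soft reset and
 * clear out the transfer control and VC0 status registers.
 */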
static void __init pcie_reset(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;

	pci_write_reg(chan, 1, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETCTLR);
	pci_write_reg(chan, 0, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);
}

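/*
 * Main port bring-up: reset the core, establish sane config space
 * defaults, open up the local address windows to system RAM, start the
 * port and wait for link training, then program the outbound address
 * windows from the platform resources.
 */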
static int __init pcie_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	unsigned int data;
	phys_addr_t memphys;
	size_t memsize;
	int ret, i, win;

	/* Begin initialization */
	pcie_reset(port);

	/*
	 * Initial header for port config space is type 1, set the device
	 * class to match. Hardware takes care of propagating the IDSETR
	 * settings, so there is no need to bother with a quirk.
	 */
	pci_write_reg(chan, PCI_CLASS_BRIDGE_PCI << 16, SH4A_PCIEIDSETR1);

	/* Initialize default capabilities. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
	data &= ~(PCI_EXP_FLAGS_TYPE << 16);

	if (port->endpoint)
		data |= PCI_EXP_TYPE_ENDPOINT << 20;
	else
		data |= PCI_EXP_TYPE_ROOT_PORT << 20;

	data |= PCI_CAP_ID_EXP;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);

	/* Enable data link layer active state reporting */
	pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);

	/* Enable extended sync and ASPM L0s support */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
	data &= ~PCI_EXP_LNKCTL_ASPMC;
	data |= PCI_EXP_LNKCTL_ES | 1;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);

	/* Write out the physical slot number */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
	data &= ~PCI_EXP_SLTCAP_PSN;
	data |= (port->index + 1) << 19;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);

	/* Set the completion timer timeout to the maximum 32ms. */
	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
	data &= ~0x3f00;
	data |= 0x32 << 8;
	pci_write_reg(chan, data, SH4A_PCIETLCTLR);

	/*
	 * Set fast training sequences to the maximum 255,
	 * and enable MAC data scrambling.
	 */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data &= ~PCIEMACCTLR_SCR_DIS;
	data |= (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);

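	/*
	 * Set up the local address windows that expose system RAM to the
	 * bus. The LAMR value packs the window size mask in the upper bits
	 * (hence the size - SZ_256 below), with bit 0 acting as the window
	 * enable bit.
	 */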
	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * LAR1/LAMR1.
	 */
	if (memsize > SZ_512M) {
		pci_write_reg(chan, memphys + SZ_512M, SH4A_PCIELAR1);
		pci_write_reg(chan, ((memsize - SZ_512M) - SZ_256) | 1,
			      SH4A_PCIELAMR1);
		memsize = SZ_512M;
	} else {
		/*
		 * Otherwise just zero it out and disable it.
		 */
		pci_write_reg(chan, 0, SH4A_PCIELAR1);
		pci_write_reg(chan, 0, SH4A_PCIELAMR1);
	}

	/*
	 * LAR0/LAMR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	pci_write_reg(chan, memphys, SH4A_PCIELAR0);
	pci_write_reg(chan, (memsize - SZ_256) | 1, SH4A_PCIELAMR0);

	/* Finish initialization */
	data = pci_read_reg(chan, SH4A_PCIETCTLR);
	data |= 0x1;
	pci_write_reg(chan, data, SH4A_PCIETCTLR);

	/* Let things settle down a bit.. */
	mdelay(100);

	/* Enable DL_Active Interrupt generation */
	data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
	data |= PCIEDLINTENR_DLL_ACT_ENABLE;
	pci_write_reg(chan, data, SH4A_PCIEDLINTENR);

	/* Disable MAC data scrambling. */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	/*
	 * This will timeout if we don't have a link, but we permit the
	 * port to register anyways in order to support hotplug on future
	 * hardware.
	 */
	ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);

	data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
	data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
	data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF1);

	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);

	wmb();

	if (ret == 0) {
		data = pci_read_reg(chan, SH4A_PCIEMACSR);
		printk(KERN_NOTICE "PCI: PCIe#%d x%d link detected\n",
		       port->index, (data >> 20) & 0x3f);
	} else
		printk(KERN_NOTICE "PCI: PCIe#%d link down\n",
		       port->index);

	for (i = win = 0; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;
		u32 mask;

		/*
		 * We can't use the 32-bit mode windows in legacy 29-bit
		 * mode, so just skip them entirely.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
			continue;

		pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));

		/*
		 * The PAMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
		size = resource_size(res);
		mask = (roundup_pow_of_two(size) / SZ_256K) - 1;
		pci_write_reg(chan, mask << 18, SH4A_PCIEPAMR(win));

		pci_write_reg(chan, RES_TO_U32_HIGH(res->start),
			      SH4A_PCIEPARH(win));
		pci_write_reg(chan, RES_TO_U32_LOW(res->start),
			      SH4A_PCIEPARL(win));

		mask = MASK_PARE;
		if (res->flags & IORESOURCE_IO)
			mask |= MASK_SPC;

		pci_write_reg(chan, mask, SH4A_PCIEPTCTLR(win));

		win++;
	}

	return 0;
}

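/*
 * All of the INTx interrupts are routed to a single fixed SoC IRQ, so the
 * slot and pin arguments are simply ignored.
 */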
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
	return 71;
}

static int __init sh7786_pcie_core_init(void)
{
	/* Return the number of ports */
	return test_mode_pin(MODE_PIN12) ? 3 : 2;
}

static int __init sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
{
	int ret;

	/*
	 * Check if we are configured in endpoint or root complex mode,
	 * this is a fixed pin setting that applies to all PCIe ports.
	 */
	port->endpoint = test_mode_pin(MODE_PIN11);

	/*
	 * Setup clocks, needed both for PHY and PCIe registers.
	 */
	ret = pcie_clk_init(port);
	if (unlikely(ret < 0))
		return ret;

	ret = phy_init(port);
	if (unlikely(ret < 0))
		return ret;

	ret = pcie_init(port);
	if (unlikely(ret < 0))
		return ret;

	return register_pci_controller(port->hose);
}

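/* Hardware ops for the 65nm SH7786 parts; selected unconditionally below. */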
static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
	.core_init	= sh7786_pcie_core_init,
	.port_init_hw	= sh7786_pcie_init_hw,
};

static int __init sh7786_pcie_init(void)
{
	int ret = 0, i;

	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;

	nr_ports = sh7786_pcie_hwops->core_init();
	BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));

	if (unlikely(nr_ports == 0))
		return -ENODEV;

	sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
				    GFP_KERNEL);
	if (unlikely(!sh7786_pcie_ports))
		return -ENOMEM;

	printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);

	for (i = 0; i < nr_ports; i++) {
		struct sh7786_pcie_port *port = sh7786_pcie_ports + i;

		port->index		= i;
		port->hose		= sh7786_pci_channels + i;
		port->hose->io_map_base	= port->hose->resources[0].start;

		ret |= sh7786_pcie_hwops->port_init_hw(port);
	}

	if (unlikely(ret))
		return ret;

	return 0;
}
arch_initcall(sh7786_pcie_init);