/*
 * Low-Level PCI Express Support for the SH7786
 *
 * Copyright (C) 2009 - 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "pcie-sh7786.h"
#include <asm/sizes.h>

struct sh7786_pcie_port {
	struct pci_channel *hose;
	unsigned int index;
	int endpoint;
	int link;
};

static struct sh7786_pcie_port *sh7786_pcie_ports;
static unsigned int nr_ports;

static struct sh7786_pcie_hwops {
	int (*core_init)(void);
	int (*port_init_hw)(struct sh7786_pcie_port *port);
} *sh7786_pcie_hwops;

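/*
 * Host-side window resources for each of the three PCIe ports: one I/O
 * window plus three memory windows per port.
 */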
static struct resource sh7786_pci0_resources[] = {
	{
		.name = "PCIe0 IO",
		.start = 0xfd000000,
		.end = 0xfd000000 + SZ_8M - 1,
		.flags = IORESOURCE_IO,
	}, {
		.name = "PCIe0 MEM 0",
		.start = 0xc0000000,
		.end = 0xc0000000 + SZ_512M - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name = "PCIe0 MEM 1",
		.start = 0x10000000,
		.end = 0x10000000 + SZ_64M - 1,
		.flags = IORESOURCE_MEM,
	}, {
		.name = "PCIe0 MEM 2",
		.start = 0xfe100000,
		.end = 0xfe100000 + SZ_1M - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct resource sh7786_pci1_resources[] = {
	{
		.name = "PCIe1 IO",
		.start = 0xfd800000,
		.end = 0xfd800000 + SZ_8M - 1,
		.flags = IORESOURCE_IO,
	}, {
		.name = "PCIe1 MEM 0",
		.start = 0xa0000000,
		.end = 0xa0000000 + SZ_512M - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name = "PCIe1 MEM 1",
		.start = 0x30000000,
		.end = 0x30000000 + SZ_256M - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name = "PCIe1 MEM 2",
		.start = 0xfe300000,
		.end = 0xfe300000 + SZ_1M - 1,
		.flags = IORESOURCE_MEM,
	},
};

static struct resource sh7786_pci2_resources[] = {
	{
		.name = "PCIe2 IO",
		.start = 0xfc800000,
		.end = 0xfc800000 + SZ_4M - 1,
		.flags = IORESOURCE_IO,
	}, {
		.name = "PCIe2 MEM 0",
		.start = 0x80000000,
		.end = 0x80000000 + SZ_512M - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name = "PCIe2 MEM 1",
		.start = 0x20000000,
		.end = 0x20000000 + SZ_256M - 1,
		.flags = IORESOURCE_MEM | IORESOURCE_MEM_32BIT,
	}, {
		.name = "PCIe2 MEM 2",
		.start = 0xfcd00000,
		.end = 0xfcd00000 + SZ_1M - 1,
		.flags = IORESOURCE_MEM,
	},
};

extern struct pci_ops sh7786_pci_ops;

#define DEFINE_CONTROLLER(start, idx) \
{ \
	.pci_ops = &sh7786_pci_ops, \
	.resources = sh7786_pci##idx##_resources, \
	.nr_resources = ARRAY_SIZE(sh7786_pci##idx##_resources), \
	.reg_base = start, \
	.mem_offset = 0, \
	.io_offset = 0, \
}

static struct pci_channel sh7786_pci_channels[] = {
	DEFINE_CONTROLLER(0xfe000000, 0),
	DEFINE_CONTROLLER(0xfe200000, 1),
	DEFINE_CONTROLLER(0xfcc00000, 2),
};

static int phy_wait_for_ack(struct pci_channel *chan)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYADRR) & (1 << BITS_ACK))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

static int pci_wait_for_irq(struct pci_channel *chan, unsigned int mask)
{
	unsigned int timeout = 100;

	while (timeout--) {
		if ((pci_read_reg(chan, SH4A_PCIEINTR) & mask) == mask)
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

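/*
 * PHY registers are accessed indirectly: the command, lane and address
 * bits are packed into PCIEPHYADRR, the data goes through PCIEPHYDOUTR,
 * and completion is signalled by the ACK bit polled above.
 */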
static void phy_write_reg(struct pci_channel *chan, unsigned int addr,
			  unsigned int lane, unsigned int data)
{
	unsigned long phyaddr;

	phyaddr = (1 << BITS_CMD) + ((lane & 0xf) << BITS_LANE) +
			((addr & 0xff) << BITS_ADR);

	/* Set write data */
	pci_write_reg(chan, data, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, phyaddr, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);

	/* Clear command */
	pci_write_reg(chan, 0, SH4A_PCIEPHYDOUTR);
	pci_write_reg(chan, 0, SH4A_PCIEPHYADRR);

	phy_wait_for_ack(chan);
}

static int phy_init(struct pci_channel *chan)
{
	unsigned long ctrl;
	unsigned int timeout = 100;

	/* Enable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl |= (1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	/* Initialize the phy */
	phy_write_reg(chan, 0x60, 0xf, 0x004b008b);
	phy_write_reg(chan, 0x61, 0xf, 0x00007b41);
	phy_write_reg(chan, 0x64, 0xf, 0x00ff4f00);
	phy_write_reg(chan, 0x65, 0xf, 0x09070907);
	phy_write_reg(chan, 0x66, 0xf, 0x00000010);
	phy_write_reg(chan, 0x74, 0xf, 0x0007001c);
	phy_write_reg(chan, 0x79, 0xf, 0x01fc000d);
	phy_write_reg(chan, 0xb0, 0xf, 0x00000610);

	/* Deassert Standby */
	phy_write_reg(chan, 0x67, 0x1, 0x00000400);

	/* Disable clock */
	ctrl = pci_read_reg(chan, SH4A_PCIEPHYCTLR);
	ctrl &= ~(1 << BITS_CKE);
	pci_write_reg(chan, ctrl, SH4A_PCIEPHYCTLR);

	while (timeout--) {
		if (pci_read_reg(chan, SH4A_PCIEPHYSR))
			return 0;

		udelay(100);
	}

	return -ETIMEDOUT;
}

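/* Put the port back into a known state before (re)initializing it. */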
static void pcie_reset(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;

	pci_write_reg(chan, 1, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETCTLR);
	pci_write_reg(chan, 0, SH4A_PCIESRSTR);
	pci_write_reg(chan, 0, SH4A_PCIETXVC0SR);
}

static int pcie_init(struct sh7786_pcie_port *port)
{
	struct pci_channel *chan = port->hose;
	unsigned int data;
	phys_addr_t memphys;
	size_t memsize;
	int ret, i, win;

	/* Begin initialization */
	pcie_reset(port);

	/* Initialize as type1. */
	data = pci_read_reg(chan, SH4A_PCIEPCICONF3);
	data &= ~(0x7f << 16);
	data |= PCI_HEADER_TYPE_BRIDGE << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF3);

	/* Initialize default capabilities. */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP0);
	data &= ~(PCI_EXP_FLAGS_TYPE << 16);

	if (port->endpoint)
		data |= PCI_EXP_TYPE_ENDPOINT << 20;
	else
		data |= PCI_EXP_TYPE_ROOT_PORT << 20;

	data |= PCI_CAP_ID_EXP;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP0);

	/* Enable data link layer active state reporting */
	pci_write_reg(chan, PCI_EXP_LNKCAP_DLLLARC, SH4A_PCIEEXPCAP3);

	/* Enable extended sync and ASPM L0s support */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP4);
	data &= ~PCI_EXP_LNKCTL_ASPMC;
	data |= PCI_EXP_LNKCTL_ES | 1;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP4);

	/* Write out the physical slot number */
	data = pci_read_reg(chan, SH4A_PCIEEXPCAP5);
	data &= ~PCI_EXP_SLTCAP_PSN;
	data |= (port->index + 1) << 19;
	pci_write_reg(chan, data, SH4A_PCIEEXPCAP5);

	/* Set the completion timer timeout to the maximum 32ms. */
	data = pci_read_reg(chan, SH4A_PCIETLCTLR);
	data &= ~0x3f00;
	data |= 0x32 << 8;
	pci_write_reg(chan, data, SH4A_PCIETLCTLR);

	/*
	 * Set fast training sequences to the maximum 255,
	 * and enable MAC data scrambling.
	 */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data &= ~PCIEMACCTLR_SCR_DIS;
	data |= (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	memphys = __pa(memory_start);
	memsize = roundup_pow_of_two(memory_end - memory_start);

	/*
	 * If there's more than 512MB of memory, we need to roll over to
	 * LAR1/LAMR1.
	 */
	if (memsize > SZ_512M) {
		__raw_writel(memphys + SZ_512M, chan->reg_base + SH4A_PCIELAR1);
		__raw_writel(((memsize - SZ_512M) - SZ_256) | 1,
			     chan->reg_base + SH4A_PCIELAMR1);
		memsize = SZ_512M;
	} else {
		/*
		 * Otherwise just zero it out and disable it.
		 */
		__raw_writel(0, chan->reg_base + SH4A_PCIELAR1);
		__raw_writel(0, chan->reg_base + SH4A_PCIELAMR1);
	}

	/*
	 * LAR0/LAMR0 covers up to the first 512MB, which is enough to
	 * cover all of lowmem on most platforms.
	 */
	__raw_writel(memphys, chan->reg_base + SH4A_PCIELAR0);
	__raw_writel((memsize - SZ_256) | 1, chan->reg_base + SH4A_PCIELAMR0);

	__raw_writel(memphys, chan->reg_base + SH4A_PCIEPCICONF4);
	__raw_writel(0, chan->reg_base + SH4A_PCIEPCICONF5);

	/* Finish initialization */
	data = pci_read_reg(chan, SH4A_PCIETCTLR);
	data |= 0x1;
	pci_write_reg(chan, data, SH4A_PCIETCTLR);

	/* Let things settle down a bit.. */
	mdelay(100);

	/* Enable DL_Active Interrupt generation */
	data = pci_read_reg(chan, SH4A_PCIEDLINTENR);
	data |= PCIEDLINTENR_DLL_ACT_ENABLE;
	pci_write_reg(chan, data, SH4A_PCIEDLINTENR);

	/* Disable MAC data scrambling. */
	data = pci_read_reg(chan, SH4A_PCIEMACCTLR);
	data |= PCIEMACCTLR_SCR_DIS | (0xff << 16);
	pci_write_reg(chan, data, SH4A_PCIEMACCTLR);

	ret = pci_wait_for_irq(chan, MASK_INT_TX_CTRL);
	if (unlikely(ret != 0))
		return -ENODEV;

	data = pci_read_reg(chan, SH4A_PCIEPCICONF1);
	data &= ~(PCI_STATUS_DEVSEL_MASK << 16);
	data |= PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER |
		(PCI_STATUS_CAP_LIST | PCI_STATUS_DEVSEL_FAST) << 16;
	pci_write_reg(chan, data, SH4A_PCIEPCICONF1);

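	/*
	 * Configure the TX/RX VC0 data control registers with the
	 * hard-coded bring-up values used by the original port.
	 */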
	pci_write_reg(chan, 0x80888000, SH4A_PCIETXVC0DCTLR);
	pci_write_reg(chan, 0x00222000, SH4A_PCIERXVC0DCTLR);

	wmb();

	data = pci_read_reg(chan, SH4A_PCIEMACSR);
	printk(KERN_NOTICE "PCI: PCIe#%d link width %d\n",
	       port->index, (data >> 20) & 0x3f);

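	/*
	 * Program one outbound address translation window per host
	 * resource, skipping any windows that can't be used in the
	 * current addressing mode.
	 */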
	for (i = win = 0; i < chan->nr_resources; i++) {
		struct resource *res = chan->resources + i;
		resource_size_t size;
		u32 enable_mask;

		/*
		 * We can't use the 32-bit mode windows in legacy 29-bit
		 * mode, so just skip them entirely.
		 */
		if ((res->flags & IORESOURCE_MEM_32BIT) && __in_29bit_mode())
			continue;

		pci_write_reg(chan, 0x00000000, SH4A_PCIEPTCTLR(win));

		size = resource_size(res);

		/*
		 * The PAMR mask is calculated in units of 256kB, which
		 * keeps things pretty simple.
		 */
		__raw_writel(((roundup_pow_of_two(size) / SZ_256K) - 1) << 18,
			     chan->reg_base + SH4A_PCIEPAMR(win));

		pci_write_reg(chan, res->start, SH4A_PCIEPARL(win));
		pci_write_reg(chan, 0x00000000, SH4A_PCIEPARH(win));

		enable_mask = MASK_PARE;
		if (res->flags & IORESOURCE_IO)
			enable_mask |= MASK_SPC;

		pci_write_reg(chan, enable_mask, SH4A_PCIEPTCTLR(win));

		win++;
	}

	return 0;
}

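/* Every device, regardless of slot or pin, is mapped to a single fixed IRQ. */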
int __init pcibios_map_platform_irq(struct pci_dev *pdev, u8 slot, u8 pin)
{
	return 71;
}

static int sh7786_pcie_core_init(void)
{
	/* Return the number of ports */
	return test_mode_pin(MODE_PIN12) ? 3 : 2;
}

static int __devinit sh7786_pcie_init_hw(struct sh7786_pcie_port *port)
{
	int ret;

	ret = phy_init(port->hose);
	if (unlikely(ret < 0))
		return ret;

	/*
	 * Check if we are configured in endpoint or root complex mode;
	 * this is a fixed pin setting that applies to all PCIe ports.
	 */
	port->endpoint = test_mode_pin(MODE_PIN11);

	ret = pcie_init(port);
	if (unlikely(ret < 0))
		return ret;

	return register_pci_controller(port->hose);
}

static struct sh7786_pcie_hwops sh7786_65nm_pcie_hwops __initdata = {
	.core_init = sh7786_pcie_core_init,
	.port_init_hw = sh7786_pcie_init_hw,
};

static int __init sh7786_pcie_init(void)
{
	int ret = 0, i;

	printk(KERN_NOTICE "PCI: Starting initialization.\n");

	sh7786_pcie_hwops = &sh7786_65nm_pcie_hwops;

	nr_ports = sh7786_pcie_hwops->core_init();
	BUG_ON(nr_ports > ARRAY_SIZE(sh7786_pci_channels));

	if (unlikely(nr_ports == 0))
		return -ENODEV;

	sh7786_pcie_ports = kzalloc(nr_ports * sizeof(struct sh7786_pcie_port),
				    GFP_KERNEL);
	if (unlikely(!sh7786_pcie_ports))
		return -ENOMEM;

	printk(KERN_NOTICE "PCI: probing %d ports.\n", nr_ports);

	for (i = 0; i < nr_ports; i++) {
		struct sh7786_pcie_port *port = sh7786_pcie_ports + i;

		port->index = i;
		port->hose = sh7786_pci_channels + i;
		port->hose->io_map_base = port->hose->resources[0].start;

		ret |= sh7786_pcie_hwops->port_init_hw(port);
	}

	if (unlikely(ret))
		return ret;

	return 0;
}
arch_initcall(sh7786_pcie_init);