/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
#define PCI_CONFIG_SPACE_SIZE	256

bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;

	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;

	bcma_core_enable(pc->core, 0);

	return !mips_busprobe32(tmp, pc->core->io_addr);
}

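/* Indirect access to the PCIe core's own configuration registers: latch the
 * target register in CONFIG_ADDR, read that register back (presumably to make
 * sure the address write has reached the core), then access CONFIG_DATA. */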
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

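/* Compose the backplane address for a Type 0 configuration access to an
 * external PCIe device: point the SBTOPCI1 window at configuration space and
 * encode device, function and the dword-aligned register offset into the low
 * address bits. Returns 0 when the link is down or the slot is out of range. */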
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}

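/* Read from the configuration space of a device behind the PCIe core. The
 * host bridge itself (dev 0) is accessed through the core's own registers,
 * directly for offsets below 256 and indirectly for the extended config
 * space; external devices are reached through a temporarily ioremap()ed
 * window on the backplane. */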
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, unsigned int off,
				   void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = 0;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFF);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}
	}
	val >>= (8 * (off & 3));

	switch (len) {
	case 1:
		*((u8 *)buf) = (u8)val;
		break;
	case 2:
		*((u16 *)buf) = (u16)val;
		break;
	case 4:
		*((u32 *)buf) = (u32)val;
		break;
	}
	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

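/* Write to the configuration space of a device behind the PCIe core. The
 * same access paths as in bcma_extpci_read_config() apply; byte and word
 * writes are folded into a read-modify-write of the containing 32-bit word. */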
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				    unsigned int func, unsigned int off,
				    const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = 0;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* we support only two functions on device 0 */
		if (func > 1)
			goto out;

		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off < PCI_CONFIG_SPACE_SIZE) {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}
	if (dev == 0) {
		/* accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFF);
			bcma_pcie_write_config(pc, addr, val);
		} else {
			pcicore_write32(pc, addr, val);
		}
	} else {
		writel(val, mmio);

		if (chipid == BCMA_CHIP_ID_BCM4716 ||
		    chipid == BCMA_CHIP_ID_BCM4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

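/* pci_ops callbacks used by the PCI host code: translate bus/devfn based
 * accesses into bcma_extpci_{read,write}_config() calls, serialized by the
 * cfgspace_lock spinlock. */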
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				       PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* Return cap_offset if the requested capability exists in the PCI config space */
static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
					     unsigned int dev,
					     unsigned int func, u8 req_cap_id,
					     unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;

	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;

	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;

	/* loop through the capability list and see if the requested
	 * capability exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}

	/* found the capability requested by the caller */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}

	return cap_ptr;
}

/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u8 cap_ptr, root_ctrl, root_cap, dev;
	u16 val16;
	int i;

	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
					   NULL);
	root_cap = cap_ptr + PCI_EXP_RTCAP;
	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
		/* Enable CRS software visibility */
		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
		val16 = PCI_EXP_RTCTL_CRSSVE;
		bcma_extpci_read_config(pc, 0, 0, root_ctrl, &val16,
					sizeof(u16));

		/* Initiate a configuration request to read the vendor id
		 * field of the device function's config space header after
		 * 100 ms wait time from the end of Reset. If the device is
		 * not done with its internal initialization, it must at
		 * least return a completion TLP, with a completion status
		 * of "Configuration Request Retry Status (CRS)". The root
		 * complex must complete the request to the host by returning
		 * a read-data value of 0001h for the Vendor ID field and
		 * all 1s for any additional bytes included in the request.
		 * Poll using the config reads for a max wait time of 1 sec
		 * or until we receive the successful completion status.
		 * Repeat the procedure for all the devices.
		 */
		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
			for (i = 0; i < 100000; i++) {
				bcma_extpci_read_config(pc, dev, 0,
							PCI_VENDOR_ID, &val16,
							sizeof(val16));
				if (val16 != 0x1)
					break;
				udelay(10);
			}
			if (val16 == 0x1)
				bcma_err(bus, "PCI: Broken device in slot %d\n",
					 dev);
		}
	}
}

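/* Bring the PCIe core up as a host controller: allocate the controller state,
 * program the backplane-to-PCI address windows, reset the root complex and
 * finally register the controller with the (MIPS-specific) PCI code. */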
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;

	bcma_info(bus, "PCIEcore in host mode found\n");

	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		bcma_info(bus, "This PCIE core is disabled and not working\n");
		return;
	}

	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host) {
		bcma_err(bus, "can not allocate memory");
		return;
	}

	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;

	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

	/* Reset RC */
	usleep_range(3000, 5000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	msleep(50);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);

	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as mips can't generate 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x100;
			pc_host->io_resource.end = 0x47F;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pc_host->io_resource.start = 0x480;
			pc_host->io_resource.end = 0x7FF;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);

	bcma_core_pci_enable_crs(pc);

	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4716) {
		u16 val16;
		bcma_extpci_read_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					&val16, sizeof(val16));
		val16 |= (2 << 5);	/* Max payload size of 512 */
		val16 |= (2 << 12);	/* MRRS 512 */
		bcma_extpci_write_config(pc, 0, 0, BCMA_CORE_PCI_CFG_DEVCTRL,
					 &val16, sizeof(val16));
	}

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it to the system.
	 * The following needs to change if we want to port hostmode
	 * to a non-MIPS platform. */
	io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
						     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give the PCI controller some time to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
	return;
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;

	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}

	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos, err;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;

	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM)) {
			err = pci_assign_resource(dev, pos);
			if (err)
				pr_err("PCI: Problem fixing up the addresses on %s\n",
				       pci_name(dev));
		}
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);

	pr_info("PCI: Fixing up device %s\n", pci_name(dev));

	/* Fix up interrupt lines */
	dev->irq = bcma_core_irq(pc_host->pdev->core);
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}

	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_irq(pc_host->pdev->core);
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);