/*
 * Broadcom specific AMBA
 * PCI Core in hostmode
 *
 * Copyright 2005 - 2011, Broadcom Corporation
 * Copyright 2006, 2007, Michael Buesch <m@bues.ch>
 * Copyright 2011, 2012, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/pci.h>
#include <linux/export.h>
#include <linux/bcma/bcma.h>
#include <asm/paccess.h>

/* Probe a 32bit value on the bus and catch bus exceptions.
 * Returns nonzero on a bus exception.
 * This is MIPS specific */
#define mips_busprobe32(val, addr)	get_dbe((val), ((u32 *)(addr)))

/* Assume one-hot slot wiring */
#define BCMA_PCI_SLOT_MAX	16
#define PCI_CONFIG_SPACE_SIZE	256

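/* Check whether this PCI(e) core should act as a host controller: only
 * the BCM47xx/BCM53xx SoC families qualify, the SPROM must not mark the
 * core as disabled, and a probe read of the core's I/O space must not
 * raise a bus exception. */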
bool __devinit bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u16 chipid_top;
	u32 tmp;

	chipid_top = (bus->chipinfo.id & 0xFF00);
	if (chipid_top != 0x4700 &&
	    chipid_top != 0x5300)
		return false;

	if (bus->sprom.boardflags_lo & BCMA_CORE_PCI_BFL_NOPCI) {
		bcma_info(bus, "This PCI core is disabled and not working\n");
		return false;
	}

	bcma_core_enable(pc->core, 0);

	return !mips_busprobe32(tmp, pc->core->io_addr);
}

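/* Indirect access to the core's own (extended) configuration registers
 * through the CONFIG_ADDR/CONFIG_DATA register pair. The read-back of
 * CONFIG_ADDR flushes the address write before the data access. */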
static u32 bcma_pcie_read_config(struct bcma_drv_pci *pc, u32 address)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	return pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_DATA);
}

static void bcma_pcie_write_config(struct bcma_drv_pci *pc, u32 address,
				   u32 data)
{
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_ADDR, address);
	pcicore_read32(pc, BCMA_CORE_PCI_CONFIG_ADDR);
	pcicore_write32(pc, BCMA_CORE_PCI_CONFIG_DATA, data);
}

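/* Calculate the backplane address that maps to the config space of an
 * external device (type 0 configuration transaction). Returns 0 if the
 * slot is out of range or the PCIe data link is down. */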
static u32 bcma_get_cfgspace_addr(struct bcma_drv_pci *pc, unsigned int dev,
				  unsigned int func, unsigned int off)
{
	u32 addr = 0;

	/* Issue config commands only when the data link is up (at least
	 * one external PCIe device is present).
	 */
	if (dev >= 2 || !(bcma_pcie_read(pc, BCMA_CORE_PCI_DLLP_LSREG)
			  & BCMA_CORE_PCI_DLLP_LSREG_LINKUP))
		goto out;

	/* Type 0 transaction */
	/* Slide the PCI window to the appropriate slot */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);
	/* Calculate the address */
	addr = pc->host_controller->host_cfg_addr;
	addr |= (dev << BCMA_CORE_PCI_CFG_SLOT_SHIFT);
	addr |= (func << BCMA_CORE_PCI_CFG_FUN_SHIFT);
	addr |= (off & ~3);

out:
	return addr;
}

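/* Read len (1, 2 or 4) bytes from the config space of device dev /
 * function func at offset off. Device 0 is the PCI(e) core itself and
 * is accessed through its own config registers; external devices are
 * reached through a temporary ioremap of the config window, with a
 * bus-error-safe probe before the real read. */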
static int bcma_extpci_read_config(struct bcma_drv_pci *pc, unsigned int dev,
				   unsigned int func, unsigned int off,
				   void *buf, int len)
{
	int err = -EINVAL;
	u32 addr, val;
	void __iomem *mmio = NULL;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* We support only two functions on device 0 */
		if (func > 1)
			return -EINVAL;

		/* Accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFF);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			val = pcicore_read32(pc, addr);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}

		val = readl(mmio);
	}
	val >>= (8 * (off & 3));

	switch (len) {
	case 1:
		*((u8 *)buf) = (u8)val;
		break;
	case 2:
		*((u16 *)buf) = (u16)val;
		break;
	case 4:
		*((u32 *)buf) = (u32)val;
		break;
	}
	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

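/* Write len (1, 2 or 4) bytes to the config space of device dev /
 * function func at offset off. Sub-word writes are performed as a
 * read-modify-write of the containing 32-bit word; the extended
 * (offset >= 256) registers of device 0 go through the indirect
 * CONFIG_ADDR/CONFIG_DATA accessors. */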
static int bcma_extpci_write_config(struct bcma_drv_pci *pc, unsigned int dev,
				    unsigned int func, unsigned int off,
				    const void *buf, int len)
{
	int err = -EINVAL;
	u32 addr = 0, val = 0;
	void __iomem *mmio = NULL;
	u16 chipid = pc->core->bus->chipinfo.id;

	WARN_ON(!pc->hostmode);
	if (unlikely(len != 1 && len != 2 && len != 4))
		goto out;
	if (dev == 0) {
		/* Accesses to config registers with offsets >= 256
		 * require indirect access.
		 */
		if (off >= PCI_CONFIG_SPACE_SIZE) {
			addr = (func << 12);
			addr |= (off & 0x0FFF);
			val = bcma_pcie_read_config(pc, addr);
		} else {
			addr = pc->core->addr + BCMA_CORE_PCI_PCICFG0;
			addr |= (func << 8);
			addr |= (off & 0xfc);
			mmio = ioremap_nocache(addr, sizeof(val));
			if (!mmio)
				goto out;
			val = readl(mmio);
		}
	} else {
		addr = bcma_get_cfgspace_addr(pc, dev, func, off);
		if (unlikely(!addr))
			goto out;
		err = -ENOMEM;
		mmio = ioremap_nocache(addr, sizeof(val));
		if (!mmio)
			goto out;

		if (mips_busprobe32(val, mmio)) {
			val = 0xffffffff;
			goto unmap;
		}
	}

	switch (len) {
	case 1:
		val &= ~(0xFF << (8 * (off & 3)));
		val |= *((const u8 *)buf) << (8 * (off & 3));
		break;
	case 2:
		val &= ~(0xFFFF << (8 * (off & 3)));
		val |= *((const u16 *)buf) << (8 * (off & 3));
		break;
	case 4:
		val = *((const u32 *)buf);
		break;
	}
	if (dev == 0 && off >= PCI_CONFIG_SPACE_SIZE) {
		/* Write the extended config register back indirectly */
		bcma_pcie_write_config(pc, addr, val);
	} else {
		writel(val, mmio);

		if (chipid == BCMA_CHIP_ID_BCM4716 ||
		    chipid == BCMA_CHIP_ID_BCM4748)
			readl(mmio);
	}

	err = 0;
unmap:
	if (mmio)
		iounmap(mmio);
out:
	return err;
}

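/* struct pci_ops callbacks: translate devfn into slot/function, serialize
 * config cycles with the cfgspace spinlock and convert the result into
 * PCIBIOS_* return codes. */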
static int bcma_core_pci_hostmode_read_config(struct pci_bus *bus,
					      unsigned int devfn,
					      int reg, int size, u32 *val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_read_config(pc, PCI_SLOT(devfn),
				      PCI_FUNC(devfn), reg, val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

static int bcma_core_pci_hostmode_write_config(struct pci_bus *bus,
					       unsigned int devfn,
					       int reg, int size, u32 val)
{
	unsigned long flags;
	int err;
	struct bcma_drv_pci *pc;
	struct bcma_drv_pci_host *pc_host;

	pc_host = container_of(bus->ops, struct bcma_drv_pci_host, pci_ops);
	pc = pc_host->pdev;

	spin_lock_irqsave(&pc_host->cfgspace_lock, flags);
	err = bcma_extpci_write_config(pc, PCI_SLOT(devfn),
				       PCI_FUNC(devfn), reg, &val, size);
	spin_unlock_irqrestore(&pc_host->cfgspace_lock, flags);

	return err ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;
}

/* Return the config space offset of the requested capability, or 0 if the
 * device does not advertise it. When buf/buflen are provided, also copy up
 * to *buflen bytes of the capability data (excluding cap ID and next
 * pointer) and update *buflen with the number of bytes copied. */
static u8 __devinit bcma_find_pci_capability(struct bcma_drv_pci *pc,
					     unsigned int dev,
					     unsigned int func, u8 req_cap_id,
					     unsigned char *buf, u32 *buflen)
{
	u8 cap_id;
	u8 cap_ptr = 0;
	u32 bufsize;
	u8 byte_val;

	/* check for Header type 0 */
	bcma_extpci_read_config(pc, dev, func, PCI_HEADER_TYPE, &byte_val,
				sizeof(u8));
	if ((byte_val & 0x7f) != PCI_HEADER_TYPE_NORMAL)
		return cap_ptr;

	/* check if the capability pointer field exists */
	bcma_extpci_read_config(pc, dev, func, PCI_STATUS, &byte_val,
				sizeof(u8));
	if (!(byte_val & PCI_STATUS_CAP_LIST))
		return cap_ptr;

	/* check if the capability pointer is 0x00 */
	bcma_extpci_read_config(pc, dev, func, PCI_CAPABILITY_LIST, &cap_ptr,
				sizeof(u8));
	if (cap_ptr == 0x00)
		return cap_ptr;

	/* loop through the capability list and see if the requested
	 * capability exists */
	bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id, sizeof(u8));
	while (cap_id != req_cap_id) {
		bcma_extpci_read_config(pc, dev, func, cap_ptr + 1, &cap_ptr,
					sizeof(u8));
		if (cap_ptr == 0x00)
			return cap_ptr;
		bcma_extpci_read_config(pc, dev, func, cap_ptr, &cap_id,
					sizeof(u8));
	}

	/* found the caller requested capability */
	if ((buf != NULL) && (buflen != NULL)) {
		u8 cap_data;

		bufsize = *buflen;
		if (!bufsize)
			return cap_ptr;

		*buflen = 0;

		/* copy the capability data excluding cap ID and next ptr */
		cap_data = cap_ptr + 2;
		if ((bufsize + cap_data) > PCI_CONFIG_SPACE_SIZE)
			bufsize = PCI_CONFIG_SPACE_SIZE - cap_data;
		*buflen = bufsize;
		while (bufsize--) {
			bcma_extpci_read_config(pc, dev, func, cap_data, buf,
						sizeof(u8));
			cap_data++;
			buf++;
		}
	}

	return cap_ptr;
}

/* If the root port is capable of returning Config Request
 * Retry Status (CRS) Completion Status to software then
 * enable the feature.
 */
static void __devinit bcma_core_pci_enable_crs(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	u8 cap_ptr, root_ctrl, root_cap, dev;
	u16 val16;
	int i;

	cap_ptr = bcma_find_pci_capability(pc, 0, 0, PCI_CAP_ID_EXP, NULL,
					   NULL);
	root_cap = cap_ptr + PCI_EXP_RTCAP;
	bcma_extpci_read_config(pc, 0, 0, root_cap, &val16, sizeof(u16));
	if (val16 & BCMA_CORE_PCI_RC_CRS_VISIBILITY) {
		/* Enable CRS software visibility */
		root_ctrl = cap_ptr + PCI_EXP_RTCTL;
		val16 = PCI_EXP_RTCTL_CRSSVE;
		bcma_extpci_write_config(pc, 0, 0, root_ctrl, &val16,
					 sizeof(u16));

		/* Initiate a configuration request to read the vendor id
		 * field of the device function's config space header after
		 * 100 ms wait time from the end of Reset. If the device is
		 * not done with its internal initialization, it must at
		 * least return a completion TLP, with a completion status
		 * of "Configuration Request Retry Status (CRS)". The root
		 * complex must complete the request to the host by returning
		 * a read-data value of 0001h for the Vendor ID field and
		 * all 1s for any additional bytes included in the request.
		 * Poll using the config reads for max wait time of 1 sec or
		 * until we receive the successful completion status. Repeat
		 * the procedure for all the devices.
		 */
		for (dev = 1; dev < BCMA_PCI_SLOT_MAX; dev++) {
			for (i = 0; i < 100000; i++) {
				bcma_extpci_read_config(pc, dev, 0,
							PCI_VENDOR_ID, &val16,
							sizeof(val16));
				if (val16 != 0x1)
					break;
				udelay(10);
			}
			if (val16 == 0x1)
				bcma_err(bus, "PCI: Broken device in slot %d\n",
					 dev);
		}
	}
}

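/* Set up the PCI(e) core as a host controller: allocate and fill the
 * bcma_drv_pci_host structure, reset the external devices, program the
 * SBTOPCI access windows, enable CRS handling and bus mastering on the
 * bridge, and register the controller with the MIPS PCI code. */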
void __devinit bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
{
	struct bcma_bus *bus = pc->core->bus;
	struct bcma_drv_pci_host *pc_host;
	u32 tmp;
	u32 pci_membase_1G;
	unsigned long io_map_base;

	bcma_info(bus, "PCIEcore in host mode found\n");

	pc_host = kzalloc(sizeof(*pc_host), GFP_KERNEL);
	if (!pc_host) {
		bcma_err(bus, "cannot allocate memory\n");
		return;
	}

	pc->host_controller = pc_host;
	pc_host->pci_controller.io_resource = &pc_host->io_resource;
	pc_host->pci_controller.mem_resource = &pc_host->mem_resource;
	pc_host->pci_controller.pci_ops = &pc_host->pci_ops;
	pc_host->pdev = pc;

	pci_membase_1G = BCMA_SOC_PCI_DMA;
	pc_host->host_cfg_addr = BCMA_SOC_PCI_CFG;

	pc_host->pci_ops.read = bcma_core_pci_hostmode_read_config;
	pc_host->pci_ops.write = bcma_core_pci_hostmode_write_config;

	pc_host->mem_resource.name = "BCMA PCIcore external memory";
	pc_host->mem_resource.start = BCMA_SOC_PCI_DMA;
	pc_host->mem_resource.end = BCMA_SOC_PCI_DMA + BCMA_SOC_PCI_DMA_SZ - 1;
	pc_host->mem_resource.flags = IORESOURCE_MEM | IORESOURCE_PCI_FIXED;

	pc_host->io_resource.name = "BCMA PCIcore external I/O";
	pc_host->io_resource.start = 0x100;
	pc_host->io_resource.end = 0x7FF;
	pc_host->io_resource.flags = IORESOURCE_IO | IORESOURCE_PCI_FIXED;

	/* Reset RC */
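	/* Enable the reset output first, then also set the RST bit to bring
	 * the external devices out of reset; the 100 ms post-reset wait
	 * required by the PCIe spec happens after the access windows are
	 * programmed below. */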
	usleep_range(3000, 5000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST_OE);
	usleep_range(1000, 2000);
	pcicore_write32(pc, BCMA_CORE_PCI_CTL, BCMA_CORE_PCI_CTL_RST |
			BCMA_CORE_PCI_CTL_RST_OE);

	/* 64 MB I/O access window. On 4716, use
	 * sbtopcie0 to access the device registers. We
	 * can't use address match 2 (1 GB window) region
	 * as mips can't generate 64-bit address on the
	 * backplane.
	 */
	if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4716 ||
	    bus->chipinfo.id == BCMA_CHIP_ID_BCM4748) {
		pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
		pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
					    BCMA_SOC_PCI_MEM_SZ - 1;
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_MEM | BCMA_SOC_PCI_MEM);
	} else if (bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		tmp = BCMA_CORE_PCI_SBTOPCI_MEM;
		tmp |= BCMA_CORE_PCI_SBTOPCI_PREF;
		tmp |= BCMA_CORE_PCI_SBTOPCI_BURST;
		if (pc->core->core_unit == 0) {
			pc_host->mem_resource.start = BCMA_SOC_PCI_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pci_membase_1G = BCMA_SOC_PCIE_DMA_H32;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI_MEM);
		} else if (pc->core->core_unit == 1) {
			pc_host->mem_resource.start = BCMA_SOC_PCI1_MEM;
			pc_host->mem_resource.end = BCMA_SOC_PCI1_MEM +
						    BCMA_SOC_PCI_MEM_SZ - 1;
			pci_membase_1G = BCMA_SOC_PCIE1_DMA_H32;
			pc_host->host_cfg_addr = BCMA_SOC_PCI1_CFG;
			pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
					tmp | BCMA_SOC_PCI1_MEM);
		}
	} else {
		pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI0,
				BCMA_CORE_PCI_SBTOPCI_IO);
	}

	/* 64 MB configuration access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI1, BCMA_CORE_PCI_SBTOPCI_CFG0);

	/* 1 GB memory access window */
	pcicore_write32(pc, BCMA_CORE_PCI_SBTOPCI2,
			BCMA_CORE_PCI_SBTOPCI_MEM | pci_membase_1G);

	/* As per PCI Express Base Spec 1.1 we need to wait for
	 * at least 100 ms from the end of a reset (cold/warm/hot)
	 * before issuing configuration requests to PCI Express
	 * devices.
	 */
	msleep(100);

	bcma_core_pci_enable_crs(pc);

	/* Enable PCI bridge BAR0 memory & master access */
	tmp = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
	bcma_extpci_write_config(pc, 0, 0, PCI_COMMAND, &tmp, sizeof(tmp));

	/* Enable PCI interrupts */
	pcicore_write32(pc, BCMA_CORE_PCI_IMASK, BCMA_CORE_PCI_IMASK_INTA);

	/* Ok, ready to run, register it with the system. The following
	 * needs to change if we want to port hostmode to a non-MIPS
	 * platform. */
	io_map_base = (unsigned long)ioremap_nocache(pc_host->mem_resource.start,
						     resource_size(&pc_host->mem_resource));
	pc_host->pci_controller.io_map_base = io_map_base;
	set_io_port_base(pc_host->pci_controller.io_map_base);
	/* Give the PCI controller some time to configure itself with the new
	 * values. Not waiting at this point causes crashes of the machine. */
	usleep_range(10000, 15000);
	register_pci_controller(&pc_host->pci_controller);
}

/* Early PCI fixup for a device on the PCI-core bridge. */
static void bcma_core_pci_fixup_pcibridge(struct pci_dev *dev)
{
	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) != 0)
		return;

	pr_info("PCI: Fixing up bridge %s\n", pci_name(dev));

	/* Enable PCI bridge bus mastering and memory space */
	pci_set_master(dev);
	if (pcibios_enable_device(dev, ~0) < 0) {
		pr_err("PCI: BCMA bridge enable failed\n");
		return;
	}

	/* Enable PCI bridge BAR1 prefetch and burst */
	pci_write_config_dword(dev, BCMA_PCI_BAR1_CONTROL, 3);
}
DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_pcibridge);

/* Early PCI fixup for all PCI-cores to set the correct memory address. */
static void bcma_core_pci_fixup_addresses(struct pci_dev *dev)
{
	struct resource *res;
	int pos;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return;
	}
	if (PCI_SLOT(dev->devfn) == 0)
		return;

	pr_info("PCI: Fixing up addresses %s\n", pci_name(dev));

	for (pos = 0; pos < 6; pos++) {
		res = &dev->resource[pos];
		if (res->flags & (IORESOURCE_IO | IORESOURCE_MEM))
			pci_assign_resource(dev, pos);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, bcma_core_pci_fixup_addresses);

/* This function is called when doing a pci_enable_device().
 * We must first check if the device is a device on the PCI-core bridge. */
int bcma_core_pci_plat_dev_init(struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}
	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);

	pr_info("PCI: Fixing up device %s\n", pci_name(dev));

	/* Fix up interrupt lines */
	dev->irq = bcma_core_mips_irq(pc_host->pdev->core) + 2;
	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);

	return 0;
}
EXPORT_SYMBOL(bcma_core_pci_plat_dev_init);

/* PCI device IRQ mapping. */
int bcma_core_pci_pcibios_map_irq(const struct pci_dev *dev)
{
	struct bcma_drv_pci_host *pc_host;

	if (dev->bus->ops->read != bcma_core_pci_hostmode_read_config) {
		/* This is not a device on the PCI-core bridge. */
		return -ENODEV;
	}

	pc_host = container_of(dev->bus->ops, struct bcma_drv_pci_host,
			       pci_ops);
	return bcma_core_mips_irq(pc_host->pdev->core) + 2;
}
EXPORT_SYMBOL(bcma_core_pci_pcibios_map_irq);