blob: 8a257a7bf71389caa12c9244fc00e711ba02b157 [file] [log] [blame]
Michal Simekd3afa582010-01-18 14:42:34 +01001/*
2 * Contains common pci routines for ALL ppc platform
3 * (based on pci_32.c and pci_64.c)
4 *
5 * Port for PPC64 David Engebretsen, IBM Corp.
6 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
7 *
8 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
9 * Rework, based on alpha PCI code.
10 *
11 * Common pmac/prep/chrp pci routines. -- Cort
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#include <linux/kernel.h>
20#include <linux/pci.h>
21#include <linux/string.h>
22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/mm.h>
25#include <linux/list.h>
26#include <linux/syscalls.h>
27#include <linux/irq.h>
28#include <linux/vmalloc.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090029#include <linux/slab.h>
Grant Likelyf1ca09b2010-08-16 23:44:49 -060030#include <linux/of.h>
31#include <linux/of_address.h>
Sebastian Andrzej Siewior04bea682011-01-24 09:58:55 +053032#include <linux/of_pci.h>
Paul Gortmaker66421a62011-09-22 11:22:55 -040033#include <linux/export.h>
Michal Simekd3afa582010-01-18 14:42:34 +010034
35#include <asm/processor.h>
36#include <asm/io.h>
Michal Simekd3afa582010-01-18 14:42:34 +010037#include <asm/pci-bridge.h>
38#include <asm/byteorder.h>
39
/* Protects hose_list and global_phb_number below */
static DEFINE_SPINLOCK(hose_spinlock);
/* List of every registered PCI host controller (hose) on the system */
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;	/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

/* DMA operations used for PCI devices; defaults to direct mapping */
static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

unsigned long isa_io_base;	/* virtual base of ISA I/O space */
unsigned long pci_dram_offset;	/* CPU-to-PCI DRAM address offset */
static int pci_bus_count;	/* NOTE(review): appears unused in this file's visible portion */
55
Michal Simekd3afa582010-01-18 14:42:34 +010056void set_pci_dma_ops(struct dma_map_ops *dma_ops)
57{
58 pci_dma_ops = dma_ops;
59}
60
61struct dma_map_ops *get_pci_dma_ops(void)
62{
63 return pci_dma_ops;
64}
65EXPORT_SYMBOL(get_pci_dma_ops);
66
Michal Simekd3afa582010-01-18 14:42:34 +010067struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
68{
69 struct pci_controller *phb;
70
71 phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
72 if (!phb)
73 return NULL;
74 spin_lock(&hose_spinlock);
75 phb->global_number = global_phb_number++;
76 list_add_tail(&phb->list_node, &hose_list);
77 spin_unlock(&hose_spinlock);
78 phb->dn = dev;
79 phb->is_dynamic = mem_init_done;
80 return phb;
81}
82
83void pcibios_free_controller(struct pci_controller *phb)
84{
85 spin_lock(&hose_spinlock);
86 list_del(&phb->list_node);
87 spin_unlock(&hose_spinlock);
88
89 if (phb->is_dynamic)
90 kfree(phb);
91}
92
93static resource_size_t pcibios_io_size(const struct pci_controller *hose)
94{
Joe Perches28f65c12011-06-09 09:13:32 -070095 return resource_size(&hose->io_resource);
Michal Simekd3afa582010-01-18 14:42:34 +010096}
97
98int pcibios_vaddr_is_ioport(void __iomem *address)
99{
100 int ret = 0;
101 struct pci_controller *hose;
102 resource_size_t size;
103
104 spin_lock(&hose_spinlock);
105 list_for_each_entry(hose, &hose_list, list_node) {
106 size = pcibios_io_size(hose);
107 if (address >= hose->io_base_virt &&
108 address < (hose->io_base_virt + size)) {
109 ret = 1;
110 break;
111 }
112 }
113 spin_unlock(&hose_spinlock);
114 return ret;
115}
116
117unsigned long pci_address_to_pio(phys_addr_t address)
118{
119 struct pci_controller *hose;
120 resource_size_t size;
121 unsigned long ret = ~0;
122
123 spin_lock(&hose_spinlock);
124 list_for_each_entry(hose, &hose_list, list_node) {
125 size = pcibios_io_size(hose);
126 if (address >= hose->io_base_phys &&
127 address < (hose->io_base_phys + size)) {
128 unsigned long base =
129 (unsigned long)hose->io_base_virt - _IO_BASE;
130 ret = base + (address - hose->io_base_phys);
131 break;
132 }
133 }
134 spin_unlock(&hose_spinlock);
135
136 return ret;
137}
138EXPORT_SYMBOL_GPL(pci_address_to_pio);
139
140/*
141 * Return the domain number for this bus.
142 */
143int pci_domain_nr(struct pci_bus *bus)
144{
145 struct pci_controller *hose = pci_bus_to_host(bus);
146
147 return hose->global_number;
148}
149EXPORT_SYMBOL(pci_domain_nr);
150
151/* This routine is meant to be used early during boot, when the
152 * PCI bus numbers have not yet been assigned, and you need to
153 * issue PCI config cycles to an OF device.
154 * It could also be used to "fix" RTAS config cycles if you want
155 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
156 * config cycles.
157 */
158struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
159{
160 while (node) {
161 struct pci_controller *hose, *tmp;
162 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
163 if (hose->dn == node)
164 return hose;
165 node = node->parent;
166 }
167 return NULL;
168}
169
170static ssize_t pci_show_devspec(struct device *dev,
171 struct device_attribute *attr, char *buf)
172{
173 struct pci_dev *pdev;
174 struct device_node *np;
175
176 pdev = to_pci_dev(dev);
177 np = pci_device_to_OF_node(pdev);
178 if (np == NULL || np->full_name == NULL)
179 return 0;
180 return sprintf(buf, "%s", np->full_name);
181}
182static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
183
184/* Add sysfs properties */
185int pcibios_add_platform_entries(struct pci_dev *pdev)
186{
187 return device_create_file(&pdev->dev, &dev_attr_devspec);
188}
189
Myron Stoweb51d4a32011-10-28 15:47:56 -0600190void pcibios_set_master(struct pci_dev *dev)
191{
192 /* No special bus mastering setup handling */
193}
194
Michal Simekd3afa582010-01-18 14:42:34 +0100195char __devinit *pcibios_setup(char *str)
196{
197 return str;
198}
199
200/*
201 * Reads the interrupt pin to determine if interrupt is use by card.
202 * If the interrupt is used, then gets the interrupt line from the
203 * openfirmware and sets it in the pci_dev and pci_config line.
204 */
205int pci_read_irq_line(struct pci_dev *pci_dev)
206{
207 struct of_irq oirq;
208 unsigned int virq;
209
210 /* The current device-tree that iSeries generates from the HV
211 * PCI informations doesn't contain proper interrupt routing,
212 * and all the fallback would do is print out crap, so we
213 * don't attempt to resolve the interrupts here at all, some
214 * iSeries specific fixup does it.
215 *
216 * In the long run, we will hopefully fix the generated device-tree
217 * instead.
218 */
219 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
220
221#ifdef DEBUG
222 memset(&oirq, 0xff, sizeof(oirq));
223#endif
224 /* Try to get a mapping from the device-tree */
225 if (of_irq_map_pci(pci_dev, &oirq)) {
226 u8 line, pin;
227
228 /* If that fails, lets fallback to what is in the config
229 * space and map that through the default controller. We
230 * also set the type to level low since that's what PCI
231 * interrupts are. If your platform does differently, then
232 * either provide a proper interrupt tree or don't use this
233 * function.
234 */
235 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &pin))
236 return -1;
237 if (pin == 0)
238 return -1;
239 if (pci_read_config_byte(pci_dev, PCI_INTERRUPT_LINE, &line) ||
240 line == 0xff || line == 0) {
241 return -1;
242 }
243 pr_debug(" No map ! Using line %d (pin %d) from PCI config\n",
244 line, pin);
245
246 virq = irq_create_mapping(NULL, line);
Michal Simek18e3b102011-12-21 13:10:24 +0100247 if (virq)
Thomas Gleixner4adc1922011-03-24 14:52:04 +0100248 irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW);
Michal Simekd3afa582010-01-18 14:42:34 +0100249 } else {
250 pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n",
251 oirq.size, oirq.specifier[0], oirq.specifier[1],
252 oirq.controller ? oirq.controller->full_name :
253 "<default>");
254
255 virq = irq_create_of_mapping(oirq.controller, oirq.specifier,
256 oirq.size);
257 }
Michal Simek18e3b102011-12-21 13:10:24 +0100258 if (!virq) {
Michal Simekd3afa582010-01-18 14:42:34 +0100259 pr_debug(" Failed to map !\n");
260 return -1;
261 }
262
263 pr_debug(" Mapped to linux irq %d\n", virq);
264
265 pci_dev->irq = virq;
266
267 return 0;
268}
269EXPORT_SYMBOL(pci_read_irq_line);
270
271/*
272 * Platform support for /proc/bus/pci/X/Y mmap()s,
273 * modelled on the sparc64 implementation by Dave Miller.
274 * -- paulus.
275 */
276
277/*
278 * Adjust vm_pgoff of VMA such that it is the physical page offset
279 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
280 *
281 * Basically, the user finds the base address for his device which he wishes
282 * to mmap. They read the 32-bit value from the config space base register,
283 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
284 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
285 *
286 * Returns negative error code on failure, zero on success.
287 */
288static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
289 resource_size_t *offset,
290 enum pci_mmap_state mmap_state)
291{
292 struct pci_controller *hose = pci_bus_to_host(dev->bus);
293 unsigned long io_offset = 0;
294 int i, res_bit;
295
296 if (hose == 0)
297 return NULL; /* should never happen */
298
299 /* If memory, add on the PCI bridge address offset */
300 if (mmap_state == pci_mmap_mem) {
301#if 0 /* See comment in pci_resource_to_user() for why this is disabled */
302 *offset += hose->pci_mem_offset;
303#endif
304 res_bit = IORESOURCE_MEM;
305 } else {
306 io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
307 *offset += io_offset;
308 res_bit = IORESOURCE_IO;
309 }
310
311 /*
312 * Check that the offset requested corresponds to one of the
313 * resources of the device.
314 */
315 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
316 struct resource *rp = &dev->resource[i];
317 int flags = rp->flags;
318
319 /* treat ROM as memory (should be already) */
320 if (i == PCI_ROM_RESOURCE)
321 flags |= IORESOURCE_MEM;
322
323 /* Active and same type? */
324 if ((flags & res_bit) == 0)
325 continue;
326
327 /* In the range of this resource? */
328 if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
329 continue;
330
331 /* found it! construct the final physical address */
332 if (mmap_state == pci_mmap_io)
333 *offset += hose->io_base_phys - io_offset;
334 return rp;
335 }
336
337 return NULL;
338}
339
340/*
341 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
342 * device mapping.
343 */
344static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
345 pgprot_t protection,
346 enum pci_mmap_state mmap_state,
347 int write_combine)
348{
349 pgprot_t prot = protection;
350
351 /* Write combine is always 0 on non-memory space mappings. On
352 * memory space, if the user didn't pass 1, we check for a
353 * "prefetchable" resource. This is a bit hackish, but we use
354 * this to workaround the inability of /sysfs to provide a write
355 * combine bit
356 */
357 if (mmap_state != pci_mmap_mem)
358 write_combine = 0;
359 else if (write_combine == 0) {
360 if (rp->flags & IORESOURCE_PREFETCH)
361 write_combine = 1;
362 }
363
364 return pgprot_noncached(prot);
365}
366
367/*
368 * This one is used by /dev/mem and fbdev who have no clue about the
369 * PCI device, it tries to find the PCI device first and calls the
370 * above routine
371 */
372pgprot_t pci_phys_mem_access_prot(struct file *file,
373 unsigned long pfn,
374 unsigned long size,
375 pgprot_t prot)
376{
377 struct pci_dev *pdev = NULL;
378 struct resource *found = NULL;
379 resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
380 int i;
381
382 if (page_is_ram(pfn))
383 return prot;
384
385 prot = pgprot_noncached(prot);
386 for_each_pci_dev(pdev) {
387 for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
388 struct resource *rp = &pdev->resource[i];
389 int flags = rp->flags;
390
391 /* Active and same type? */
392 if ((flags & IORESOURCE_MEM) == 0)
393 continue;
394 /* In the range of this resource? */
395 if (offset < (rp->start & PAGE_MASK) ||
396 offset > rp->end)
397 continue;
398 found = rp;
399 break;
400 }
401 if (found)
402 break;
403 }
404 if (found) {
405 if (found->flags & IORESOURCE_PREFETCH)
406 prot = pgprot_noncached_wc(prot);
407 pci_dev_put(pdev);
408 }
409
410 pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
411 (unsigned long long)offset, pgprot_val(prot));
412
413 return prot;
414}
415
416/*
417 * Perform the actual remap of the pages for a PCI device mapping, as
418 * appropriate for this architecture. The region in the process to map
419 * is described by vm_start and vm_end members of VMA, the base physical
420 * address is found in vm_pgoff.
421 * The pci device structure is provided so that architectures may make mapping
422 * decisions on a per-device or per-bus basis.
423 *
424 * Returns a negative error code on failure, zero on success.
425 */
426int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
427 enum pci_mmap_state mmap_state, int write_combine)
428{
429 resource_size_t offset =
430 ((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
431 struct resource *rp;
432 int ret;
433
434 rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
435 if (rp == NULL)
436 return -EINVAL;
437
438 vma->vm_pgoff = offset >> PAGE_SHIFT;
439 vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
440 vma->vm_page_prot,
441 mmap_state, write_combine);
442
443 ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
444 vma->vm_end - vma->vm_start, vma->vm_page_prot);
445
446 return ret;
447}
448
/* This provides legacy IO read access on a bus.
 * Returns the number of bytes read (1, 2 or 4), -ENXIO when the port is
 * outside the PHB's I/O window, or -EINVAL on bad size/alignment.
 */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* Legacy PIO is little-endian; 16/32-bit accesses must be
	 * naturally aligned.
	 */
	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}
488
/* This provides legacy IO write access on a bus.
 * Returns the number of bytes written (1, 2 or 4), -ENXIO when the port
 * is outside the PHB's I/O window, or -EINVAL on bad size/alignment.
 */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}
533
/* This provides legacy IO or memory mmap access on a bus.
 * Returns 0 or the remap_pfn_range()/shmem_zero_setup() result on
 * success, -ENXIO when a legacy I/O request falls outside the PHB's
 * I/O window.
 */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error
		 * trying to mmap legacy_mem (instead of just moving on without
		 * legacy memory access) we fake it here by giving it anonymous
		 * memory, effectively behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			printk(KERN_DEBUG
				"Process %s (pid:%d) mapped non-existing PCI"
				"legacy memory for 0%04x:%02x\n",
				current->comm, current->pid, pci_domain_nr(bus),
				bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		/* Legacy I/O: validate the range against the PHB window,
		 * then translate to the physical I/O base.
		 */
		unsigned long io_offset = (unsigned long)hose->io_base_virt - \
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}
591
/* Translate a device resource into the start/end values exposed to
 * userland (sysfs/procfs).  For I/O resources the PIO cookie offset is
 * subtracted so userland sees port numbers; MMIO is passed through as a
 * fully fixed-up CPU address (see the long comment below for why).
 */
void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs insterface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}
630
631/**
632 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
633 * @hose: newly allocated pci_controller to be setup
634 * @dev: device node of the host bridge
635 * @primary: set if primary bus (32 bits only, soon to be deprecated)
636 *
637 * This function will parse the "ranges" property of a PCI host bridge device
638 * node and setup the resource mapping of a pci controller based on its
639 * content.
640 *
641 * Life would be boring if it wasn't for a few issues that we have to deal
642 * with here:
643 *
644 * - We can only cope with one IO space range and up to 3 Memory space
645 * ranges. However, some machines (thanks Apple !) tend to split their
646 * space into lots of small contiguous ranges. So we have to coalesce.
647 *
648 * - We can only cope with all memory ranges having the same offset
649 * between CPU addresses and PCI addresses. Unfortunately, some bridges
650 * are setup for a large 1:1 mapping along with a small "window" which
651 * maps PCI address 0 to some arbitrary high address of the CPU space in
652 * order to give access to the ISA memory hole.
653 * The way out of here that I've chosen for now is to always set the
654 * offset based on the first resource found, then override it if we
655 * have a different offset and the previous was set by an ISA hole.
656 *
657 * - Some busses have IO space not starting at 0, which causes trouble with
658 * the way we do our IO resource renumbering. The code somewhat deals with
659 * it for 64 bits but I would expect problems on 32 bits.
660 *
661 * - Some 32 bits platforms such as 4xx can have physical space larger than
662 * 32 bits so we need to use 64 bits values for the parsing
663 */
664void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
665 struct device_node *dev,
666 int primary)
667{
668 const u32 *ranges;
669 int rlen;
670 int pna = of_n_addr_cells(dev);
671 int np = pna + 5;
672 int memno = 0, isa_hole = -1;
673 u32 pci_space;
674 unsigned long long pci_addr, cpu_addr, pci_next, cpu_next, size;
675 unsigned long long isa_mb = 0;
676 struct resource *res;
677
678 printk(KERN_INFO "PCI host bridge %s %s ranges:\n",
679 dev->full_name, primary ? "(primary)" : "");
680
681 /* Get ranges property */
682 ranges = of_get_property(dev, "ranges", &rlen);
683 if (ranges == NULL)
684 return;
685
686 /* Parse it */
687 pr_debug("Parsing ranges property...\n");
688 while ((rlen -= np * 4) >= 0) {
689 /* Read next ranges element */
690 pci_space = ranges[0];
691 pci_addr = of_read_number(ranges + 1, 2);
692 cpu_addr = of_translate_address(dev, ranges + 3);
693 size = of_read_number(ranges + pna + 3, 2);
694
695 pr_debug("pci_space: 0x%08x pci_addr:0x%016llx "
696 "cpu_addr:0x%016llx size:0x%016llx\n",
697 pci_space, pci_addr, cpu_addr, size);
698
699 ranges += np;
700
701 /* If we failed translation or got a zero-sized region
702 * (some FW try to feed us with non sensical zero sized regions
703 * such as power3 which look like some kind of attempt
704 * at exposing the VGA memory hole)
705 */
706 if (cpu_addr == OF_BAD_ADDR || size == 0)
707 continue;
708
709 /* Now consume following elements while they are contiguous */
710 for (; rlen >= np * sizeof(u32);
711 ranges += np, rlen -= np * 4) {
712 if (ranges[0] != pci_space)
713 break;
714 pci_next = of_read_number(ranges + 1, 2);
715 cpu_next = of_translate_address(dev, ranges + 3);
716 if (pci_next != pci_addr + size ||
717 cpu_next != cpu_addr + size)
718 break;
719 size += of_read_number(ranges + pna + 3, 2);
720 }
721
722 /* Act based on address space type */
723 res = NULL;
724 switch ((pci_space >> 24) & 0x3) {
725 case 1: /* PCI IO space */
726 printk(KERN_INFO
727 " IO 0x%016llx..0x%016llx -> 0x%016llx\n",
728 cpu_addr, cpu_addr + size - 1, pci_addr);
729
730 /* We support only one IO range */
731 if (hose->pci_io_size) {
732 printk(KERN_INFO
733 " \\--> Skipped (too many) !\n");
734 continue;
735 }
736 /* On 32 bits, limit I/O space to 16MB */
737 if (size > 0x01000000)
738 size = 0x01000000;
739
740 /* 32 bits needs to map IOs here */
741 hose->io_base_virt = ioremap(cpu_addr, size);
742
743 /* Expect trouble if pci_addr is not 0 */
744 if (primary)
745 isa_io_base =
746 (unsigned long)hose->io_base_virt;
747 /* pci_io_size and io_base_phys always represent IO
748 * space starting at 0 so we factor in pci_addr
749 */
750 hose->pci_io_size = pci_addr + size;
751 hose->io_base_phys = cpu_addr - pci_addr;
752
753 /* Build resource */
754 res = &hose->io_resource;
755 res->flags = IORESOURCE_IO;
756 res->start = pci_addr;
757 break;
758 case 2: /* PCI Memory space */
759 case 3: /* PCI 64 bits Memory space */
760 printk(KERN_INFO
761 " MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
762 cpu_addr, cpu_addr + size - 1, pci_addr,
763 (pci_space & 0x40000000) ? "Prefetch" : "");
764
765 /* We support only 3 memory ranges */
766 if (memno >= 3) {
767 printk(KERN_INFO
768 " \\--> Skipped (too many) !\n");
769 continue;
770 }
771 /* Handles ISA memory hole space here */
772 if (pci_addr == 0) {
773 isa_mb = cpu_addr;
774 isa_hole = memno;
775 if (primary || isa_mem_base == 0)
776 isa_mem_base = cpu_addr;
777 hose->isa_mem_phys = cpu_addr;
778 hose->isa_mem_size = size;
779 }
780
781 /* We get the PCI/Mem offset from the first range or
782 * the, current one if the offset came from an ISA
783 * hole. If they don't match, bugger.
784 */
785 if (memno == 0 ||
786 (isa_hole >= 0 && pci_addr != 0 &&
787 hose->pci_mem_offset == isa_mb))
788 hose->pci_mem_offset = cpu_addr - pci_addr;
789 else if (pci_addr != 0 &&
790 hose->pci_mem_offset != cpu_addr - pci_addr) {
791 printk(KERN_INFO
792 " \\--> Skipped (offset mismatch) !\n");
793 continue;
794 }
795
796 /* Build resource */
797 res = &hose->mem_resources[memno++];
798 res->flags = IORESOURCE_MEM;
799 if (pci_space & 0x40000000)
800 res->flags |= IORESOURCE_PREFETCH;
801 res->start = cpu_addr;
802 break;
803 }
804 if (res != NULL) {
805 res->name = dev->full_name;
806 res->end = res->start + size - 1;
807 res->parent = NULL;
808 res->sibling = NULL;
809 res->child = NULL;
810 }
811 }
812
813 /* If there's an ISA hole and the pci_mem_offset is -not- matching
814 * the ISA hole offset, then we need to remove the ISA hole from
815 * the resource list for that brige
816 */
817 if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
818 unsigned int next = isa_hole + 1;
819 printk(KERN_INFO " Removing ISA hole at 0x%016llx\n", isa_mb);
820 if (next < memno)
821 memmove(&hose->mem_resources[isa_hole],
822 &hose->mem_resources[next],
823 sizeof(struct resource) * (memno - next));
824 hose->mem_resources[--memno].flags = 0;
825 }
826}
827
/* Decide whether to display the domain number in /proc.
 * Always 0 on this platform.  (The previous version looked up the hose
 * via pci_bus_to_host() but never used it -- dead code removed.)
 */
int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}
835
836void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
837 struct resource *res)
838{
839 resource_size_t offset = 0, mask = (resource_size_t)-1;
840 struct pci_controller *hose = pci_bus_to_host(dev->bus);
841
842 if (!hose)
843 return;
844 if (res->flags & IORESOURCE_IO) {
845 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
846 mask = 0xffffffffu;
847 } else if (res->flags & IORESOURCE_MEM)
848 offset = hose->pci_mem_offset;
849
850 region->start = (res->start - offset) & mask;
851 region->end = (res->end - offset) & mask;
852}
853EXPORT_SYMBOL(pcibios_resource_to_bus);
854
855void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
856 struct pci_bus_region *region)
857{
858 resource_size_t offset = 0, mask = (resource_size_t)-1;
859 struct pci_controller *hose = pci_bus_to_host(dev->bus);
860
861 if (!hose)
862 return;
863 if (res->flags & IORESOURCE_IO) {
864 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
865 mask = 0xffffffffu;
866 } else if (res->flags & IORESOURCE_MEM)
867 offset = hose->pci_mem_offset;
868 res->start = (region->start + offset) & mask;
869 res->end = (region->end + offset) & mask;
870}
871EXPORT_SYMBOL(pcibios_bus_to_resource);
872
873/* Fixup a bus resource into a linux resource */
874static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
875{
876 struct pci_controller *hose = pci_bus_to_host(dev->bus);
877 resource_size_t offset = 0, mask = (resource_size_t)-1;
878
879 if (res->flags & IORESOURCE_IO) {
880 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
881 mask = 0xffffffffu;
882 } else if (res->flags & IORESOURCE_MEM)
883 offset = hose->pci_mem_offset;
884
885 res->start = (res->start + offset) & mask;
886 res->end = (res->end + offset) & mask;
887}
888
/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		printk(KERN_ERR "No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		/* A start of 0 means firmware never assigned this BAR;
		 * mark it unset so the core re-assigns it later.
		 */
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]" \
				 "is unassigned\n",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,\
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		fixup_resource(res, dev);

		pr_debug("PCI:%s            %016llx-%016llx\n",
			 pci_name(dev),
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);
934
/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should covers cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 *
 * Returns 1 when the resource should be treated as unassigned, 0 when it
 * looks like firmware set it up.
 */
static int __devinit pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
							   struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* If the BAR is non-0 (res != pci_mem_offset) then it's
		 * probably been initialized by somebody
		 */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of
		 * the bridge resources covers that starting address (0 then
		 * it's good enough for us for memory
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead if that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}
1000
1001/* Fixup resources of a PCI<->PCI bridge */
1002static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1003{
1004 struct resource *res;
1005 int i;
1006
1007 struct pci_dev *dev = bus->self;
1008
Michal Simek8a66da72010-04-16 09:03:00 +02001009 pci_bus_for_each_resource(bus, res, i) {
Michal Simekd3afa582010-01-18 14:42:34 +01001010 if (!res)
1011 continue;
1012 if (!res->flags)
1013 continue;
1014 if (i >= 3 && bus->self->transparent)
1015 continue;
1016
1017 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
1018 pci_name(dev), i,
1019 (unsigned long long)res->start,\
1020 (unsigned long long)res->end,
1021 (unsigned int)res->flags);
1022
1023 /* Perform fixup */
1024 fixup_resource(res, dev);
1025
1026 /* Try to detect uninitialized P2P bridge resources,
1027 * and clear them out so they get re-assigned later
1028 */
1029 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1030 res->flags = 0;
1031 pr_debug("PCI:%s (unassigned)\n",
1032 pci_name(dev));
1033 } else {
1034 pr_debug("PCI:%s %016llx-%016llx\n",
1035 pci_name(dev),
1036 (unsigned long long)res->start,
1037 (unsigned long long)res->end);
1038 }
1039 }
1040}
1041
1042void __devinit pcibios_setup_bus_self(struct pci_bus *bus)
1043{
1044 /* Fix up the bus resources for P2P bridges */
1045 if (bus->self != NULL)
1046 pcibios_fixup_bridge(bus);
1047}
1048
1049void __devinit pcibios_setup_bus_devices(struct pci_bus *bus)
1050{
1051 struct pci_dev *dev;
1052
1053 pr_debug("PCI: Fixup bus devices %d (%s)\n",
1054 bus->number, bus->self ? pci_name(bus->self) : "PHB");
1055
1056 list_for_each_entry(dev, &bus->devices, bus_list) {
Michal Simekd3afa582010-01-18 14:42:34 +01001057 /* Setup OF node pointer in archdata */
Michal Simek088ab302010-08-16 10:31:54 +02001058 dev->dev.of_node = pci_device_to_OF_node(dev);
Michal Simekd3afa582010-01-18 14:42:34 +01001059
1060 /* Fixup NUMA node as it may not be setup yet by the generic
1061 * code and is needed by the DMA init
1062 */
1063 set_dev_node(&dev->dev, pcibus_to_node(dev->bus));
1064
1065 /* Hook up default DMA ops */
Nishanth Aravamudan6c3bbdd2010-09-15 11:05:51 -07001066 set_dma_ops(&dev->dev, pci_dma_ops);
1067 dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;
Michal Simekd3afa582010-01-18 14:42:34 +01001068
1069 /* Read default IRQs and fixup if necessary */
1070 pci_read_irq_line(dev);
1071 }
1072}
1073
1074void __devinit pcibios_fixup_bus(struct pci_bus *bus)
1075{
1076 /* When called from the generic PCI probe, read PCI<->PCI bridge
1077 * bases. This is -not- called when generating the PCI tree from
1078 * the OF device-tree.
1079 */
1080 if (bus->self != NULL)
1081 pci_read_bridge_bases(bus);
1082
1083 /* Now fixup the bus bus */
1084 pcibios_setup_bus_self(bus);
1085
1086 /* Now fixup devices on that bus */
1087 pcibios_setup_bus_devices(bus);
1088}
1089EXPORT_SYMBOL(pcibios_fixup_bus);
1090
/* Whether @dev may skip the ISA IO alignment in pcibios_align_resource().
 * Always 0 here: every device gets the alignment applied.
 */
static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	return 0;
}
1095
1096/*
1097 * We need to avoid collisions with `mirrored' VGA ports
1098 * and other strange ISA hardware, so we always want the
1099 * addresses to be allocated in the 0x000-0x0ff region
1100 * modulo 0x400.
1101 *
1102 * Why? Because some silly external IO cards only decode
1103 * the low 10 bits of the IO address. The 0x00-0xff region
1104 * is reserved for motherboard devices that decode all 16
1105 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
1106 * but we want to try to avoid allocating at 0x2900-0x2bff
1107 * which might have be mirrored at 0x0100-0x03ff..
1108 */
Michal Simekc86fac42010-04-16 09:04:51 +02001109resource_size_t pcibios_align_resource(void *data, const struct resource *res,
Michal Simekd3afa582010-01-18 14:42:34 +01001110 resource_size_t size, resource_size_t align)
1111{
1112 struct pci_dev *dev = data;
Michal Simekc86fac42010-04-16 09:04:51 +02001113 resource_size_t start = res->start;
Michal Simekd3afa582010-01-18 14:42:34 +01001114
1115 if (res->flags & IORESOURCE_IO) {
Michal Simekd3afa582010-01-18 14:42:34 +01001116 if (skip_isa_ioresource_align(dev))
Michal Simekc86fac42010-04-16 09:04:51 +02001117 return start;
1118 if (start & 0x300)
Michal Simekd3afa582010-01-18 14:42:34 +01001119 start = (start + 0x3ff) & ~0x3ff;
Michal Simekd3afa582010-01-18 14:42:34 +01001120 }
Michal Simekc86fac42010-04-16 09:04:51 +02001121
1122 return start;
Michal Simekd3afa582010-01-18 14:42:34 +01001123}
1124EXPORT_SYMBOL(pcibios_align_resource);
1125
1126/*
1127 * Reparent resource children of pr that conflict with res
1128 * under res, and make res replace those children.
1129 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	/* Link pointing at the first child that conflicts with res */
	struct resource **firstpp = NULL;

	/* Walk the (address-ordered) child list: remember where the
	 * overlapping run starts, stop once we are past res, and give
	 * up on any child that res cannot swallow whole.
	 */
	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	/* Splice res into parent's child list in place of the conflicting
	 * run [*firstpp .. *pp): res adopts that run as its children and
	 * the run is terminated with NULL under res.
	 */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	/* Point every adopted child back at its new parent */
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}
1162
1163/*
1164 * Handle resources of PCI devices. If the world were perfect, we could
1165 * just allocate all the resource regions and do nothing more. It isn't.
1166 * On the other hand, we cannot just re-allocate all devices, as it would
1167 * require us to know lots of host bridge internals. So we attempt to
1168 * keep as much of the original configuration as possible, but tweak it
1169 * when it's found to be wrong.
1170 *
1171 * Known BIOS problems we have to work around:
1172 * - I/O or memory regions not configured
1173 * - regions configured, but not enabled in the command register
1174 * - bogus I/O addresses above 64K used
1175 * - expansion ROMs left enabled (this may sound harmless, but given
1176 * the fact the PCI specs explicitly allow address decoders to be
1177 * shared between expansion ROMs and other resource regions, it's
1178 * at least dangerous)
1179 *
1180 * Our solution:
1181 * (1) Allocate resources for all buses behind PCI-to-PCI bridges.
1182 * This gives us fixed barriers on where we can allocate.
1183 * (2) Allocate resources for all enabled devices. If there is
1184 * a collision, just mark the resource as unallocated. Also
1185 * disable expansion ROMs during this step.
1186 * (3) Try to allocate resources for disabled devices. If the
1187 * resources were assigned correctly, everything goes well,
1188 * if they weren't, they won't disturb allocation of other
1189 * resources.
1190 * (4) Assign new addresses to resources which were either
1191 * not configured at all or misconfigured. If explicitly
1192 * requested by the user, configure expansion ROM address
1193 * as well.
1194 */
1195
1196void pcibios_allocate_bus_resources(struct pci_bus *bus)
1197{
1198 struct pci_bus *b;
1199 int i;
1200 struct resource *res, *pr;
1201
1202 pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
1203 pci_domain_nr(bus), bus->number);
1204
Michal Simek8a66da72010-04-16 09:03:00 +02001205 pci_bus_for_each_resource(bus, res, i) {
Michal Simekd3afa582010-01-18 14:42:34 +01001206 if (!res || !res->flags
1207 || res->start > res->end || res->parent)
1208 continue;
1209 if (bus->parent == NULL)
1210 pr = (res->flags & IORESOURCE_IO) ?
1211 &ioport_resource : &iomem_resource;
1212 else {
1213 /* Don't bother with non-root busses when
1214 * re-assigning all resources. We clear the
1215 * resource flags as if they were colliding
1216 * and as such ensure proper re-allocation
1217 * later.
1218 */
Michal Simekd3afa582010-01-18 14:42:34 +01001219 pr = pci_find_parent_resource(bus->self, res);
1220 if (pr == res) {
1221 /* this happens when the generic PCI
1222 * code (wrongly) decides that this
1223 * bridge is transparent -- paulus
1224 */
1225 continue;
1226 }
1227 }
1228
1229 pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx "
1230 "[0x%x], parent %p (%s)\n",
1231 bus->self ? pci_name(bus->self) : "PHB",
1232 bus->number, i,
1233 (unsigned long long)res->start,
1234 (unsigned long long)res->end,
1235 (unsigned int)res->flags,
1236 pr, (pr && pr->name) ? pr->name : "nil");
1237
1238 if (pr && !(pr->flags & IORESOURCE_UNSET)) {
1239 if (request_resource(pr, res) == 0)
1240 continue;
1241 /*
1242 * Must be a conflict with an existing entry.
1243 * Move that entry (or entries) under the
1244 * bridge resource and try again.
1245 */
1246 if (reparent_resources(pr, res) == 0)
1247 continue;
1248 }
1249 printk(KERN_WARNING "PCI: Cannot allocate resource region "
1250 "%d of PCI bridge %d, will remap\n", i, bus->number);
1251clear_resource:
Yinghai Lu837c4ef2010-06-03 13:43:03 -07001252 res->start = res->end = 0;
Michal Simekd3afa582010-01-18 14:42:34 +01001253 res->flags = 0;
1254 }
1255
1256 list_for_each_entry(b, &bus->children, node)
1257 pcibios_allocate_bus_resources(b);
1258}
1259
1260static inline void __devinit alloc_resource(struct pci_dev *dev, int idx)
1261{
1262 struct resource *pr, *r = &dev->resource[idx];
1263
1264 pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
1265 pci_name(dev), idx,
1266 (unsigned long long)r->start,
1267 (unsigned long long)r->end,
1268 (unsigned int)r->flags);
1269
1270 pr = pci_find_parent_resource(dev, r);
1271 if (!pr || (pr->flags & IORESOURCE_UNSET) ||
1272 request_resource(pr, r) < 0) {
1273 printk(KERN_WARNING "PCI: Cannot allocate resource region %d"
1274 " of device %s, will remap\n", idx, pci_name(dev));
1275 if (pr)
1276 pr_debug("PCI: parent is %p: %016llx-%016llx [%x]\n",
1277 pr,
1278 (unsigned long long)pr->start,
1279 (unsigned long long)pr->end,
1280 (unsigned int)pr->flags);
1281 /* We'll assign a new address later */
1282 r->flags |= IORESOURCE_UNSET;
1283 r->end -= r->start;
1284 r->start = 0;
1285 }
1286}
1287
1288static void __init pcibios_allocate_resources(int pass)
1289{
1290 struct pci_dev *dev = NULL;
1291 int idx, disabled;
1292 u16 command;
1293 struct resource *r;
1294
1295 for_each_pci_dev(dev) {
1296 pci_read_config_word(dev, PCI_COMMAND, &command);
1297 for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
1298 r = &dev->resource[idx];
1299 if (r->parent) /* Already allocated */
1300 continue;
1301 if (!r->flags || (r->flags & IORESOURCE_UNSET))
1302 continue; /* Not assigned at all */
1303 /* We only allocate ROMs on pass 1 just in case they
1304 * have been screwed up by firmware
1305 */
1306 if (idx == PCI_ROM_RESOURCE)
1307 disabled = 1;
1308 if (r->flags & IORESOURCE_IO)
1309 disabled = !(command & PCI_COMMAND_IO);
1310 else
1311 disabled = !(command & PCI_COMMAND_MEMORY);
1312 if (pass == disabled)
1313 alloc_resource(dev, idx);
1314 }
1315 if (pass)
1316 continue;
1317 r = &dev->resource[PCI_ROM_RESOURCE];
1318 if (r->flags) {
1319 /* Turn the ROM off, leave the resource region,
1320 * but keep it unregistered.
1321 */
1322 u32 reg;
1323 pci_read_config_dword(dev, dev->rom_base_reg, &reg);
1324 if (reg & PCI_ROM_ADDRESS_ENABLE) {
1325 pr_debug("PCI: Switching off ROM of %s\n",
1326 pci_name(dev));
1327 r->flags &= ~IORESOURCE_ROM_ENABLE;
1328 pci_write_config_dword(dev, dev->rom_base_reg,
1329 reg & ~PCI_ROM_ADDRESS_ENABLE);
1330 }
1331 }
1332 }
1333}
1334
1335static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
1336{
1337 struct pci_controller *hose = pci_bus_to_host(bus);
1338 resource_size_t offset;
1339 struct resource *res, *pres;
1340 int i;
1341
1342 pr_debug("Reserving legacy ranges for domain %04x\n",
1343 pci_domain_nr(bus));
1344
1345 /* Check for IO */
1346 if (!(hose->io_resource.flags & IORESOURCE_IO))
1347 goto no_io;
1348 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
1349 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1350 BUG_ON(res == NULL);
1351 res->name = "Legacy IO";
1352 res->flags = IORESOURCE_IO;
1353 res->start = offset;
1354 res->end = (offset + 0xfff) & 0xfffffffful;
1355 pr_debug("Candidate legacy IO: %pR\n", res);
1356 if (request_resource(&hose->io_resource, res)) {
1357 printk(KERN_DEBUG
1358 "PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
1359 pci_domain_nr(bus), bus->number, res);
1360 kfree(res);
1361 }
1362
1363 no_io:
1364 /* Check for memory */
1365 offset = hose->pci_mem_offset;
1366 pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
1367 for (i = 0; i < 3; i++) {
1368 pres = &hose->mem_resources[i];
1369 if (!(pres->flags & IORESOURCE_MEM))
1370 continue;
1371 pr_debug("hose mem res: %pR\n", pres);
1372 if ((pres->start - offset) <= 0xa0000 &&
1373 (pres->end - offset) >= 0xbffff)
1374 break;
1375 }
1376 if (i >= 3)
1377 return;
1378 res = kzalloc(sizeof(struct resource), GFP_KERNEL);
1379 BUG_ON(res == NULL);
1380 res->name = "Legacy VGA memory";
1381 res->flags = IORESOURCE_MEM;
1382 res->start = 0xa0000 + offset;
1383 res->end = 0xbffff + offset;
1384 pr_debug("Candidate VGA memory: %pR\n", res);
1385 if (request_resource(pres, res)) {
1386 printk(KERN_DEBUG
1387 "PCI %04x:%02x Cannot reserve VGA memory %pR\n",
1388 pci_domain_nr(bus), bus->number, res);
1389 kfree(res);
1390 }
1391}
1392
1393void __init pcibios_resource_survey(void)
1394{
1395 struct pci_bus *b;
1396
1397 /* Allocate and assign resources. If we re-assign everything, then
1398 * we skip the allocate phase
1399 */
1400 list_for_each_entry(b, &pci_root_buses, node)
1401 pcibios_allocate_bus_resources(b);
1402
Bjorn Helgaase5b36842012-02-23 20:18:57 -07001403 pcibios_allocate_resources(0);
1404 pcibios_allocate_resources(1);
Michal Simekd3afa582010-01-18 14:42:34 +01001405
1406 /* Before we start assigning unassigned resource, we try to reserve
1407 * the low IO area and the VGA memory area if they intersect the
1408 * bus available resources to avoid allocating things on top of them
1409 */
Bjorn Helgaase5b36842012-02-23 20:18:57 -07001410 list_for_each_entry(b, &pci_root_buses, node)
1411 pcibios_reserve_legacy_regions(b);
Michal Simekd3afa582010-01-18 14:42:34 +01001412
Bjorn Helgaase5b36842012-02-23 20:18:57 -07001413 /* Now proceed to assigning things that were left unassigned */
1414 pr_debug("PCI: Assigning unassigned resources...\n");
1415 pci_assign_unassigned_resources();
Michal Simekd3afa582010-01-18 14:42:34 +01001416}
1417
1418#ifdef CONFIG_HOTPLUG
1419
1420/* This is used by the PCI hotplug driver to allocate resource
1421 * of newly plugged busses. We can try to consolidate with the
1422 * rest of the code later, for now, keep it as-is as our main
1423 * resource allocation function doesn't deal with sub-trees yet.
1424 */
1425void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
1426{
1427 struct pci_dev *dev;
1428 struct pci_bus *child_bus;
1429
1430 list_for_each_entry(dev, &bus->devices, bus_list) {
1431 int i;
1432
1433 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1434 struct resource *r = &dev->resource[i];
1435
1436 if (r->parent || !r->start || !r->flags)
1437 continue;
1438
1439 pr_debug("PCI: Claiming %s: "
1440 "Resource %d: %016llx..%016llx [%x]\n",
1441 pci_name(dev), i,
1442 (unsigned long long)r->start,
1443 (unsigned long long)r->end,
1444 (unsigned int)r->flags);
1445
1446 pci_claim_resource(dev, i);
1447 }
1448 }
1449
1450 list_for_each_entry(child_bus, &bus->children, node)
1451 pcibios_claim_one_bus(child_bus);
1452}
1453EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
1454
1455
1456/* pcibios_finish_adding_to_bus
1457 *
1458 * This is to be called by the hotplug code after devices have been
1459 * added to a bus, this include calling it for a PHB that is just
1460 * being added
1461 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists. Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	/* eeh_add_device_tree_late(bus); */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
1478
1479#endif /* CONFIG_HOTPLUG */
1480
/* Enable decoding for @dev's resources selected by @mask; no
 * arch-specific quirks here, so defer to the generic helper.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}
1485
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001486static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
Michal Simekd3afa582010-01-18 14:42:34 +01001487{
Michal Simekd3afa582010-01-18 14:42:34 +01001488 struct resource *res;
1489 int i;
1490
1491 /* Hookup PHB IO resource */
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001492 res = &hose->io_resource;
1493
1494 /* Fixup IO space offset */
1495 io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
1496 res->start = (res->start + io_offset) & 0xffffffffu;
1497 res->end = (res->end + io_offset) & 0xffffffffu;
Michal Simekd3afa582010-01-18 14:42:34 +01001498
1499 if (!res->flags) {
1500 printk(KERN_WARNING "PCI: I/O resource not set for host"
1501 " bridge %s (domain %d)\n",
1502 hose->dn->full_name, hose->global_number);
1503 /* Workaround for lack of IO resource only on 32-bit */
1504 res->start = (unsigned long)hose->io_base_virt - isa_io_base;
1505 res->end = res->start + IO_SPACE_LIMIT;
1506 res->flags = IORESOURCE_IO;
1507 }
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001508 pci_add_resource(resources, res);
Michal Simekd3afa582010-01-18 14:42:34 +01001509
1510 pr_debug("PCI: PHB IO resource = %016llx-%016llx [%lx]\n",
1511 (unsigned long long)res->start,
1512 (unsigned long long)res->end,
1513 (unsigned long)res->flags);
1514
1515 /* Hookup PHB Memory resources */
1516 for (i = 0; i < 3; ++i) {
1517 res = &hose->mem_resources[i];
1518 if (!res->flags) {
1519 if (i > 0)
1520 continue;
1521 printk(KERN_ERR "PCI: Memory resource 0 not set for "
1522 "host bridge %s (domain %d)\n",
1523 hose->dn->full_name, hose->global_number);
1524
1525 /* Workaround for lack of MEM resource only on 32-bit */
1526 res->start = hose->pci_mem_offset;
1527 res->end = (resource_size_t)-1LL;
1528 res->flags = IORESOURCE_MEM;
1529
1530 }
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001531 pci_add_resource(resources, res);
Michal Simekd3afa582010-01-18 14:42:34 +01001532
1533 pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
1534 i, (unsigned long long)res->start,
1535 (unsigned long long)res->end,
1536 (unsigned long)res->flags);
1537 }
1538
1539 pr_debug("PCI: PHB MEM offset = %016llx\n",
1540 (unsigned long long)hose->pci_mem_offset);
1541 pr_debug("PCI: PHB IO offset = %08lx\n",
1542 (unsigned long)hose->io_base_virt - _IO_BASE);
1543}
1544
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001545struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
1546{
1547 struct pci_controller *hose = bus->sysdata;
1548
1549 return of_node_get(hose->dn);
1550}
1551
1552static void __devinit pcibios_scan_phb(struct pci_controller *hose)
1553{
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001554 LIST_HEAD(resources);
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001555 struct pci_bus *bus;
1556 struct device_node *node = hose->dn;
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001557
1558 pr_debug("PCI: Scanning PHB %s\n",
1559 node ? node->full_name : "<NO NAME>");
1560
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001561 pcibios_setup_phb_resources(hose, &resources);
1562
Bjorn Helgaas4723b982011-10-28 16:26:52 -06001563 bus = pci_scan_root_bus(hose->parent, hose->first_busno,
1564 hose->ops, hose, &resources);
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001565 if (bus == NULL) {
1566 printk(KERN_ERR "Failed to create bus for PCI domain %04x\n",
1567 hose->global_number);
Bjorn Helgaas58de74b2011-10-28 16:26:46 -06001568 pci_free_resource_list(&resources);
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001569 return;
1570 }
1571 bus->secondary = hose->first_busno;
1572 hose->bus = bus;
1573
Bjorn Helgaas4723b982011-10-28 16:26:52 -06001574 hose->last_busno = bus->subordinate;
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001575}
1576
1577static int __init pcibios_init(void)
1578{
1579 struct pci_controller *hose, *tmp;
1580 int next_busno = 0;
1581
1582 printk(KERN_INFO "PCI: Probing PCI hardware\n");
1583
1584 /* Scan all of the recorded PCI controllers. */
1585 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1586 hose->last_busno = 0xff;
1587 pcibios_scan_phb(hose);
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001588 if (next_busno <= hose->last_busno)
1589 next_busno = hose->last_busno + 1;
1590 }
1591 pci_bus_count = next_busno;
1592
1593 /* Call common code to handle resource allocation */
1594 pcibios_resource_survey();
1595
1596 return 0;
1597}
1598
1599subsys_initcall(pcibios_init);
1600
1601static struct pci_controller *pci_bus_to_hose(int bus)
1602{
1603 struct pci_controller *hose, *tmp;
1604
1605 list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
1606 if (bus >= hose->first_busno && bus <= hose->last_busno)
1607 return hose;
1608 return NULL;
1609}
1610
1611/* Provide information on locations of various I/O regions in physical
1612 * memory. Do this on a per-card basis so that we choose the right
1613 * root bridge.
1614 * Note that the returned IO or memory base is a physical address
1615 */
1616
1617long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
1618{
1619 struct pci_controller *hose;
1620 long result = -EOPNOTSUPP;
1621
1622 hose = pci_bus_to_hose(bus);
1623 if (!hose)
1624 return -ENODEV;
1625
1626 switch (which) {
1627 case IOBASE_BRIDGE_NUMBER:
1628 return (long)hose->first_busno;
1629 case IOBASE_MEMORY:
1630 return (long)hose->pci_mem_offset;
1631 case IOBASE_IO:
1632 return (long)hose->io_base_phys;
1633 case IOBASE_ISA_IO:
1634 return (long)isa_io_base;
1635 case IOBASE_ISA_MEM:
1636 return (long)isa_mem_base;
1637 }
1638
1639 return result;
1640}
1641
Michal Simekd3afa582010-01-18 14:42:34 +01001642/*
1643 * Null PCI config access functions, for the case when we can't
1644 * find a hose.
1645 */
/* Generates a per-size null config accessor that always fails with
 * PCIBIOS_DEVICE_NOT_FOUND.
 * NOTE(review): this macro is not expanded anywhere in the visible
 * code - the null_{read,write}_config helpers below are written out
 * by hand.  Confirm it is unused elsewhere before removing.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}
1652
/* Config-read stub: always reports "no device" */
static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Config-write stub: always reports "no device" */
static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

/* Fallback ops used by fake_pci_bus() when no hose is known */
static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};
1671
1672/*
1673 * These functions are used early on before PCI scanning is done
1674 * and all of the pci_dev and pci_bus structures have been created.
1675 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	/* Single shared scratch bus, overwritten on every call.
	 * NOTE(review): not re-entrant - assumes early config accesses
	 * are not concurrent; confirm before any early-SMP use.
	 */
	static struct pci_bus bus;

	if (!hose)
		printk(KERN_ERR "Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	/* Fall back to the always-failing null ops without a hose */
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}
1689
/* Generates early_{read,write}_config_{byte,word,dword}(): routes a
 * config access through a fake bus so it works before the real
 * pci_bus/pci_dev structures exist.
 */
#define EARLY_PCI_OP(rw, size, type) \
int early_##rw##_config_##size(struct pci_controller *hose, int bus, \
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)
1704
/* Early (pre-scan) variant of pci_bus_find_capability(), usable
 * before pci_bus structures exist.
 */
int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}
Benjamin Herrenschmidtbf13a6f2011-04-11 11:17:26 +10001710