/*
 * Contains common pci routines for ALL ppc platform
 * (based on pci_32.c and pci_64.c)
 *
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 * Rework, based on alpha PCI code.
 *
 * Common pmac/prep/chrp pci routines. -- Cort
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/syscalls.h>
#include <linux/irq.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_pci.h>
#include <linux/export.h>

#include <asm/processor.h>
#include <linux/io.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>

static DEFINE_SPINLOCK(hose_spinlock);
LIST_HEAD(hose_list);

/* XXX kill that some day ... */
static int global_phb_number;		/* Global phb counter */

/* ISA Memory physical address */
resource_size_t isa_mem_base;

static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;

unsigned long isa_io_base;
unsigned long pci_dram_offset;
static int pci_bus_count;

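/*
 * Allow platform code to override the DMA mapping operations used for
 * PCI devices (they default to dma_direct_ops above).
 */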
void set_pci_dma_ops(struct dma_map_ops *dma_ops)
{
	pci_dma_ops = dma_ops;
}

struct dma_map_ops *get_pci_dma_ops(void)
{
	return pci_dma_ops;
}
EXPORT_SYMBOL(get_pci_dma_ops);

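/*
 * Allocate a new PCI host bridge (PHB) structure, give it the next
 * global domain number and add it to hose_list under hose_spinlock.
 */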
struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
{
	struct pci_controller *phb;

	phb = zalloc_maybe_bootmem(sizeof(struct pci_controller), GFP_KERNEL);
	if (!phb)
		return NULL;
	spin_lock(&hose_spinlock);
	phb->global_number = global_phb_number++;
	list_add_tail(&phb->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
	phb->dn = dev;
	phb->is_dynamic = mem_init_done;
	return phb;
}

void pcibios_free_controller(struct pci_controller *phb)
{
	spin_lock(&hose_spinlock);
	list_del(&phb->list_node);
	spin_unlock(&hose_spinlock);

	if (phb->is_dynamic)
		kfree(phb);
}

static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
	return resource_size(&hose->io_resource);
}

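/*
 * Return 1 if the given virtual address falls inside the IO window of
 * any registered host bridge, 0 otherwise.
 */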
int pcibios_vaddr_is_ioport(void __iomem *address)
{
	int ret = 0;
	struct pci_controller *hose;
	resource_size_t size;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_virt &&
		    address < (hose->io_base_virt + size)) {
			ret = 1;
			break;
		}
	}
	spin_unlock(&hose_spinlock);
	return ret;
}

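/*
 * Translate a CPU physical address into the corresponding PIO port
 * number (the offset usable with inb()/outb()), or ~0 if the address
 * is not covered by any host bridge IO window.
 */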
unsigned long pci_address_to_pio(phys_addr_t address)
{
	struct pci_controller *hose;
	resource_size_t size;
	unsigned long ret = ~0;

	spin_lock(&hose_spinlock);
	list_for_each_entry(hose, &hose_list, list_node) {
		size = pcibios_io_size(hose);
		if (address >= hose->io_base_phys &&
		    address < (hose->io_base_phys + size)) {
			unsigned long base =
				(unsigned long)hose->io_base_virt - _IO_BASE;
			ret = base + (address - hose->io_base_phys);
			break;
		}
	}
	spin_unlock(&hose_spinlock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_address_to_pio);

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
}
EXPORT_SYMBOL(pci_domain_nr);

/* This routine is meant to be used early during boot, when the
 * PCI bus numbers have not yet been assigned, and you need to
 * issue PCI config cycles to an OF device.
 * It could also be used to "fix" RTAS config cycles if you want
 * to set pci_assign_all_buses to 1 and still use RTAS for PCI
 * config cycles.
 */
struct pci_controller *pci_find_hose_for_OF_device(struct device_node *node)
{
	while (node) {
		struct pci_controller *hose, *tmp;
		list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
			if (hose->dn == node)
				return hose;
		node = node->parent;
	}
	return NULL;
}

static ssize_t pci_show_devspec(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);

/* Add sysfs properties */
int pcibios_add_platform_entries(struct pci_dev *pdev)
{
	return device_create_file(&pdev->dev, &dev_attr_devspec);
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address for his device which he wishes
 * to mmap. They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       resource_size_t *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (!hose)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
#if 0	/* See comment in pci_resource_to_user() for why this is disabled */
		*offset += hose->pci_mem_offset;
#endif
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	pgprot_t prot = protection;

	/* Write combine is always 0 on non-memory space mappings. On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource. This is a bit hackish, but we use
	 * this to workaround the inability of /sysfs to provide a write
	 * combine bit
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	return pgprot_noncached(prot);
}

/*
 * This one is used by /dev/mem and fbdev who have no clue about the
 * PCI device, it tries to find the PCI device first and calls the
 * above routine
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long pfn,
				  unsigned long size,
				  pgprot_t prot)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	resource_size_t offset = ((resource_size_t)pfn) << PAGE_SHIFT;
	int i;

	if (page_is_ram(pfn))
		return prot;

	prot = pgprot_noncached(prot);
	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot = pgprot_noncached_wc(prot);
		pci_dev_put(pdev);
	}

	pr_debug("PCI: Non-PCI map for %llx, prot: %lx\n",
		 (unsigned long long)offset, pgprot_val(prot));

	return prot;
}

/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

/* This provides legacy IO read access on a bus */
int pci_legacy_read(struct pci_bus *bus, loff_t port, u32 *val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	switch (size) {
	case 1:
		*((u8 *)val) = in_8(addr);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		*((u16 *)val) = in_le16(addr);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		*((u32 *)val) = in_le32(addr);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO write access on a bus */
int pci_legacy_write(struct pci_bus *bus, loff_t port, u32 val, size_t size)
{
	unsigned long offset;
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct resource *rp = &hose->io_resource;
	void __iomem *addr;

	/* Check if port can be supported by that bus. We only check
	 * the ranges of the PHB though, not the bus itself as the rules
	 * for forwarding legacy cycles down bridges are not our problem
	 * here. So if the host bridge supports it, we do it.
	 */
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	offset += port;

	if (!(rp->flags & IORESOURCE_IO))
		return -ENXIO;
	if (offset < rp->start || (offset + size) > rp->end)
		return -ENXIO;
	addr = hose->io_base_virt + port;

	/* WARNING: The generic code is idiotic. It gets passed a pointer
	 * to what can be a 1, 2 or 4 byte quantity and always reads that
	 * as a u32, which means that we have to correct the location of
	 * the data read within those 32 bits for size 1 and 2
	 */
	switch (size) {
	case 1:
		out_8(addr, val >> 24);
		return 1;
	case 2:
		if (port & 1)
			return -EINVAL;
		out_le16(addr, val >> 16);
		return 2;
	case 4:
		if (port & 3)
			return -EINVAL;
		out_le32(addr, val);
		return 4;
	}
	return -EINVAL;
}

/* This provides legacy IO or memory mmap access on a bus */
int pci_mmap_legacy_page_range(struct pci_bus *bus,
			       struct vm_area_struct *vma,
			       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset =
		((resource_size_t)vma->vm_pgoff) << PAGE_SHIFT;
	resource_size_t size = vma->vm_end - vma->vm_start;
	struct resource *rp;

	pr_debug("pci_mmap_legacy_page_range(%04x:%02x, %s @%llx..%llx)\n",
		 pci_domain_nr(bus), bus->number,
		 mmap_state == pci_mmap_mem ? "MEM" : "IO",
		 (unsigned long long)offset,
		 (unsigned long long)(offset + size - 1));

	if (mmap_state == pci_mmap_mem) {
		/* Hack alert !
		 *
		 * Because X is lame and can fail starting if it gets an error
		 * trying to mmap legacy_mem (instead of just moving on without
		 * legacy memory access) we fake it here by giving it anonymous
		 * memory, effectively behaving just like /dev/zero
		 */
		if ((offset + size) > hose->isa_mem_size) {
#ifdef CONFIG_MMU
			pr_debug("Process %s (pid:%d) mapped non-existing PCI",
				 current->comm, current->pid);
			pr_debug("legacy memory for 0%04x:%02x\n",
				 pci_domain_nr(bus), bus->number);
#endif
			if (vma->vm_flags & VM_SHARED)
				return shmem_zero_setup(vma);
			return 0;
		}
		offset += hose->isa_mem_phys;
	} else {
		unsigned long io_offset = (unsigned long)hose->io_base_virt -
								_IO_BASE;
		unsigned long roffset = offset + io_offset;
		rp = &hose->io_resource;
		if (!(rp->flags & IORESOURCE_IO))
			return -ENXIO;
		if (roffset < rp->start || (roffset + size) > rp->end)
			return -ENXIO;
		offset += hose->io_base_phys;
	}
	pr_debug(" -> mapping phys %llx\n", (unsigned long long)offset);

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

void pci_resource_to_user(const struct pci_dev *dev, int bar,
			  const struct resource *rsrc,
			  resource_size_t *start, resource_size_t *end)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	resource_size_t offset = 0;

	if (hose == NULL)
		return;

	if (rsrc->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;

	/* We pass a fully fixed up address to userland for MMIO instead of
	 * a BAR value because X is lame and expects to be able to use that
	 * to pass to /dev/mem !
	 *
	 * That means that we'll have potentially 64 bits values where some
	 * userland apps only expect 32 (like X itself since it thinks only
	 * Sparc has 64 bits MMIO) but if we don't do that, we break it on
	 * 32 bits CHRPs :-(
	 *
	 * Hopefully, the sysfs interface is immune to that gunk. Once X
	 * has been fixed (and the fix spread enough), we can re-enable the
	 * 2 lines below and pass down a BAR value to userland. In that case
	 * we'll also have to re-enable the matching code in
	 * __pci_mmap_make_offset().
	 *
	 * BenH.
	 */
#if 0
	else if (rsrc->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;
#endif

	*start = rsrc->start - offset;
	*end = rsrc->end - offset;
}

/**
 * pci_process_bridge_OF_ranges - Parse PCI bridge resources from device tree
 * @hose: newly allocated pci_controller to be setup
 * @dev: device node of the host bridge
 * @primary: set if primary bus (32 bits only, soon to be deprecated)
 *
 * This function will parse the "ranges" property of a PCI host bridge device
 * node and setup the resource mapping of a pci controller based on its
 * content.
 *
 * Life would be boring if it wasn't for a few issues that we have to deal
 * with here:
 *
 *   - We can only cope with one IO space range and up to 3 Memory space
 *     ranges. However, some machines (thanks Apple !) tend to split their
 *     space into lots of small contiguous ranges. So we have to coalesce.
 *
 *   - We can only cope with all memory ranges having the same offset
 *     between CPU addresses and PCI addresses. Unfortunately, some bridges
 *     are setup for a large 1:1 mapping along with a small "window" which
 *     maps PCI address 0 to some arbitrary high address of the CPU space in
 *     order to give access to the ISA memory hole.
 *     The way out of here that I've chosen for now is to always set the
 *     offset based on the first resource found, then override it if we
 *     have a different offset and the previous was set by an ISA hole.
 *
 *   - Some busses have IO space not starting at 0, which causes trouble with
 *     the way we do our IO resource renumbering. The code somewhat deals with
 *     it for 64 bits but I would expect problems on 32 bits.
 *
 *   - Some 32 bits platforms such as 4xx can have physical space larger than
 *     32 bits so we need to use 64 bits values for the parsing
 */
void pci_process_bridge_OF_ranges(struct pci_controller *hose,
				  struct device_node *dev, int primary)
{
	int memno = 0, isa_hole = -1;
	unsigned long long isa_mb = 0;
	struct resource *res;
	struct of_pci_range range;
	struct of_pci_range_parser parser;

	pr_info("PCI host bridge %s %s ranges:\n",
		dev->full_name, primary ? "(primary)" : "");

	/* Check for ranges property */
	if (of_pci_range_parser_init(&parser, dev))
		return;

	pr_debug("Parsing ranges property...\n");
	for_each_of_pci_range(&parser, &range) {
		/* Read next ranges element */
		pr_debug("pci_space: 0x%08x pci_addr:0x%016llx ",
			 range.pci_space, range.pci_addr);
		pr_debug("cpu_addr:0x%016llx size:0x%016llx\n",
			 range.cpu_addr, range.size);

		/* If we failed translation or got a zero-sized region
		 * (some FW try to feed us with nonsensical zero-sized regions
		 * such as power3 which look like some kind of attempt
		 * at exposing the VGA memory hole)
		 */
		if (range.cpu_addr == OF_BAD_ADDR || range.size == 0)
			continue;

		/* Act based on address space type */
		res = NULL;
		switch (range.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			pr_info("  IO 0x%016llx..0x%016llx -> 0x%016llx\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr);

			/* We support only one IO range */
			if (hose->pci_io_size) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* On 32 bits, limit I/O space to 16MB */
			if (range.size > 0x01000000)
				range.size = 0x01000000;

			/* 32 bits needs to map IOs here */
			hose->io_base_virt = ioremap(range.cpu_addr,
						     range.size);

			/* Expect trouble if pci_addr is not 0 */
			if (primary)
				isa_io_base =
					(unsigned long)hose->io_base_virt;
			/* pci_io_size and io_base_phys always represent IO
			 * space starting at 0 so we factor in pci_addr
			 */
			hose->pci_io_size = range.pci_addr + range.size;
			hose->io_base_phys = range.cpu_addr - range.pci_addr;

			/* Build resource */
			res = &hose->io_resource;
			range.cpu_addr = range.pci_addr;

			break;
		case IORESOURCE_MEM:
			pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx %s\n",
				range.cpu_addr, range.cpu_addr + range.size - 1,
				range.pci_addr,
				(range.pci_space & 0x40000000) ?
				"Prefetch" : "");

			/* We support only 3 memory ranges */
			if (memno >= 3) {
				pr_info(" \\--> Skipped (too many) !\n");
				continue;
			}
			/* Handles ISA memory hole space here */
			if (range.pci_addr == 0) {
				isa_mb = range.cpu_addr;
				isa_hole = memno;
				if (primary || isa_mem_base == 0)
					isa_mem_base = range.cpu_addr;
				hose->isa_mem_phys = range.cpu_addr;
				hose->isa_mem_size = range.size;
			}

			/* We get the PCI/Mem offset from the first range or
			 * the current one if the offset came from an ISA
			 * hole. If they don't match, bugger.
			 */
			if (memno == 0 ||
			    (isa_hole >= 0 && range.pci_addr != 0 &&
			     hose->pci_mem_offset == isa_mb))
				hose->pci_mem_offset = range.cpu_addr -
							range.pci_addr;
			else if (range.pci_addr != 0 &&
				 hose->pci_mem_offset != range.cpu_addr -
							range.pci_addr) {
				pr_info(" \\--> Skipped (offset mismatch) !\n");
				continue;
			}

			/* Build resource */
			res = &hose->mem_resources[memno++];
			break;
		}
		if (res != NULL)
			of_pci_range_to_resource(&range, dev, res);
	}

	/* If there's an ISA hole and the pci_mem_offset is -not- matching
	 * the ISA hole offset, then we need to remove the ISA hole from
	 * the resource list for that bridge
	 */
	if (isa_hole >= 0 && hose->pci_mem_offset != isa_mb) {
		unsigned int next = isa_hole + 1;
		pr_info(" Removing ISA hole at 0x%016llx\n", isa_mb);
		if (next < memno)
			memmove(&hose->mem_resources[isa_hole],
				&hose->mem_resources[next],
				sizeof(struct resource) * (memno - next));
		hose->mem_resources[--memno].flags = 0;
	}
}

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
	return 0;
}

/* This header fixup will do the resource fixup for all devices as they are
 * probed, but not for bridge ranges
 */
static void pcibios_fixup_resources(struct pci_dev *dev)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	int i;

	if (!hose) {
		pr_err("No host bridge for PCI dev %s !\n",
		       pci_name(dev));
		return;
	}
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
		struct resource *res = dev->resource + i;
		if (!res->flags)
			continue;
		if (res->start == 0) {
			pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]",
				 pci_name(dev), i,
				 (unsigned long long)res->start,
				 (unsigned long long)res->end,
				 (unsigned int)res->flags);
			pr_debug("is unassigned\n");
			res->end -= res->start;
			res->start = 0;
			res->flags |= IORESOURCE_UNSET;
			continue;
		}

		pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_resources);

/* This function tries to figure out if a bridge resource has been initialized
 * by the firmware or not. It doesn't have to be absolutely bullet proof, but
 * things go more smoothly when it gets it right. It should cover cases such
 * as Apple "closed" bridge resources and bare-metal pSeries unassigned bridges
 */
static int pcibios_uninitialized_bridge_resource(struct pci_bus *bus,
						 struct resource *res)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	resource_size_t offset;
	u16 command;
	int i;

	/* Job is a bit different between memory and IO */
	if (res->flags & IORESOURCE_MEM) {
		/* If the BAR is non-0 (res != pci_mem_offset) then it's
		 * probably been initialized by somebody
		 */
		if (res->start != hose->pci_mem_offset)
			return 0;

		/* The BAR is 0, let's check if memory decoding is enabled on
		 * the bridge. If not, we consider it unassigned
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if ((command & PCI_COMMAND_MEMORY) == 0)
			return 1;

		/* Memory decoding is enabled and the BAR is 0. If any of
		 * the bridge resources covers that starting address (0) then
		 * it's good enough for us for memory
		 */
		for (i = 0; i < 3; i++) {
			if ((hose->mem_resources[i].flags & IORESOURCE_MEM) &&
			    hose->mem_resources[i].start == hose->pci_mem_offset)
				return 0;
		}

		/* Well, it starts at 0 and we know it will collide so we may as
		 * well consider it as unassigned. That covers the Apple case.
		 */
		return 1;
	} else {
		/* If the BAR is non-0, then we consider it assigned */
		offset = (unsigned long)hose->io_base_virt - _IO_BASE;
		if (((res->start - offset) & 0xfffffffful) != 0)
			return 0;

		/* Here, we are a bit different than memory as typically IO
		 * space starting at low addresses -is- valid. What we do
		 * instead is that we consider as unassigned anything that
		 * doesn't have IO enabled in the PCI command register,
		 * and that's it.
		 */
		pci_read_config_word(dev, PCI_COMMAND, &command);
		if (command & PCI_COMMAND_IO)
			return 0;

		/* It's starting at 0 and IO is disabled in the bridge, consider
		 * it unassigned
		 */
		return 1;
	}
}

/* Fixup resources of a PCI<->PCI bridge */
static void pcibios_fixup_bridge(struct pci_bus *bus)
{
	struct resource *res;
	int i;

	struct pci_dev *dev = bus->self;

	pci_bus_for_each_resource(bus, res, i) {
		if (!res)
			continue;
		if (!res->flags)
			continue;
		if (i >= 3 && bus->self->transparent)
			continue;

		pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n",
			 pci_name(dev), i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned int)res->flags);

		/* Try to detect uninitialized P2P bridge resources,
		 * and clear them out so they get re-assigned later
		 */
		if (pcibios_uninitialized_bridge_resource(bus, res)) {
			res->flags = 0;
			pr_debug("PCI:%s            (unassigned)\n",
				 pci_name(dev));
		} else {
			pr_debug("PCI:%s %016llx-%016llx\n",
				 pci_name(dev),
				 (unsigned long long)res->start,
				 (unsigned long long)res->end);
		}
	}
}

void pcibios_setup_bus_self(struct pci_bus *bus)
{
	/* Fix up the bus resources for P2P bridges */
	if (bus->self != NULL)
		pcibios_fixup_bridge(bus);
}

void pcibios_setup_bus_devices(struct pci_bus *bus)
{
	struct pci_dev *dev;

	pr_debug("PCI: Fixup bus devices %d (%s)\n",
		 bus->number, bus->self ? pci_name(bus->self) : "PHB");

	list_for_each_entry(dev, &bus->devices, bus_list) {
		/* Setup OF node pointer in archdata */
		dev->dev.of_node = pci_device_to_OF_node(dev);

		/* Fixup NUMA node as it may not be setup yet by the generic
		 * code and is needed by the DMA init
		 */
		set_dev_node(&dev->dev, pcibus_to_node(dev->bus));

		/* Hook up default DMA ops */
		set_dma_ops(&dev->dev, pci_dma_ops);
		dev->dev.archdata.dma_data = (void *)PCI_DRAM_OFFSET;

		/* Read default IRQs and fixup if necessary */
		dev->irq = of_irq_parse_and_map_pci(dev, 0, 0);
	}
}

void pcibios_fixup_bus(struct pci_bus *bus)
{
	/* When called from the generic PCI probe, read PCI<->PCI bridge
	 * bases. This is -not- called when generating the PCI tree from
	 * the OF device-tree.
	 */
	if (bus->self != NULL)
		pci_read_bridge_bases(bus);

	/* Now fixup the bus itself */
	pcibios_setup_bus_self(bus);

	/* Now fixup devices on that bus */
	pcibios_setup_bus_devices(bus);
}
EXPORT_SYMBOL(pcibios_fixup_bus);

static int skip_isa_ioresource_align(struct pci_dev *dev)
{
	return 0;
}

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address. The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might have been mirrored at 0x0100-0x03ff..
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t align)
{
	struct pci_dev *dev = data;
	resource_size_t start = res->start;

	if (res->flags & IORESOURCE_IO) {
		if (skip_isa_ioresource_align(dev))
			return start;
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;
	}

	return start;
}
EXPORT_SYMBOL(pcibios_align_resource);

/*
 * Reparent resource children of pr that conflict with res
 * under res, and make res replace those children.
 */
static int __init reparent_resources(struct resource *parent,
				     struct resource *res)
{
	struct resource *p, **pp;
	struct resource **firstpp = NULL;

	for (pp = &parent->child; (p = *pp) != NULL; pp = &p->sibling) {
		if (p->end < res->start)
			continue;
		if (res->end < p->start)
			break;
		if (p->start < res->start || p->end > res->end)
			return -1;	/* not completely contained */
		if (firstpp == NULL)
			firstpp = pp;
	}
	if (firstpp == NULL)
		return -1;	/* didn't find any conflicting entries? */
	res->parent = parent;
	res->child = *firstpp;
	res->sibling = *pp;
	*firstpp = res;
	*pp = NULL;
	for (p = res->child; p != NULL; p = p->sibling) {
		p->parent = res;
		pr_debug("PCI: Reparented %s [%llx..%llx] under %s\n",
			 p->name,
			 (unsigned long long)p->start,
			 (unsigned long long)p->end, res->name);
	}
	return 0;
}

/*
 * Handle resources of PCI devices. If the world were perfect, we could
 * just allocate all the resource regions and do nothing more. It isn't.
 * On the other hand, we cannot just re-allocate all devices, as it would
 * require us to know lots of host bridge internals. So we attempt to
 * keep as much of the original configuration as possible, but tweak it
 * when it's found to be wrong.
 *
 * Known BIOS problems we have to work around:
 *	- I/O or memory regions not configured
 *	- regions configured, but not enabled in the command register
 *	- bogus I/O addresses above 64K used
 *	- expansion ROMs left enabled (this may sound harmless, but given
 *	  the fact the PCI specs explicitly allow address decoders to be
 *	  shared between expansion ROMs and other resource regions, it's
 *	  at least dangerous)
 *
 * Our solution:
 *	(1) Allocate resources for all buses behind PCI-to-PCI bridges.
 *	    This gives us fixed barriers on where we can allocate.
 *	(2) Allocate resources for all enabled devices.  If there is
 *	    a collision, just mark the resource as unallocated. Also
 *	    disable expansion ROMs during this step.
 *	(3) Try to allocate resources for disabled devices.  If the
 *	    resources were assigned correctly, everything goes well,
 *	    if they weren't, they won't disturb allocation of other
 *	    resources.
 *	(4) Assign new addresses to resources which were either
 *	    not configured at all or misconfigured.  If explicitly
 *	    requested by the user, configure expansion ROM address
 *	    as well.
 */

static void pcibios_allocate_bus_resources(struct pci_bus *bus)
{
	struct pci_bus *b;
	int i;
	struct resource *res, *pr;

	pr_debug("PCI: Allocating bus resources for %04x:%02x...\n",
		 pci_domain_nr(bus), bus->number);

	pci_bus_for_each_resource(bus, res, i) {
		if (!res || !res->flags
		    || res->start > res->end || res->parent)
			continue;
		if (bus->parent == NULL)
			pr = (res->flags & IORESOURCE_IO) ?
				&ioport_resource : &iomem_resource;
		else {
			/* Don't bother with non-root busses when
			 * re-assigning all resources. We clear the
			 * resource flags as if they were colliding
			 * and as such ensure proper re-allocation
			 * later.
			 */
			pr = pci_find_parent_resource(bus->self, res);
			if (pr == res) {
				/* this happens when the generic PCI
				 * code (wrongly) decides that this
				 * bridge is transparent  -- paulus
				 */
				continue;
			}
		}

		pr_debug("PCI: %s (bus %d) bridge rsrc %d: %016llx-%016llx ",
			 bus->self ? pci_name(bus->self) : "PHB",
			 bus->number, i,
			 (unsigned long long)res->start,
			 (unsigned long long)res->end);
		pr_debug("[0x%x], parent %p (%s)\n",
			 (unsigned int)res->flags,
			 pr, (pr && pr->name) ? pr->name : "nil");

		if (pr && !(pr->flags & IORESOURCE_UNSET)) {
			if (request_resource(pr, res) == 0)
				continue;
			/*
			 * Must be a conflict with an existing entry.
			 * Move that entry (or entries) under the
			 * bridge resource and try again.
			 */
			if (reparent_resources(pr, res) == 0)
				continue;
		}
		pr_warn("PCI: Cannot allocate resource region ");
		pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
		res->start = res->end = 0;
		res->flags = 0;
	}

	list_for_each_entry(b, &bus->children, node)
		pcibios_allocate_bus_resources(b);
}

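/*
 * Try to claim a single device BAR from its parent resource; if that
 * fails, flag it IORESOURCE_UNSET so it will be reassigned later.
 */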
static inline void alloc_resource(struct pci_dev *dev, int idx)
{
	struct resource *pr, *r = &dev->resource[idx];

	pr_debug("PCI: Allocating %s: Resource %d: %016llx..%016llx [%x]\n",
		 pci_name(dev), idx,
		 (unsigned long long)r->start,
		 (unsigned long long)r->end,
		 (unsigned int)r->flags);

	pr = pci_find_parent_resource(dev, r);
	if (!pr || (pr->flags & IORESOURCE_UNSET) ||
	    request_resource(pr, r) < 0) {
		pr_warn("PCI: Cannot allocate resource region %d ", idx);
		pr_cont("of device %s, will remap\n", pci_name(dev));
		if (pr)
			pr_debug("PCI:  parent is %p: %016llx-%016llx [%x]\n",
				 pr,
				 (unsigned long long)pr->start,
				 (unsigned long long)pr->end,
				 (unsigned int)pr->flags);
		/* We'll assign a new address later */
		r->flags |= IORESOURCE_UNSET;
		r->end -= r->start;
		r->start = 0;
	}
}

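/*
 * Walk all PCI devices and claim their BARs.  Pass 0 handles resources
 * whose decoder is currently enabled (and switches off stray expansion
 * ROMs), pass 1 handles the remaining, disabled ones.
 */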
static void __init pcibios_allocate_resources(int pass)
{
	struct pci_dev *dev = NULL;
	int idx, disabled;
	u16 command;
	struct resource *r;

	for_each_pci_dev(dev) {
		pci_read_config_word(dev, PCI_COMMAND, &command);
		for (idx = 0; idx <= PCI_ROM_RESOURCE; idx++) {
			r = &dev->resource[idx];
			if (r->parent)		/* Already allocated */
				continue;
			if (!r->flags || (r->flags & IORESOURCE_UNSET))
				continue;	/* Not assigned at all */
			/* We only allocate ROMs on pass 1 just in case they
			 * have been screwed up by firmware
			 */
			if (idx == PCI_ROM_RESOURCE)
				disabled = 1;
			if (r->flags & IORESOURCE_IO)
				disabled = !(command & PCI_COMMAND_IO);
			else
				disabled = !(command & PCI_COMMAND_MEMORY);
			if (pass == disabled)
				alloc_resource(dev, idx);
		}
		if (pass)
			continue;
		r = &dev->resource[PCI_ROM_RESOURCE];
		if (r->flags) {
			/* Turn the ROM off, leave the resource region,
			 * but keep it unregistered.
			 */
			u32 reg;
			pci_read_config_dword(dev, dev->rom_base_reg, &reg);
			if (reg & PCI_ROM_ADDRESS_ENABLE) {
				pr_debug("PCI: Switching off ROM of %s\n",
					 pci_name(dev));
				r->flags &= ~IORESOURCE_ROM_ENABLE;
				pci_write_config_dword(dev, dev->rom_base_reg,
						       reg & ~PCI_ROM_ADDRESS_ENABLE);
			}
		}
	}
}

static void __init pcibios_reserve_legacy_regions(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	resource_size_t offset;
	struct resource *res, *pres;
	int i;

	pr_debug("Reserving legacy ranges for domain %04x\n",
		 pci_domain_nr(bus));

	/* Check for IO */
	if (!(hose->io_resource.flags & IORESOURCE_IO))
		goto no_io;
	offset = (unsigned long)hose->io_base_virt - _IO_BASE;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy IO";
	res->flags = IORESOURCE_IO;
	res->start = offset;
	res->end = (offset + 0xfff) & 0xfffffffful;
	pr_debug("Candidate legacy IO: %pR\n", res);
	if (request_resource(&hose->io_resource, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve Legacy IO %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}

 no_io:
	/* Check for memory */
	offset = hose->pci_mem_offset;
	pr_debug("hose mem offset: %016llx\n", (unsigned long long)offset);
	for (i = 0; i < 3; i++) {
		pres = &hose->mem_resources[i];
		if (!(pres->flags & IORESOURCE_MEM))
			continue;
		pr_debug("hose mem res: %pR\n", pres);
		if ((pres->start - offset) <= 0xa0000 &&
		    (pres->end - offset) >= 0xbffff)
			break;
	}
	if (i >= 3)
		return;
	res = kzalloc(sizeof(struct resource), GFP_KERNEL);
	BUG_ON(res == NULL);
	res->name = "Legacy VGA memory";
	res->flags = IORESOURCE_MEM;
	res->start = 0xa0000 + offset;
	res->end = 0xbffff + offset;
	pr_debug("Candidate VGA memory: %pR\n", res);
	if (request_resource(pres, res)) {
		pr_debug("PCI %04x:%02x Cannot reserve VGA memory %pR\n",
			 pci_domain_nr(bus), bus->number, res);
		kfree(res);
	}
}

void __init pcibios_resource_survey(void)
{
	struct pci_bus *b;

	/* Allocate and assign resources. If we re-assign everything, then
	 * we skip the allocate phase
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_allocate_bus_resources(b);

	pcibios_allocate_resources(0);
	pcibios_allocate_resources(1);

	/* Before we start assigning unassigned resource, we try to reserve
	 * the low IO area and the VGA memory area if they intersect the
	 * bus available resources to avoid allocating things on top of them
	 */
	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_reserve_legacy_regions(b);

	/* Now proceed to assigning things that were left unassigned */
	pr_debug("PCI: Assigning unassigned resources...\n");
	pci_assign_unassigned_resources();
}

/* This is used by the PCI hotplug driver to allocate resources
 * of newly plugged busses. We can try to consolidate with the
 * rest of the code later, for now, keep it as-is as our main
 * resource allocation function doesn't deal with sub-trees yet.
 */
void pcibios_claim_one_bus(struct pci_bus *bus)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;

			pr_debug("PCI: Claiming %s: ", pci_name(dev));
			pr_debug("Resource %d: %016llx..%016llx [%x]\n",
				 i, (unsigned long long)r->start,
				 (unsigned long long)r->end,
				 (unsigned int)r->flags);

			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &bus->children, node)
		pcibios_claim_one_bus(child_bus);
}
EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);

/* pcibios_finish_adding_to_bus
 *
 * This is to be called by the hotplug code after devices have been
 * added to a bus, this includes calling it for a PHB that is just
 * being added
 */
void pcibios_finish_adding_to_bus(struct pci_bus *bus)
{
	pr_debug("PCI: Finishing adding to hotplug bus %04x:%02x\n",
		 pci_domain_nr(bus), bus->number);

	/* Allocate bus and devices resources */
	pcibios_allocate_bus_resources(bus);
	pcibios_claim_one_bus(bus);

	/* Add new devices to global lists.  Register in proc, sysfs. */
	pci_bus_add_devices(bus);

	/* Fixup EEH */
	/* eeh_add_device_tree_late(bus); */
}
EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);

int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	return pci_enable_resources(dev, mask);
}

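/*
 * Export the host bridge IO and memory windows into the resource list
 * consumed by pci_scan_root_bus(), synthesizing default windows when
 * the firmware left them unset.
 */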
static void pcibios_setup_phb_resources(struct pci_controller *hose,
					struct list_head *resources)
{
	unsigned long io_offset;
	struct resource *res;
	int i;

	/* Hookup PHB IO resource */
	res = &hose->io_resource;

	/* Fixup IO space offset */
	io_offset = (unsigned long)hose->io_base_virt - isa_io_base;
	res->start = (res->start + io_offset) & 0xffffffffu;
	res->end = (res->end + io_offset) & 0xffffffffu;

	if (!res->flags) {
		pr_warn("PCI: I/O resource not set for host ");
		pr_cont("bridge %s (domain %d)\n",
			hose->dn->full_name, hose->global_number);
		/* Workaround for lack of IO resource only on 32-bit */
		res->start = (unsigned long)hose->io_base_virt - isa_io_base;
		res->end = res->start + IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	pci_add_resource_offset(resources, res,
		(__force resource_size_t)(hose->io_base_virt - _IO_BASE));

	pr_debug("PCI: PHB IO resource    = %016llx-%016llx [%lx]\n",
		 (unsigned long long)res->start,
		 (unsigned long long)res->end,
		 (unsigned long)res->flags);

	/* Hookup PHB Memory resources */
	for (i = 0; i < 3; ++i) {
		res = &hose->mem_resources[i];
		if (!res->flags) {
			if (i > 0)
				continue;
			pr_err("PCI: Memory resource 0 not set for ");
			pr_cont("host bridge %s (domain %d)\n",
				hose->dn->full_name, hose->global_number);

			/* Workaround for lack of MEM resource only on 32-bit */
			res->start = hose->pci_mem_offset;
			res->end = (resource_size_t)-1LL;
			res->flags = IORESOURCE_MEM;

		}
		pci_add_resource_offset(resources, res, hose->pci_mem_offset);

		pr_debug("PCI: PHB MEM resource %d = %016llx-%016llx [%lx]\n",
			 i, (unsigned long long)res->start,
			 (unsigned long long)res->end,
			 (unsigned long)res->flags);
	}

	pr_debug("PCI: PHB MEM offset     = %016llx\n",
		 (unsigned long long)hose->pci_mem_offset);
	pr_debug("PCI: PHB IO  offset     = %08lx\n",
		 (unsigned long)hose->io_base_virt - _IO_BASE);
}

struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus)
{
	struct pci_controller *hose = bus->sysdata;

	return of_node_get(hose->dn);
}

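/* Scan one host bridge: set up its windows and create its root bus. */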
static void pcibios_scan_phb(struct pci_controller *hose)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;
	struct device_node *node = hose->dn;

	pr_debug("PCI: Scanning PHB %s\n", of_node_full_name(node));

	pcibios_setup_phb_resources(hose, &resources);

	bus = pci_scan_root_bus(hose->parent, hose->first_busno,
				hose->ops, hose, &resources);
	if (bus == NULL) {
		pr_err("Failed to create bus for PCI domain %04x\n",
		       hose->global_number);
		pci_free_resource_list(&resources);
		return;
	}
	bus->busn_res.start = hose->first_busno;
	hose->bus = bus;

	hose->last_busno = bus->busn_res.end;
}

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	int next_busno = 0;

	pr_info("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		pcibios_scan_phb(hose);
		if (next_busno <= hose->last_busno)
			next_busno = hose->last_busno + 1;
	}
	pci_bus_count = next_busno;

	/* Call common code to handle resource allocation */
	pcibios_resource_survey();

	return 0;
}

subsys_initcall(pcibios_init);

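/* Find the host bridge that owns the given bus number. */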
static struct pci_controller *pci_bus_to_hose(int bus)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		if (bus >= hose->first_busno && bus <= hose->last_busno)
			return hose;
	return NULL;
}

/* Provide information on locations of various I/O regions in physical
 * memory. Do this on a per-card basis so that we choose the right
 * root bridge.
 * Note that the returned IO or memory base is a physical address
 */

long sys_pciconfig_iobase(long which, unsigned long bus, unsigned long devfn)
{
	struct pci_controller *hose;
	long result = -EOPNOTSUPP;

	hose = pci_bus_to_hose(bus);
	if (!hose)
		return -ENODEV;

	switch (which) {
	case IOBASE_BRIDGE_NUMBER:
		return (long)hose->first_busno;
	case IOBASE_MEMORY:
		return (long)hose->pci_mem_offset;
	case IOBASE_IO:
		return (long)hose->io_base_phys;
	case IOBASE_ISA_IO:
		return (long)isa_io_base;
	case IOBASE_ISA_MEM:
		return (long)isa_mem_base;
	}

	return result;
}

/*
 * Null PCI config access functions, for the case when we can't
 * find a hose.
 */
#define NULL_PCI_OP(rw, size, type)					\
static int								\
null_##rw##_config_##size(struct pci_dev *dev, int offset, type val)	\
{									\
	return PCIBIOS_DEVICE_NOT_FOUND;				\
}

static int
null_read_config(struct pci_bus *bus, unsigned int devfn, int offset,
		 int len, u32 *val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static int
null_write_config(struct pci_bus *bus, unsigned int devfn, int offset,
		  int len, u32 val)
{
	return PCIBIOS_DEVICE_NOT_FOUND;
}

static struct pci_ops null_pci_ops = {
	.read = null_read_config,
	.write = null_write_config,
};

/*
 * These functions are used early on before PCI scanning is done
 * and all of the pci_dev and pci_bus structures have been created.
 */
static struct pci_bus *
fake_pci_bus(struct pci_controller *hose, int busnr)
{
	static struct pci_bus bus;

	if (!hose)
		pr_err("Can't find hose for PCI bus %d!\n", busnr);

	bus.number = busnr;
	bus.sysdata = hose;
	bus.ops = hose ? hose->ops : &null_pci_ops;
	return &bus;
}

#define EARLY_PCI_OP(rw, size, type)					\
int early_##rw##_config_##size(struct pci_controller *hose, int bus,	\
			       int devfn, int offset, type value)	\
{									\
	return pci_bus_##rw##_config_##size(fake_pci_bus(hose, bus),	\
					    devfn, offset, value);	\
}

EARLY_PCI_OP(read, byte, u8 *)
EARLY_PCI_OP(read, word, u16 *)
EARLY_PCI_OP(read, dword, u32 *)
EARLY_PCI_OP(write, byte, u8)
EARLY_PCI_OP(write, word, u16)
EARLY_PCI_OP(write, dword, u32)

int early_find_capability(struct pci_controller *hose, int bus, int devfn,
			  int cap)
{
	return pci_bus_find_capability(fake_pci_bus(hose, bus), devfn, cap);
}