blob: b848cc3dc913d8de7dc5181fc140a9f83215e6cf [file] [log] [blame]
Chris Zankel5a0015d2005-06-23 22:01:16 -07001/*
Uwe Zeisbergerf30c2262006-10-03 23:01:26 +02002 * arch/xtensa/kernel/pci.c
Chris Zankel5a0015d2005-06-23 22:01:16 -07003 *
4 * PCI bios-type initialisation for PCI machines
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Copyright (C) 2001-2005 Tensilica Inc.
12 *
13 * Based largely on work from Cort (ppc/kernel/pci.c)
14 * IO functions copied from sparc.
15 *
16 * Chris Zankel <chris@zankel.net>
17 *
18 */
19
Chris Zankel5a0015d2005-06-23 22:01:16 -070020#include <linux/kernel.h>
21#include <linux/pci.h>
22#include <linux/delay.h>
23#include <linux/string.h>
24#include <linux/init.h>
25#include <linux/sched.h>
26#include <linux/errno.h>
27#include <linux/bootmem.h>
28
29#include <asm/pci-bridge.h>
30#include <asm/platform.h>
31
32#undef DEBUG
33
34#ifdef DEBUG
35#define DBG(x...) printk(x)
36#else
37#define DBG(x...)
38#endif
39
40/* PCI Controller */
41
42
43/*
44 * pcibios_alloc_controller
45 * pcibios_enable_device
46 * pcibios_fixups
47 * pcibios_align_resource
48 * pcibios_fixup_bus
Chris Zankel5a0015d2005-06-23 22:01:16 -070049 * pci_bus_add_device
50 * pci_mmap_page_range
51 */
52
/* Singly-linked list of all registered PCI controllers; new controllers
 * are appended at *pci_ctrl_tail by pcibios_alloc_controller(). */
struct pci_controller* pci_ctrl_head;
struct pci_controller** pci_ctrl_tail = &pci_ctrl_head;

/* One past the highest bus number assigned during pcibios_init(). */
static int pci_bus_count;
57
Chris Zankel5a0015d2005-06-23 22:01:16 -070058/*
59 * We need to avoid collisions with `mirrored' VGA ports
60 * and other strange ISA hardware, so we always want the
61 * addresses to be allocated in the 0x000-0x0ff region
62 * modulo 0x400.
63 *
64 * Why? Because some silly external IO cards only decode
65 * the low 10 bits of the IO address. The 0x00-0xff region
66 * is reserved for motherboard devices that decode all 16
67 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
68 * but we want to try to avoid allocating at 0x2900-0x2bff
69 * which might have be mirrored at 0x0100-0x03ff..
70 */
Dominik Brodowskib26b2d42010-01-01 17:40:49 +010071resource_size_t
Dominik Brodowski3b7a17f2010-01-01 17:40:50 +010072pcibios_align_resource(void *data, const struct resource *res,
73 resource_size_t size, resource_size_t align)
Chris Zankel5a0015d2005-06-23 22:01:16 -070074{
75 struct pci_dev *dev = data;
Dominik Brodowskib26b2d42010-01-01 17:40:49 +010076 resource_size_t start = res->start;
Chris Zankel5a0015d2005-06-23 22:01:16 -070077
78 if (res->flags & IORESOURCE_IO) {
Chris Zankel5a0015d2005-06-23 22:01:16 -070079 if (size > 0x100) {
Max Filippovfd95ee72013-05-27 19:45:58 +040080 pr_err("PCI: I/O Region %s/%d too large (%u bytes)\n",
81 pci_name(dev), dev->resource - res,
82 size);
Chris Zankel5a0015d2005-06-23 22:01:16 -070083 }
84
Dominik Brodowskib26b2d42010-01-01 17:40:49 +010085 if (start & 0x300)
Chris Zankel5a0015d2005-06-23 22:01:16 -070086 start = (start + 0x3ff) & ~0x3ff;
Chris Zankel5a0015d2005-06-23 22:01:16 -070087 }
Dominik Brodowskib26b2d42010-01-01 17:40:49 +010088
89 return start;
Chris Zankel5a0015d2005-06-23 22:01:16 -070090}
91
92int
93pcibios_enable_resources(struct pci_dev *dev, int mask)
94{
95 u16 cmd, old_cmd;
96 int idx;
97 struct resource *r;
98
99 pci_read_config_word(dev, PCI_COMMAND, &cmd);
100 old_cmd = cmd;
101 for(idx=0; idx<6; idx++) {
102 r = &dev->resource[idx];
103 if (!r->start && r->end) {
104 printk (KERN_ERR "PCI: Device %s not available because "
Chris Zankel9ec55a92005-06-30 02:59:00 -0700105 "of resource collisions\n", pci_name(dev));
Chris Zankel5a0015d2005-06-23 22:01:16 -0700106 return -EINVAL;
107 }
108 if (r->flags & IORESOURCE_IO)
109 cmd |= PCI_COMMAND_IO;
110 if (r->flags & IORESOURCE_MEM)
111 cmd |= PCI_COMMAND_MEMORY;
112 }
113 if (dev->resource[PCI_ROM_RESOURCE].start)
114 cmd |= PCI_COMMAND_MEMORY;
115 if (cmd != old_cmd) {
116 printk("PCI: Enabling device %s (%04x -> %04x)\n",
Chris Zankel9ec55a92005-06-30 02:59:00 -0700117 pci_name(dev), old_cmd, cmd);
Chris Zankel5a0015d2005-06-23 22:01:16 -0700118 pci_write_config_word(dev, PCI_COMMAND, cmd);
119 }
120 return 0;
121}
122
123struct pci_controller * __init pcibios_alloc_controller(void)
124{
125 struct pci_controller *pci_ctrl;
126
127 pci_ctrl = (struct pci_controller *)alloc_bootmem(sizeof(*pci_ctrl));
128 memset(pci_ctrl, 0, sizeof(struct pci_controller));
129
130 *pci_ctrl_tail = pci_ctrl;
131 pci_ctrl_tail = &pci_ctrl->next;
132
133 return pci_ctrl;
134}
135
/*
 * Register one controller's I/O and memory apertures on the resource
 * list that will be passed to pci_scan_root_bus().  Apertures the
 * platform left unset are given full-range defaults so scanning can
 * still proceed.
 */
static void __init pci_controller_apertures(struct pci_controller *pci_ctrl,
					    struct list_head *resources)
{
	struct resource *res;
	unsigned long io_offset;
	int i;

	/* CPU-visible base of this controller's PCI I/O space;
	 * 0 means bus I/O addresses are identity-mapped. */
	io_offset = (unsigned long)pci_ctrl->io_space.base;
	res = &pci_ctrl->io_resource;
	if (!res->flags) {
		/* Platform set no I/O window; default to the full range. */
		if (io_offset)
			printk (KERN_ERR "I/O resource not set for host"
				" bridge %d\n", pci_ctrl->index);
		res->start = 0;
		res->end = IO_SPACE_LIMIT;
		res->flags = IORESOURCE_IO;
	}
	/* Translate the bus-relative I/O range to CPU addresses. */
	res->start += io_offset;
	res->end += io_offset;
	pci_add_resource_offset(resources, res, io_offset);

	for (i = 0; i < 3; i++) {
		res = &pci_ctrl->mem_resources[i];
		if (!res->flags) {
			/* Only the first memory aperture is mandatory;
			 * skip unset secondary windows silently. */
			if (i > 0)
				continue;
			printk(KERN_ERR "Memory resource not set for "
				"host bridge %d\n", pci_ctrl->index);
			res->start = 0;
			res->end = ~0U;
			res->flags = IORESOURCE_MEM;
		}
		pci_add_resource(resources, res);
	}
}
171
/*
 * Probe all recorded PCI controllers: scan each root bus, let the
 * platform apply its fixups, then add the discovered devices.
 * Runs once at boot via subsys_initcall.
 */
static int __init pcibios_init(void)
{
	struct pci_controller *pci_ctrl;
	struct list_head resources;
	struct pci_bus *bus;
	int next_busno = 0, ret;

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers. */
	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
		pci_ctrl->last_busno = 0xff;
		INIT_LIST_HEAD(&resources);
		pci_controller_apertures(pci_ctrl, &resources);
		bus = pci_scan_root_bus(NULL, pci_ctrl->first_busno,
					pci_ctrl->ops, pci_ctrl, &resources);
		/* Scan failure: skip this controller but keep going. */
		if (!bus)
			continue;

		pci_ctrl->bus = bus;
		pci_ctrl->last_busno = bus->busn_res.end;
		/* Track the next free bus number across controllers. */
		if (next_busno <= pci_ctrl->last_busno)
			next_busno = pci_ctrl->last_busno+1;
	}
	pci_bus_count = next_busno;
	/* Platform fixups must run before devices are added. */
	ret = platform_pcibios_fixup();
	if (ret)
		return ret;

	/* Devices are added only after fixups succeeded. */
	for (pci_ctrl = pci_ctrl_head; pci_ctrl; pci_ctrl = pci_ctrl->next) {
		if (pci_ctrl->bus)
			pci_bus_add_devices(pci_ctrl->bus);
	}

	return 0;
}
208
209subsys_initcall(pcibios_init);
210
Max Filippovfd95ee72013-05-27 19:45:58 +0400211void pcibios_fixup_bus(struct pci_bus *bus)
Chris Zankel5a0015d2005-06-23 22:01:16 -0700212{
Bjorn Helgaas237865f2015-09-15 13:18:04 -0500213 if (bus->parent) {
214 /* This is a subordinate bridge */
215 pci_read_bridge_bases(bus);
216 }
Chris Zankel5a0015d2005-06-23 22:01:16 -0700217}
218
/*
 * Architecture hook called when a device is made a bus master.
 * xtensa requires no extra setup beyond the generic PCI code.
 */
void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}
223
Chris Zankel5a0015d2005-06-23 22:01:16 -0700224int pcibios_enable_device(struct pci_dev *dev, int mask)
225{
226 u16 cmd, old_cmd;
227 int idx;
228 struct resource *r;
229
230 pci_read_config_word(dev, PCI_COMMAND, &cmd);
231 old_cmd = cmd;
232 for (idx=0; idx<6; idx++) {
233 r = &dev->resource[idx];
234 if (!r->start && r->end) {
235 printk(KERN_ERR "PCI: Device %s not available because "
Chris Zankel9ec55a92005-06-30 02:59:00 -0700236 "of resource collisions\n", pci_name(dev));
Chris Zankel5a0015d2005-06-23 22:01:16 -0700237 return -EINVAL;
238 }
239 if (r->flags & IORESOURCE_IO)
240 cmd |= PCI_COMMAND_IO;
241 if (r->flags & IORESOURCE_MEM)
242 cmd |= PCI_COMMAND_MEMORY;
243 }
244 if (cmd != old_cmd) {
245 printk("PCI: Enabling device %s (%04x -> %04x)\n",
Chris Zankel9ec55a92005-06-30 02:59:00 -0700246 pci_name(dev), old_cmd, cmd);
Chris Zankel5a0015d2005-06-23 22:01:16 -0700247 pci_write_config_word(dev, PCI_COMMAND, cmd);
248 }
249
250 return 0;
251}
252
253#ifdef CONFIG_PROC_FS
254
255/*
256 * Return the index of the PCI controller for device pdev.
257 */
258
259int
260pci_controller_num(struct pci_dev *dev)
261{
262 struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
263 return pci_ctrl->index;
264}
265
266#endif /* CONFIG_PROC_FS */
267
Chris Zankel5a0015d2005-06-23 22:01:16 -0700268/*
269 * Platform support for /proc/bus/pci/X/Y mmap()s,
270 * modelled on the sparc64 implementation by Dave Miller.
271 * -- paulus.
272 */
273
274/*
275 * Adjust vm_pgoff of VMA such that it is the physical page offset
276 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
277 *
278 * Basically, the user finds the base address for his device which he wishes
279 * to mmap. They read the 32-bit value from the config space base register,
280 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
281 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
282 *
283 * Returns negative error code on failure, zero on success.
284 */
/*
 * Validate and rewrite vma->vm_pgoff for a /proc/bus/pci mmap request:
 * the user-supplied offset (a 32-bit PCI bus address) must fall inside
 * one of the device's resources of the requested type, and is converted
 * to the physical page offset to map.  Returns 0 on success, -EINVAL
 * if the offset matches no resource.
 */
static __inline__ int
__pci_mmap_make_offset(struct pci_dev *dev, struct vm_area_struct *vma,
		       enum pci_mmap_state mmap_state)
{
	struct pci_controller *pci_ctrl = (struct pci_controller*) dev->sysdata;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long io_offset = 0;
	int i, res_bit;

	if (pci_ctrl == 0)
		return -EINVAL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		res_bit = IORESOURCE_MEM;
	} else {
		/* I/O resources were shifted by io_offset in
		 * pci_controller_apertures(); match that here. */
		io_offset = (unsigned long)pci_ctrl->io_space.base;
		offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (offset < (rp->start & PAGE_MASK) || offset > rp->end)
			continue;

		/* found it! construct the final physical address:
		 * for I/O, rebase from bus space to the CPU-visible
		 * start of the controller's I/O window. */
		if (mmap_state == pci_mmap_io)
			offset += pci_ctrl->io_space.start - io_offset;
		vma->vm_pgoff = offset >> PAGE_SHIFT;
		return 0;
	}

	return -EINVAL;
}
335
336/*
Chris Zankel5a0015d2005-06-23 22:01:16 -0700337 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
338 * device mapping.
339 */
/*
 * Choose the page protection for a PCI mmap: force the cache attribute
 * to write-through.  NOTE(review): write_combine is currently ignored —
 * the #if 0 block below is dead code kept for reference.
 */
static __inline__ void
__pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
		      enum pci_mmap_state mmap_state, int write_combine)
{
	int prot = pgprot_val(vma->vm_page_prot);

	/* Set to write-through */
	prot = (prot & _PAGE_CA_MASK) | _PAGE_CA_WT;
#if 0
	if (!write_combine)
		prot |= _PAGE_WRITETHRU;
#endif
	vma->vm_page_prot = __pgprot(prot);
}
354
355/*
356 * Perform the actual remap of the pages for a PCI device mapping, as
357 * appropriate for this architecture. The region in the process to map
358 * is described by vm_start and vm_end members of VMA, the base physical
359 * address is found in vm_pgoff.
360 * The pci device structure is provided so that architectures may make mapping
361 * decisions on a per-device or per-bus basis.
362 *
363 * Returns a negative error code on failure, zero on success.
364 */
365int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
366 enum pci_mmap_state mmap_state,
367 int write_combine)
368{
369 int ret;
370
371 ret = __pci_mmap_make_offset(dev, vma, mmap_state);
372 if (ret < 0)
373 return ret;
374
Chris Zankel5a0015d2005-06-23 22:01:16 -0700375 __pci_mmap_set_pgprot(dev, vma, mmap_state, write_combine);
376
Chris Zankel288a60c2005-09-22 21:44:23 -0700377 ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
378 vma->vm_end - vma->vm_start,vma->vm_page_prot);
Chris Zankel5a0015d2005-06-23 22:01:16 -0700379
380 return ret;
381}