/*
 *	$Id: pci.c,v 1.91 1999/01/21 13:34:01 davem Exp $
 *
 *	PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 *	Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 *	David Mosberger-Tang
 *
 *	Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/dma.h>	/* isa_dma_bridge_buggy */


/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char __devinit
pci_bus_max_busnr(struct pci_bus *bus)
{
        struct list_head *tmp;
        unsigned char max, n;

        max = bus->number;
        list_for_each(tmp, &bus->children) {
                n = pci_bus_max_busnr(pci_bus_b(tmp));
                if (n > max)
                        max = n;
        }
        return max;
}

/**
 * pci_max_busnr - returns maximum PCI bus number
 *
 * Returns the highest PCI bus number present in the system global list of
 * PCI buses.
 */
unsigned char __devinit
pci_max_busnr(void)
{
        struct pci_bus *bus = NULL;
        unsigned char max, n;

        max = 0;
        while ((bus = pci_find_next_bus(bus)) != NULL) {
                n = pci_bus_max_busnr(bus);
                if (n > max)
                        max = n;
        }
        return max;
}

static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
        u16 status;
        u8 pos, id;
        int ttl = 48;

        pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
        if (!(status & PCI_STATUS_CAP_LIST))
                return 0;

        switch (hdr_type) {
        case PCI_HEADER_TYPE_NORMAL:
        case PCI_HEADER_TYPE_BRIDGE:
                pci_bus_read_config_byte(bus, devfn, PCI_CAPABILITY_LIST, &pos);
                break;
        case PCI_HEADER_TYPE_CARDBUS:
                pci_bus_read_config_byte(bus, devfn, PCI_CB_CAPABILITY_LIST, &pos);
                break;
        default:
                return 0;
        }
        while (ttl-- && pos >= 0x40) {
                pos &= ~3;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, &id);
                if (id == 0xff)
                        break;
                if (id == cap)
                        return pos;
                pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_NEXT, &pos);
        }
        return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM      Power Management
 *  %PCI_CAP_ID_AGP     Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD     Vital Product Data
 *  %PCI_CAP_ID_SLOTID  Slot Identification
 *  %PCI_CAP_ID_MSI     Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP   CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX    PCI-X
 *  %PCI_CAP_ID_EXP     PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
        return __pci_bus_find_cap(dev->bus, dev->devfn, dev->hdr_type, cap);
}
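
/*
 * Usage sketch (kept out of the build with #if 0): a driver might locate
 * its Power Management capability and read the PMC register from it.
 * "my_read_pmc" is a hypothetical helper, not part of this file.
 */
#if 0
static int my_read_pmc(struct pci_dev *pdev, u16 *pmc)
{
        int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);

        if (!pm)
                return -ENODEV;         /* device has no PM capability */

        /* The returned offset is the base of the capability's registers. */
        pci_read_config_word(pdev, pm + PCI_PM_PMC, pmc);
        return 0;
}
#endif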

/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for pci devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
        u8 hdr_type;

        pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

        return __pci_bus_find_cap(bus, devfn, hdr_type & 0x7f, cap);
}

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR  Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC   Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN  Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR  Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
        u32 header;
        int ttl = 480; /* 3840 bytes, minimum 8 bytes per capability */
        int pos = 0x100;

        if (dev->cfg_size <= 256)
                return 0;

        if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                return 0;

        /*
         * If we have no capabilities, this is indicated by cap ID,
         * cap version and next pointer all being 0.
         */
        if (header == 0)
                return 0;

        while (ttl-- > 0) {
                if (PCI_EXT_CAP_ID(header) == cap)
                        return pos;

                pos = PCI_EXT_CAP_NEXT(header);
                if (pos < 0x100)
                        break;

                if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
                        break;
        }

        return 0;
}
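
/*
 * Usage sketch (kept out of the build with #if 0): probing for the
 * Advanced Error Reporting extended capability and reading its
 * uncorrectable error status register.  "my_check_aer" and the register
 * offset comment are illustrative; the offset literal follows the PCI
 * Express AER layout rather than a symbolic constant.
 */
#if 0
static void my_check_aer(struct pci_dev *pdev)
{
        int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
        u32 uncor_status;

        if (!aer)
                return;         /* no AER capability or no extended config space */

        /* The uncorrectable status register sits 4 bytes into the block. */
        pci_read_config_dword(pdev, aer + 4, &uncor_status);
        printk(KERN_DEBUG "PCI: %s AER uncorrectable status %08x\n",
               pci_name(pdev), uncor_status);
}
#endif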

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which a parent is sought
 *
 * For a given resource region of a given device, return the resource
 * region of the parent bus that contains the region, or from which it
 * should be allocated.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
        const struct pci_bus *bus = dev->bus;
        int i;
        struct resource *best = NULL;

        for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
                struct resource *r = bus->resource[i];
                if (!r)
                        continue;
                if (res->start && !(res->start >= r->start && res->end <= r->end))
                        continue;       /* Not contained */
                if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
                        continue;       /* Wrong type */
                if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
                        return r;       /* Exact match */
                if ((res->flags & IORESOURCE_PREFETCH) && !(r->flags & IORESOURCE_PREFETCH))
                        best = r;       /* Approximating prefetchable by non-prefetchable */
        }
        return best;
}
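
/*
 * Usage sketch (kept out of the build with #if 0): arch fixup code would
 * typically use pci_find_parent_resource() to find the bridge window a
 * BAR lives in before claiming it.  "my_claim_resource" is hypothetical.
 */
#if 0
static int my_claim_resource(struct pci_dev *pdev, int bar)
{
        struct resource *res = &pdev->resource[bar];
        struct resource *root = pci_find_parent_resource(pdev, res);

        if (!root)
                return -EINVAL;         /* no suitable parent window found */

        /* Insert the BAR under its parent window in the resource tree. */
        return request_resource(root, res);
}
#endif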

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: PCI power state (D0, D1, D2, D3hot, D3cold) we're entering
 *
 * Transition a device to a new power state, using the Power Management
 * Capabilities in the device's config space.
 *
 * RETURN VALUE:
 * -EINVAL if trying to enter a shallower low-power state than the one
 * we're already in (other than D0).
 * 0 if we're already in the requested state.
 * -EIO if the device does not support PCI PM or the requested state.
 * 0 if we can successfully change the power state.
 */

int
pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
        int pm;
        u16 pmcsr, pmc;

        /* bound the state we're entering */
        if (state > PCI_D3hot)
                state = PCI_D3hot;

        /* Validate current state:
         * We can always enter D0; otherwise we may only go deeper into
         * a low-power state, never back towards D0.
         */
        if (state != PCI_D0 && dev->current_state > state)
                return -EINVAL;
        else if (dev->current_state == state)
                return 0;       /* we're already there */

        /* find PCI PM capability in list */
        pm = pci_find_capability(dev, PCI_CAP_ID_PM);

        /* abort if the device doesn't support PM capabilities */
        if (!pm)
                return -EIO;

        pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
        if ((pmc & PCI_PM_CAP_VER_MASK) > 2) {
                printk(KERN_DEBUG
                       "PCI: %s has unsupported PM cap regs version (%u)\n",
                       pci_name(dev), pmc & PCI_PM_CAP_VER_MASK);
                return -EIO;
        }

        /* check if this device supports the desired state */
        if (state == PCI_D1 || state == PCI_D2) {
                if (state == PCI_D1 && !(pmc & PCI_PM_CAP_D1))
                        return -EIO;
                else if (state == PCI_D2 && !(pmc & PCI_PM_CAP_D2))
                        return -EIO;
        }

        /* If we're in D3, force the entire word to 0.
         * This doesn't affect PME_Status, disables PME_En, and
         * sets PowerState to 0.
         */
        if (dev->current_state >= PCI_D3hot)
                pmcsr = 0;
        else {
                pci_read_config_word(dev, pm + PCI_PM_CTRL, &pmcsr);
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                pmcsr |= state;
        }

        /* enter specified state */
        pci_write_config_word(dev, pm + PCI_PM_CTRL, pmcsr);

        /* Mandatory power management transition delays;
         * see PCI PM 1.1 5.6.1 table 18.
         */
        if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
                msleep(10);
        else if (state == PCI_D2 || dev->current_state == PCI_D2)
                udelay(200);
        dev->current_state = state;

        return 0;
}
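
/*
 * Usage sketch (kept out of the build with #if 0): a driver's resume()
 * method would typically return the device to D0 before touching it,
 * then restore config space.  "my_resume" is a hypothetical pci_driver
 * callback, not part of this file.
 */
#if 0
static int my_resume(struct pci_dev *pdev)
{
        /* Wake the device; coming from D3hot this also incurs the 10 ms delay. */
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return pci_enable_device(pdev);
}
#endif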

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to the suspend() function.
 *
 * Returns the PCI power state suitable for the given device and the given
 * system message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
        if (!pci_find_capability(dev, PCI_CAP_ID_PM))
                return PCI_D0;

        switch (state) {
        case 0: return PCI_D0;
        case 3: return PCI_D3hot;
        default:
                printk(KERN_ERR "PCI: unrecognized suspend state %d\n", state);
                BUG();
        }
        return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
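
/*
 * Usage sketch (kept out of the build with #if 0): a driver's suspend()
 * method can map the system-wide sleep message to a device power state
 * and save config space before powering down.  "my_suspend" is a
 * hypothetical pci_driver callback.
 */
#if 0
static int my_suspend(struct pci_dev *pdev, pm_message_t state)
{
        pci_save_state(pdev);
        pci_disable_device(pdev);
        return pci_set_power_state(pdev, pci_choose_state(pdev, state));
}
#endif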

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 *
 * The first 64 bytes of PCI 2.2 config space (16 dwords) are saved into
 * @dev->saved_config_space.
 */
int
pci_save_state(struct pci_dev *dev)
{
        int i;
        /* XXX: 100% dword access ok here? */
        for (i = 0; i < 16; i++)
                pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
        return 0;
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 *
 * Restores the configuration space previously saved by pci_save_state().
 */
int
pci_restore_state(struct pci_dev *dev)
{
        int i;

        for (i = 0; i < 16; i++)
                pci_write_config_dword(dev, i * 4, dev->saved_config_space[i]);
        return 0;
}
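
/*
 * Usage sketch (kept out of the build with #if 0): the save/restore pair
 * is also handy around operations that scramble config space, such as a
 * device-specific reset.  "my_reset_device" is hypothetical and the reset
 * sequence itself is elided.
 */
#if 0
static int my_reset_device(struct pci_dev *pdev)
{
        pci_save_state(pdev);           /* snapshot the first 64 bytes */
        /* ... device-specific reset sequence would go here ... */
        pci_restore_state(pdev);        /* put BARs, command, etc. back */
        return 0;
}
#endif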

/**
 * pci_enable_device_bars - Initialize some of a device for use
 * @dev: PCI device to be initialized
 * @bars: bitmask of BARs that must be configured
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable the selected I/O and memory resources. Wake up the device if
 * it was suspended. Beware, this function can fail.
 */

int
pci_enable_device_bars(struct pci_dev *dev, int bars)
{
        int err;

        pci_set_power_state(dev, PCI_D0);
        if ((err = pcibios_enable_device(dev, bars)) < 0)
                return err;
        return 0;
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int
pci_enable_device(struct pci_dev *dev)
{
        int err;

        if ((err = pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1)))
                return err;
        pci_fixup_device(pci_fixup_enable, dev);
        dev->is_enabled = 1;
        return 0;
}
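
/*
 * Usage sketch (kept out of the build with #if 0): the canonical start and
 * end of a pci_driver's life cycle for a device.  "my_probe" and
 * "my_remove" are hypothetical callbacks; error handling is trimmed to the
 * essentials.
 */
#if 0
static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        int err = pci_enable_device(pdev);      /* power up, enable I/O and memory decode */

        if (err)
                return err;
        pci_set_master(pdev);                   /* the device will do DMA */
        return 0;
}

static void my_remove(struct pci_dev *pdev)
{
        pci_disable_device(pdev);               /* undo pci_enable_device() */
}
#endif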

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 */
void
pci_disable_device(struct pci_dev *dev)
{
        u16 pci_command;

        pci_read_config_word(dev, PCI_COMMAND, &pci_command);
        if (pci_command & PCI_COMMAND_MASTER) {
                pci_command &= ~PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, pci_command);
        }
        dev->is_busmaster = 0;

        pcibios_disable_device(dev);
        dev->is_enabled = 0;
}

/**
 * pci_enable_wake - enable device to generate PME# when suspended
 * @dev: PCI device to operate on
 * @state: PCI state from which the device will issue PME#
 * @enable: flag to enable or disable generation
 *
 * Set the bits in the device's PM Capabilities to generate PME# when
 * the system is suspended.
 *
 * -EIO is returned if the device doesn't have PM Capabilities.
 * -EINVAL is returned if the device supports PM, but can't generate wake
 * events from the requested state.
 * 0 if the operation is successful.
 */
int pci_enable_wake(struct pci_dev *dev, pci_power_t state, int enable)
{
        int pm;
        u16 value;

        /* find PCI PM capability in list */
        pm = pci_find_capability(dev, PCI_CAP_ID_PM);

        /* If the device doesn't support PM Capabilities, but the request is
         * to disable wake events, it's a nop; otherwise fail */
        if (!pm)
                return enable ? -EIO : 0;

        /* Check device's ability to generate PME# */
        pci_read_config_word(dev, pm + PCI_PM_PMC, &value);

        value &= PCI_PM_CAP_PME_MASK;
        value >>= ffs(PCI_PM_CAP_PME_MASK) - 1; /* First bit of mask */

        /* Check if it can generate PME# from the requested state. */
        if (!value || !(value & (1 << state)))
                return enable ? -EINVAL : 0;

        pci_read_config_word(dev, pm + PCI_PM_CTRL, &value);

        /* Clear PME_Status by writing 1 to it and enable PME# */
        value |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;

        if (!enable)
                value &= ~PCI_PM_CTRL_PME_ENABLE;

        pci_write_config_word(dev, pm + PCI_PM_CTRL, value);

        return 0;
}
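
/*
 * Usage sketch (kept out of the build with #if 0): arming wakeup from the
 * target sleep state inside a suspend() method.  A device that cannot wake
 * from that state returns -EINVAL, which is treated as non-fatal here.
 * "my_suspend_wakeup" is hypothetical.
 */
#if 0
static int my_suspend_wakeup(struct pci_dev *pdev, pm_message_t state)
{
        pci_power_t target = pci_choose_state(pdev, state);

        /* -EINVAL (no PME# from the target state) is not fatal for suspend. */
        pci_enable_wake(pdev, target, 1);

        return pci_set_power_state(pdev, target);
}
#endif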

int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
        u8 pin;

        pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
        if (!pin)
                return -1;
        pin--;
        while (dev->bus->self) {
                pin = (pin + PCI_SLOT(dev->devfn)) % 4;
                dev = dev->bus->self;
        }
        *bridge = dev;
        return pin;
}

/**
 * pci_release_region - Release a PCI BAR
 * @pdev: PCI device whose resources were previously reserved by pci_request_region
 * @bar: BAR to release
 *
 * Releases the PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_region.  Call this function only
 * after all use of the PCI regions has ceased.
 */
void pci_release_region(struct pci_dev *pdev, int bar)
{
        if (pci_resource_len(pdev, bar) == 0)
                return;
        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
                release_region(pci_resource_start(pdev, bar),
                               pci_resource_len(pdev, bar));
        else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
                release_mem_region(pci_resource_start(pdev, bar),
                                   pci_resource_len(pdev, bar));
}

/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %-EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, char *res_name)
{
        if (pci_resource_len(pdev, bar) == 0)
                return 0;

        if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
                if (!request_region(pci_resource_start(pdev, bar),
                                    pci_resource_len(pdev, bar), res_name))
                        goto err_out;
        } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                if (!request_mem_region(pci_resource_start(pdev, bar),
                                        pci_resource_len(pdev, bar), res_name))
                        goto err_out;
        }

        return 0;

err_out:
        printk(KERN_WARNING "PCI: Unable to reserve %s region #%d:%lx@%lx for device %s\n",
               pci_resource_flags(pdev, bar) & IORESOURCE_IO ? "I/O" : "mem",
               bar + 1, /* PCI BAR # */
               pci_resource_len(pdev, bar), pci_resource_start(pdev, bar),
               pci_name(pdev));
        return -EBUSY;
}


/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
        int i;

        for (i = 0; i < 6; i++)
                pci_release_region(pdev, i);
}

/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %-EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, char *res_name)
{
        int i;

        for (i = 0; i < 6; i++)
                if (pci_request_region(pdev, i, res_name))
                        goto err_out;
        return 0;

err_out:
        while (--i >= 0)
                pci_release_region(pdev, i);

        return -EBUSY;
}
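
/*
 * Usage sketch (kept out of the build with #if 0): claiming all of a
 * device's regions and mapping one memory BAR.  "my_map_device", the
 * owner string and the BAR number are hypothetical.
 */
#if 0
static void __iomem *my_map_device(struct pci_dev *pdev)
{
        void __iomem *regs;

        if (pci_request_regions(pdev, "my_driver"))
                return NULL;            /* some region is already claimed */

        /* Map BAR 0; the caller must iounmap() and pci_release_regions(). */
        regs = ioremap(pci_resource_start(pdev, 0),
                       pci_resource_len(pdev, 0));
        if (!regs)
                pci_release_regions(pdev);
        return regs;
}
#endif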

/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void
pci_set_master(struct pci_dev *dev)
{
        u16 cmd;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_MASTER)) {
                pr_debug("PCI: Enabling bus mastering for device %s\n", pci_name(dev));
                cmd |= PCI_COMMAND_MASTER;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
        dev->is_busmaster = 1;
        pcibios_set_master(dev);
}

#ifndef HAVE_ARCH_PCI_MWI
/* This can be overridden by arch code. */
u8 pci_cache_line_size = L1_CACHE_BYTES >> 2;

/**
 * pci_generic_prep_mwi - helper function for pci_set_mwi
 * @dev: the PCI device for which MWI is enabled
 *
 * Helper function for the generic implementation of the pcibios_prep_mwi
 * function.  Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
static int
pci_generic_prep_mwi(struct pci_dev *dev)
{
        u8 cacheline_size;

        if (!pci_cache_line_size)
                return -EINVAL;         /* The system doesn't support MWI. */

        /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
           equal to or a multiple of the right value. */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size >= pci_cache_line_size &&
            (cacheline_size % pci_cache_line_size) == 0)
                return 0;

        /* Write the correct value. */
        pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
        /* Read it back. */
        pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
        if (cacheline_size == pci_cache_line_size)
                return 0;

        printk(KERN_DEBUG "PCI: cache line size of %d is not supported "
               "by device %s\n", pci_cache_line_size << 2, pci_name(dev));

        return -EINVAL;
}
#endif /* !HAVE_ARCH_PCI_MWI */

/**
 * pci_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Calls pcibios_prep_mwi() (or the generic pci_generic_prep_mwi()) to do
 * any needed arch specific preparation, then enables the
 * Memory-Write-Invalidate transaction in %PCI_COMMAND.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int
pci_set_mwi(struct pci_dev *dev)
{
        int rc;
        u16 cmd;

#ifdef HAVE_ARCH_PCI_MWI
        rc = pcibios_prep_mwi(dev);
#else
        rc = pci_generic_prep_mwi(dev);
#endif

        if (rc)
                return rc;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (!(cmd & PCI_COMMAND_INVALIDATE)) {
                pr_debug("PCI: Enabling Mem-Wr-Inval for device %s\n", pci_name(dev));
                cmd |= PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }

        return 0;
}
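
/*
 * Usage sketch (kept out of the build with #if 0): MWI is an optional
 * optimization, so callers usually treat a failure as non-fatal.
 * "my_enable_mwi" is hypothetical.
 */
#if 0
static void my_enable_mwi(struct pci_dev *pdev)
{
        /* Fall back silently to plain memory writes if MWI can't be used. */
        if (pci_set_mwi(pdev))
                printk(KERN_DEBUG "PCI: %s: running without MWI\n",
                       pci_name(pdev));
}
#endif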

/**
 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
 * @dev: the PCI device to disable
 *
 * Disables PCI Memory-Write-Invalidate transaction on the device
 */
void
pci_clear_mwi(struct pci_dev *dev)
{
        u16 cmd;

        pci_read_config_word(dev, PCI_COMMAND, &cmd);
        if (cmd & PCI_COMMAND_INVALIDATE) {
                cmd &= ~PCI_COMMAND_INVALIDATE;
                pci_write_config_word(dev, PCI_COMMAND, cmd);
        }
}

#ifndef HAVE_ARCH_PCI_SET_DMA_MASK
/*
 * These can be overridden by arch-specific implementations
 */
int
pci_set_dma_mask(struct pci_dev *dev, u64 mask)
{
        if (!pci_dma_supported(dev, mask))
                return -EIO;

        dev->dma_mask = mask;

        return 0;
}

int
pci_dac_set_dma_mask(struct pci_dev *dev, u64 mask)
{
        if (!pci_dac_dma_supported(dev, mask))
                return -EIO;

        dev->dma_mask = mask;

        return 0;
}

int
pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
{
        if (!pci_dma_supported(dev, mask))
                return -EIO;

        dev->dev.coherent_dma_mask = mask;

        return 0;
}
#endif
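
/*
 * Usage sketch (kept out of the build with #if 0): negotiating DMA
 * addressing during probe, preferring 64-bit and falling back to 32-bit.
 * The literal masks stand in for the usual DMA mask constants;
 * "my_setup_dma" is hypothetical.
 */
#if 0
static int my_setup_dma(struct pci_dev *pdev)
{
        /* Try full 64-bit addressing first... */
        if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL))
                return pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);

        /* ...otherwise fall back to 32-bit addressing. */
        if (pci_set_dma_mask(pdev, 0xffffffffULL))
                return -EIO;            /* no usable DMA addressing */
        return pci_set_consistent_dma_mask(pdev, 0xffffffffULL);
}
#endif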

static int __devinit pci_init(void)
{
        struct pci_dev *dev = NULL;

        while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
                pci_fixup_device(pci_fixup_final, dev);
        }
        return 0;
}

static int __devinit pci_setup(char *str)
{
        while (str) {
                char *k = strchr(str, ',');
                if (k)
                        *k++ = 0;
                if (*str && (str = pcibios_setup(str)) && *str) {
                        /* PCI layer options should be handled here */
                        printk(KERN_ERR "PCI: Unknown option `%s'\n", str);
                }
                str = k;
        }
        return 1;
}

device_initcall(pci_init);

__setup("pci=", pci_setup);

#if defined(CONFIG_ISA) || defined(CONFIG_EISA)
/* FIXME: Some boxes have multiple ISA bridges! */
struct pci_dev *isa_bridge;
EXPORT_SYMBOL(isa_bridge);
#endif

EXPORT_SYMBOL(pci_enable_device_bars);
EXPORT_SYMBOL(pci_enable_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_max_busnr);
EXPORT_SYMBOL(pci_bus_max_busnr);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_release_region);
EXPORT_SYMBOL(pci_request_region);
EXPORT_SYMBOL(pci_set_master);
EXPORT_SYMBOL(pci_set_mwi);
EXPORT_SYMBOL(pci_clear_mwi);
EXPORT_SYMBOL(pci_set_dma_mask);
EXPORT_SYMBOL(pci_dac_set_dma_mask);
EXPORT_SYMBOL(pci_set_consistent_dma_mask);
EXPORT_SYMBOL(pci_assign_resource);
EXPORT_SYMBOL(pci_find_parent_resource);

EXPORT_SYMBOL(pci_set_power_state);
EXPORT_SYMBOL(pci_save_state);
EXPORT_SYMBOL(pci_restore_state);
EXPORT_SYMBOL(pci_enable_wake);

/* Quirk info */

EXPORT_SYMBOL(isa_dma_bridge_buggy);
EXPORT_SYMBOL(pci_pci_problems);