blob: 985df63aa59fe6fef05d30f2e3e1a3dc0e05d018 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
David Brownell075c1772007-04-26 00:12:06 -070014#include <linux/pm.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/module.h>
17#include <linux/spinlock.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080018#include <linux/string.h>
vignesh babu229f5af2007-08-13 18:23:14 +053019#include <linux/log2.h>
Shaohua Li7d715a62008-02-25 09:46:41 +080020#include <linux/pci-aspm.h>
Stephen Rothwellc300bd2fb2008-07-10 02:16:44 +020021#include <linux/pm_wakeup.h>
Sheng Yang8dd7f802008-10-21 17:38:25 +080022#include <linux/interrupt.h>
Yuji Shimada32a9a6822009-03-16 17:13:39 +090023#include <linux/device.h>
Rafael J. Wysockib67ea762010-02-17 23:44:09 +010024#include <linux/pm_runtime.h>
Bjorn Helgaas284f5f92012-04-30 15:21:02 -060025#include <asm-generic/pci-bridge.h>
Yuji Shimada32a9a6822009-03-16 17:13:39 +090026#include <asm/setup.h>
Greg KHbc56b9e2005-04-08 14:53:31 +090027#include "pci.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
/* Printable names for the pci_power_t states ("error" covers PCI_POWER_ERROR). */
const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

/* Set non-zero by quirks when the ISA DMA bridge needs workarounds. */
int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

/* Chipset problem flags, set by quirks. */
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

/* Global floor (ms) applied to a device's d3_delay in pci_dev_d3_sleep(). */
unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

/* Devices whose PME status must be polled, and the delayed work polling them. */
static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

/* Entry on pci_pme_list: one polled device. */
struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000 /* How long between PME checks */
54
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +010055static void pci_dev_d3_sleep(struct pci_dev *dev)
56{
57 unsigned int delay = dev->d3_delay;
58
59 if (delay < pci_pm_d3_delay)
60 delay = pci_pm_d3_delay;
61
62 msleep(delay);
63}
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

/* Default CardBus bridge window sizes. */
#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

/* Default window sizes reserved for hotplug bridges. */
#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

/* PCIe bus configuration policy (see enum pcie_bus_config_types). */
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;

/*
 * The default CLS is used if arch didn't set CLS explicitly and not
 * all pci devices agree on the same value.  Arch can override either
 * the dfl or actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101/**
102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
103 * @bus: pointer to PCI bus structure to search
104 *
105 * Given a PCI bus, returns the highest PCI bus number present in the set
106 * including the given PCI bus and its list of child PCI buses.
107 */
Sam Ravnborg96bde062007-03-26 21:53:30 -0800108unsigned char pci_bus_max_busnr(struct pci_bus* bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109{
110 struct list_head *tmp;
111 unsigned char max, n;
112
Kristen Accardib82db5c2006-01-17 16:56:56 -0800113 max = bus->subordinate;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 list_for_each(tmp, &bus->children) {
115 n = pci_bus_max_busnr(pci_bus_b(tmp));
116 if(n > max)
117 max = n;
118 }
119 return max;
120}
Kristen Accardib82db5c2006-01-17 16:56:56 -0800121EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
Andrew Morton1684f5d2008-12-01 14:30:30 -0800123#ifdef CONFIG_HAS_IOMEM
124void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
125{
126 /*
127 * Make sure the BAR is actually a memory resource, not an IO resource
128 */
129 if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
130 WARN_ON(1);
131 return NULL;
132 }
133 return ioremap_nocache(pci_resource_start(pdev, bar),
134 pci_resource_len(pdev, bar));
135}
136EXPORT_SYMBOL_GPL(pci_ioremap_bar);
137#endif
138
Kristen Accardib82db5c2006-01-17 16:56:56 -0800139#if 0
Linus Torvalds1da177e2005-04-16 15:20:36 -0700140/**
141 * pci_max_busnr - returns maximum PCI bus number
142 *
143 * Returns the highest PCI bus number present in the system global list of
144 * PCI buses.
145 */
146unsigned char __devinit
147pci_max_busnr(void)
148{
149 struct pci_bus *bus = NULL;
150 unsigned char max, n;
151
152 max = 0;
153 while ((bus = pci_find_next_bus(bus)) != NULL) {
154 n = pci_bus_max_busnr(bus);
155 if(n > max)
156 max = n;
157 }
158 return max;
159}
160
Adrian Bunk54c762f2005-12-22 01:08:52 +0100161#endif /* 0 */
162
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100163#define PCI_FIND_CAP_TTL 48
164
165static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
166 u8 pos, int cap, int *ttl)
Roland Dreier24a4e372005-10-28 17:35:34 -0700167{
168 u8 id;
Roland Dreier24a4e372005-10-28 17:35:34 -0700169
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100170 while ((*ttl)--) {
Roland Dreier24a4e372005-10-28 17:35:34 -0700171 pci_bus_read_config_byte(bus, devfn, pos, &pos);
172 if (pos < 0x40)
173 break;
174 pos &= ~3;
175 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
176 &id);
177 if (id == 0xff)
178 break;
179 if (id == cap)
180 return pos;
181 pos += PCI_CAP_LIST_NEXT;
182 }
183 return 0;
184}
185
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100186static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
187 u8 pos, int cap)
188{
189 int ttl = PCI_FIND_CAP_TTL;
190
191 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
192}
193
Roland Dreier24a4e372005-10-28 17:35:34 -0700194int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
195{
196 return __pci_find_next_cap(dev->bus, dev->devfn,
197 pos + PCI_CAP_LIST_NEXT, cap);
198}
199EXPORT_SYMBOL_GPL(pci_find_next_capability);
200
Michael Ellermand3bac112006-11-22 18:26:16 +1100201static int __pci_bus_find_cap_start(struct pci_bus *bus,
202 unsigned int devfn, u8 hdr_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
204 u16 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
206 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
207 if (!(status & PCI_STATUS_CAP_LIST))
208 return 0;
209
210 switch (hdr_type) {
211 case PCI_HEADER_TYPE_NORMAL:
212 case PCI_HEADER_TYPE_BRIDGE:
Michael Ellermand3bac112006-11-22 18:26:16 +1100213 return PCI_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214 case PCI_HEADER_TYPE_CARDBUS:
Michael Ellermand3bac112006-11-22 18:26:16 +1100215 return PCI_CB_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 default:
217 return 0;
218 }
Michael Ellermand3bac112006-11-22 18:26:16 +1100219
220 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700221}
222
223/**
224 * pci_find_capability - query for devices' capabilities
225 * @dev: PCI device to query
226 * @cap: capability code
227 *
228 * Tell if a device supports a given PCI capability.
229 * Returns the address of the requested capability structure within the
230 * device's PCI configuration space or 0 in case the device does not
231 * support it. Possible values for @cap:
232 *
233 * %PCI_CAP_ID_PM Power Management
234 * %PCI_CAP_ID_AGP Accelerated Graphics Port
235 * %PCI_CAP_ID_VPD Vital Product Data
236 * %PCI_CAP_ID_SLOTID Slot Identification
237 * %PCI_CAP_ID_MSI Message Signalled Interrupts
238 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
239 * %PCI_CAP_ID_PCIX PCI-X
240 * %PCI_CAP_ID_EXP PCI Express
241 */
242int pci_find_capability(struct pci_dev *dev, int cap)
243{
Michael Ellermand3bac112006-11-22 18:26:16 +1100244 int pos;
245
246 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
247 if (pos)
248 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
249
250 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700251}
252
253/**
254 * pci_bus_find_capability - query for devices' capabilities
255 * @bus: the PCI bus to query
256 * @devfn: PCI device to query
257 * @cap: capability code
258 *
259 * Like pci_find_capability() but works for pci devices that do not have a
260 * pci_dev structure set up yet.
261 *
262 * Returns the address of the requested capability structure within the
263 * device's PCI configuration space or 0 in case the device does not
264 * support it.
265 */
266int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
267{
Michael Ellermand3bac112006-11-22 18:26:16 +1100268 int pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269 u8 hdr_type;
270
271 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
272
Michael Ellermand3bac112006-11-22 18:26:16 +1100273 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
274 if (pos)
275 pos = __pci_find_next_cap(bus, devfn, pos, cap);
276
277 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700278}
279
280/**
Myron Stowec463b8c2012-06-01 15:16:37 -0600281 * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
282 * @dev: PCI device to check
283 *
284 * Like pci_pcie_cap() but also checks that the PCIe capability version is
285 * >= 2. Note that v1 capability structures could be sparse in that not
286 * all register fields were required. v2 requires the entire structure to
287 * be present size wise, while still allowing for non-implemented registers
288 * to exist but they must be hardwired to 0.
289 *
290 * Due to the differences in the versions of capability structures, one
291 * must be careful not to try and access non-existant registers that may
292 * exist in early versions - v1 - of Express devices.
293 *
294 * Returns the offset of the PCIe capability structure as long as the
295 * capability version is >= 2; otherwise 0 is returned.
296 */
297static int pci_pcie_cap2(struct pci_dev *dev)
298{
299 u16 flags;
300 int pos;
301
302 pos = pci_pcie_cap(dev);
303 if (pos) {
304 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
305 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
306 pos = 0;
307 }
308
309 return pos;
310}
311
312/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700313 * pci_find_ext_capability - Find an extended capability
314 * @dev: PCI device to query
315 * @cap: capability code
316 *
317 * Returns the address of the requested extended capability structure
318 * within the device's PCI configuration space or 0 if the device does
319 * not support it. Possible values for @cap:
320 *
321 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
322 * %PCI_EXT_CAP_ID_VC Virtual Channel
323 * %PCI_EXT_CAP_ID_DSN Device Serial Number
324 * %PCI_EXT_CAP_ID_PWR Power Budgeting
325 */
326int pci_find_ext_capability(struct pci_dev *dev, int cap)
327{
328 u32 header;
Zhao, Yu557848c2008-10-13 19:18:07 +0800329 int ttl;
330 int pos = PCI_CFG_SPACE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331
Zhao, Yu557848c2008-10-13 19:18:07 +0800332 /* minimum 8 bytes per capability */
333 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
334
335 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700336 return 0;
337
338 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
339 return 0;
340
341 /*
342 * If we have no capabilities, this is indicated by cap ID,
343 * cap version and next pointer all being 0.
344 */
345 if (header == 0)
346 return 0;
347
348 while (ttl-- > 0) {
349 if (PCI_EXT_CAP_ID(header) == cap)
350 return pos;
351
352 pos = PCI_EXT_CAP_NEXT(header);
Zhao, Yu557848c2008-10-13 19:18:07 +0800353 if (pos < PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700354 break;
355
356 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
357 break;
358 }
359
360 return 0;
361}
Brice Goglin3a720d72006-05-23 06:10:01 -0400362EXPORT_SYMBOL_GPL(pci_find_ext_capability);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
Jesse Barnescf4c43d2009-07-15 13:13:00 -0700364/**
365 * pci_bus_find_ext_capability - find an extended capability
366 * @bus: the PCI bus to query
367 * @devfn: PCI device to query
368 * @cap: capability code
369 *
370 * Like pci_find_ext_capability() but works for pci devices that do not have a
371 * pci_dev structure set up yet.
372 *
373 * Returns the address of the requested capability structure within the
374 * device's PCI configuration space or 0 in case the device does not
375 * support it.
376 */
377int pci_bus_find_ext_capability(struct pci_bus *bus, unsigned int devfn,
378 int cap)
379{
380 u32 header;
381 int ttl;
382 int pos = PCI_CFG_SPACE_SIZE;
383
384 /* minimum 8 bytes per capability */
385 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
386
387 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
388 return 0;
389 if (header == 0xffffffff || header == 0)
390 return 0;
391
392 while (ttl-- > 0) {
393 if (PCI_EXT_CAP_ID(header) == cap)
394 return pos;
395
396 pos = PCI_EXT_CAP_NEXT(header);
397 if (pos < PCI_CFG_SPACE_SIZE)
398 break;
399
400 if (!pci_bus_read_config_dword(bus, devfn, pos, &header))
401 break;
402 }
403
404 return 0;
405}
406
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100407static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
408{
409 int rc, ttl = PCI_FIND_CAP_TTL;
410 u8 cap, mask;
411
412 if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
413 mask = HT_3BIT_CAP_MASK;
414 else
415 mask = HT_5BIT_CAP_MASK;
416
417 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
418 PCI_CAP_ID_HT, &ttl);
419 while (pos) {
420 rc = pci_read_config_byte(dev, pos + 3, &cap);
421 if (rc != PCIBIOS_SUCCESSFUL)
422 return 0;
423
424 if ((cap & mask) == ht_cap)
425 return pos;
426
Brice Goglin47a4d5b2007-01-10 23:15:29 -0800427 pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
428 pos + PCI_CAP_LIST_NEXT,
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100429 PCI_CAP_ID_HT, &ttl);
430 }
431
432 return 0;
433}
434/**
435 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
436 * @dev: PCI device to query
437 * @pos: Position from which to continue searching
438 * @ht_cap: Hypertransport capability code
439 *
440 * To be used in conjunction with pci_find_ht_capability() to search for
441 * all capabilities matching @ht_cap. @pos should always be a value returned
442 * from pci_find_ht_capability().
443 *
444 * NB. To be 100% safe against broken PCI devices, the caller should take
445 * steps to avoid an infinite loop.
446 */
447int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
448{
449 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
450}
451EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
452
453/**
454 * pci_find_ht_capability - query a device's Hypertransport capabilities
455 * @dev: PCI device to query
456 * @ht_cap: Hypertransport capability code
457 *
458 * Tell if a device supports a given Hypertransport capability.
459 * Returns an address within the device's PCI configuration space
460 * or 0 in case the device does not support the request capability.
461 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
462 * which has a Hypertransport capability matching @ht_cap.
463 */
464int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
465{
466 int pos;
467
468 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
469 if (pos)
470 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
471
472 return pos;
473}
474EXPORT_SYMBOL_GPL(pci_find_ht_capability);
475
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476/**
477 * pci_find_parent_resource - return resource region of parent bus of given region
478 * @dev: PCI device structure contains resources to be searched
479 * @res: child resource record for which parent is sought
480 *
481 * For given resource region of given device, return the resource
482 * region of parent bus the given region is contained in or where
483 * it should be allocated from.
484 */
485struct resource *
486pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
487{
488 const struct pci_bus *bus = dev->bus;
489 int i;
Bjorn Helgaas89a74ec2010-02-23 10:24:31 -0700490 struct resource *best = NULL, *r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491
Bjorn Helgaas89a74ec2010-02-23 10:24:31 -0700492 pci_bus_for_each_resource(bus, r, i) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493 if (!r)
494 continue;
495 if (res->start && !(res->start >= r->start && res->end <= r->end))
496 continue; /* Not contained */
497 if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
498 continue; /* Wrong type */
499 if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
500 return r; /* Exact match */
Linus Torvalds8c8def22009-11-09 12:04:32 -0800501 /* We can't insert a non-prefetch resource inside a prefetchable parent .. */
502 if (r->flags & IORESOURCE_PREFETCH)
503 continue;
504 /* .. but we can put a prefetchable resource inside a non-prefetchable one */
505 if (!best)
506 best = r;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 }
508 return best;
509}
510
511/**
John W. Linville064b53db2005-07-27 10:19:44 -0400512 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
513 * @dev: PCI device to have its BARs restored
514 *
515 * Restore the BAR values for a given device, so as to make it
516 * accessible by its driver.
517 */
Adrian Bunkad6685992007-10-27 03:06:22 +0200518static void
John W. Linville064b53db2005-07-27 10:19:44 -0400519pci_restore_bars(struct pci_dev *dev)
520{
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800521 int i;
John W. Linville064b53db2005-07-27 10:19:44 -0400522
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800523 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
Yu Zhao14add802008-11-22 02:38:52 +0800524 pci_update_resource(dev, i);
John W. Linville064b53db2005-07-27 10:19:44 -0400525}
526
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200527static struct pci_platform_pm_ops *pci_platform_pm;
528
529int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
530{
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200531 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
532 || !ops->sleep_wake || !ops->can_wakeup)
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200533 return -EINVAL;
534 pci_platform_pm = ops;
535 return 0;
536}
537
538static inline bool platform_pci_power_manageable(struct pci_dev *dev)
539{
540 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
541}
542
543static inline int platform_pci_set_power_state(struct pci_dev *dev,
544 pci_power_t t)
545{
546 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
547}
548
549static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
550{
551 return pci_platform_pm ?
552 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
553}
Randy Dunlap8f7020d2005-10-23 11:57:38 -0700554
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200555static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
556{
557 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
558}
559
560static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
561{
562 return pci_platform_pm ?
563 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
564}
565
Rafael J. Wysockib67ea762010-02-17 23:44:09 +0100566static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
567{
568 return pci_platform_pm ?
569 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
570}
571
John W. Linville064b53db2005-07-27 10:19:44 -0400572/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200573 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
574 * given PCI device
575 * @dev: PCI device to handle.
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200576 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700577 *
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200578 * RETURN VALUE:
579 * -EINVAL if the requested state is invalid.
580 * -EIO if device does not support PCI PM or its PM capabilities register has a
581 * wrong version, or device doesn't support the requested state.
582 * 0 if device already is in the requested state.
583 * 0 if device's power state has been successfully changed.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700584 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* No PM capability means the device cannot be driven through PMCSR. */
	if (!dev->pm_cap)
		return -EIO;

	/* Only D0..D3hot can be programmed via PMCSR. */
	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		/* Normal case: just rewrite the PowerState field. */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/* Leaving D3hot without No_Soft_Reset set may reset the
		 * device, so its BARs must be restored afterwards. */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Read back: the device may have refused the transition. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/* According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Let ASPM react to the link partner's new power state. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
677
678/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200679 * pci_update_current_state - Read PCI power state of given device from its
680 * PCI PM registers and cache it
681 * @dev: PCI device to handle.
Rafael J. Wysockif06fc0b2008-12-27 16:30:52 +0100682 * @state: State to cache in case the device doesn't have the PM capability
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200683 */
Rafael J. Wysocki734104292009-01-07 13:07:15 +0100684void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200685{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200686 if (dev->pm_cap) {
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200687 u16 pmcsr;
688
Rafael J. Wysocki337001b2008-07-07 03:36:24 +0200689 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200690 dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
Rafael J. Wysockif06fc0b2008-12-27 16:30:52 +0100691 } else {
692 dev->current_state = state;
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200693 }
694}
695
696/**
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100697 * pci_platform_power_transition - Use platform to change device power state
698 * @dev: PCI device to handle.
699 * @state: State to put the device into.
700 */
701static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
702{
703 int error;
704
705 if (platform_pci_power_manageable(dev)) {
706 error = platform_pci_set_power_state(dev, state);
707 if (!error)
708 pci_update_current_state(dev, state);
Ajaykumar Hotchandanib51306c2011-12-12 13:57:36 +0530709 /* Fall back to PCI_D0 if native PM is not supported */
710 if (!dev->pm_cap)
711 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100712 } else {
713 error = -ENODEV;
714 /* Fall back to PCI_D0 if native PM is not supported */
Rafael J. Wysockib3bad722009-05-17 20:17:06 +0200715 if (!dev->pm_cap)
716 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100717 }
718
719 return error;
720}
721
722/**
723 * __pci_start_power_transition - Start power transition of a PCI device
724 * @dev: PCI device to handle.
725 * @state: State to put the device into.
726 */
727static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
728{
729 if (state == PCI_D0)
730 pci_platform_power_transition(dev, PCI_D0);
731}
732
733/**
734 * __pci_complete_power_transition - Complete power transition of a PCI device
735 * @dev: PCI device to handle.
736 * @state: State to put the device into.
737 *
738 * This function should not be called directly by device drivers.
739 */
740int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
741{
Matthew Garrettcc2893b2010-04-22 09:30:51 -0400742 return state >= PCI_D0 ?
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100743 pci_platform_power_transition(dev, state) : -EINVAL;
744}
745EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
746
747/**
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200748 * pci_set_power_state - Set the power state of a PCI device
749 * @dev: PCI device to handle.
750 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
751 *
Nick Andrew877d0312009-01-26 11:06:57 +0100752 * Transition a device to a new power state, using the platform firmware and/or
Rafael J. Wysocki44e4e662008-07-07 03:32:52 +0200753 * the device's PCI PM registers.
754 *
755 * RETURN VALUE:
756 * -EINVAL if the requested state is invalid.
757 * -EIO if device does not support PCI PM or its PM capabilities register has a
758 * wrong version, or device doesn't support the requested state.
759 * 0 if device already is in the requested state.
760 * 0 if device's power state has been successfully changed.
761 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3hot)
		state = PCI_D3hot;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Give the platform the first word (it only acts for D0). */
	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state == PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	error = pci_raw_set_power_state(dev, state);

	/* A successful platform-side completion overrides a PMCSR error. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
799
/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */

pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	/* Devices without a PM capability can only be left in D0. */
	if (!pci_find_capability(dev, PCI_CAP_ID_PM))
		return PCI_D0;

	/* Platform firmware (e.g. ACPI) gets the first say. */
	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
		/* fall through */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		dev_info(&dev->dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	/* Not reached: BUG() above does not return. */
	return PCI_D0;
}

EXPORT_SYMBOL(pci_choose_state);
839
/* Number of 16-bit PCIe control registers saved by pci_save_pcie_state(). */
#define PCI_EXP_SAVE_REGS	7

/*
 * Which PCIe capability registers exist depends on the capability
 * structure version (PCI_EXP_FLAGS_VERS) and on the device/port type.
 * These predicates encode those rules for the save/restore code below;
 * save and restore must apply them in the same order so the saved u16
 * slots line up.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
#define pcie_cap_has_devctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_lnkctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
#define pcie_cap_has_sltctl2(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1)
863
/*
 * pci_find_saved_cap - look up the saved-state buffer for capability @cap
 * @pci_dev: device whose saved_cap_space list is searched
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Returns the matching entry from the device's saved-capability hlist,
 * or NULL if no buffer was pre-allocated for this capability.
 */
static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}
876
/*
 * pci_save_pcie_state - save the PCIe capability control registers
 * @dev: device to save
 *
 * Reads the per-type/per-version subset of PCIe control registers into
 * the pre-allocated PCI_CAP_ID_EXP saved-state buffer.  The order of
 * the reads must match the writes in pci_restore_pcie_state().
 *
 * Returns 0 on success (or if the device has no PCIe capability),
 * -ENOMEM if no save buffer was allocated at probe time.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	/* Capability version/type decide which registers exist. */
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}
914
/*
 * pci_restore_pcie_state - write back the PCIe control registers saved by
 * pci_save_pcie_state()
 * @dev: device to restore
 *
 * The sequence of conditional writes must mirror the reads in
 * pci_save_pcie_state() so each saved u16 lands in the right register.
 * Silently does nothing if there is no saved buffer or no capability.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	/* Re-read FLAGS: version/type decide which registers exist. */
	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	if (pcie_cap_has_devctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);
	if (pcie_cap_has_devctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	if (pcie_cap_has_lnkctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	if (pcie_cap_has_sltctl2(dev->pcie_type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
945
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800946
/*
 * pci_save_pcix_state - save the PCI-X command register
 * @dev: device to save
 *
 * Stores the single PCI_X_CMD word into the pre-allocated
 * PCI_CAP_ID_PCIX saved-state buffer.
 *
 * Returns 0 on success (or if the device has no PCI-X capability),
 * -ENOMEM if no save buffer was allocated at probe time.
 */
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}
967
968static void pci_restore_pcix_state(struct pci_dev *dev)
969{
970 int i = 0, pos;
971 struct pci_cap_saved_state *save_state;
972 u16 *cap;
973
974 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
975 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
976 if (!save_state || pos <= 0)
977 return;
Alex Williamson24a4742f2011-05-10 10:02:11 -0600978 cap = (u16 *)&save_state->cap.data[0];
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800979
980 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800981}
982
983
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984/**
985 * pci_save_state - save the PCI configuration space of a device before suspending
986 * @dev: - PCI device that we're dealing with
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987 */
988int
989pci_save_state(struct pci_dev *dev)
990{
991 int i;
992 /* XXX: 100% dword access ok here? */
993 for (i = 0; i < 16; i++)
Kleber Sacilotto de Souza9e0b5b22009-11-25 00:55:51 -0200994 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
Rafael J. Wysockiaa8c6c92009-01-16 21:54:43 +0100995 dev->state_saved = true;
Michael S. Tsirkinb56a5a22006-08-21 16:22:22 +0300996 if ((i = pci_save_pcie_state(dev)) != 0)
997 return i;
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800998 if ((i = pci_save_pcix_state(dev)) != 0)
999 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001000 return 0;
1001}
1002
/*
 * pci_restore_config_dword - restore one config-space dword, with retries
 * @pdev: device being restored
 * @offset: byte offset into config space
 * @saved_val: value to restore
 * @retry: how many additional write attempts to make if readback mismatches
 *
 * Fast path: if the register already holds @saved_val, do nothing.
 * Otherwise write it and re-read, retrying with a 1 ms delay between
 * attempts; some devices need time after reset before config writes
 * stick.  Gives up silently after @retry extra attempts.
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}
1026
Rafael J. Wysockia6cb9ee2012-04-16 23:07:50 +02001027static void pci_restore_config_space_range(struct pci_dev *pdev,
1028 int start, int end, int retry)
Rafael J. Wysockiebfc5b82012-04-15 21:40:40 +02001029{
1030 int index;
1031
1032 for (index = end; index >= start; index--)
1033 pci_restore_config_dword(pdev, 4 * index,
1034 pdev->saved_config_space[index],
1035 retry);
1036}
1037
Rafael J. Wysockia6cb9ee2012-04-16 23:07:50 +02001038static void pci_restore_config_space(struct pci_dev *pdev)
1039{
1040 if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
1041 pci_restore_config_space_range(pdev, 10, 15, 0);
1042 /* Restore BARs before the command register. */
1043 pci_restore_config_space_range(pdev, 4, 9, 10);
1044 pci_restore_config_space_range(pdev, 0, 3, 0);
1045 } else {
1046 pci_restore_config_space_range(pdev, 0, 15, 0);
1047 }
1048}
1049
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 *
 * No-op unless pci_save_state() ran first.  Restore order matters:
 * PCIe/ATS capability state first, then the standard config header
 * (BARs before the command register), then PCI-X, MSI and SR-IOV
 * state.  Clears @dev->state_saved when done.
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
1071
Alex Williamsonffbdd3f2011-05-10 10:02:27 -06001072struct pci_saved_state {
1073 u32 config_space[16];
1074 struct pci_cap_saved_data cap[0];
1075};
1076
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* One extra (zeroed) pci_cap_saved_data acts as the terminator. */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	/* kzalloc so the trailing terminator entry has size == 0. */
	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	/* Pack each capability's header + payload back to back. */
	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1118
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 *
 * Returns 0 on success (a NULL @state just clears dev->state_saved),
 * -EINVAL if a capability record in @state has no matching buffer on
 * the device or the sizes disagree.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	/* Invalidate until the whole snapshot has been copied in. */
	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	/* Walk the packed records; a size of 0 terminates the list. */
	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1153
/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 *
 * Frees *@state and NULLs the caller's pointer even if the load fails;
 * returns the pci_load_saved_state() result.
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1169
/*
 * do_pci_enable_device - low-level device enable
 * @dev: device to enable
 * @bars: bitmask of BARs to enable
 *
 * Puts the device into D0, asks the arch code to enable the given
 * resources, then applies enable-time quirks.  -EIO from
 * pci_set_power_state() is tolerated: it means the device has no
 * usable PM capability, which does not prevent enabling it.
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
1184
/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	/* Re-enable every resource; no-op if the device was never enabled. */
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
1198
/*
 * __pci_enable_device_flags - enable the resources matching @flags
 * @dev: device to enable
 * @flags: IORESOURCE_IO and/or IORESOURCE_MEM
 *
 * Backend for pci_enable_device{,_io,_mem}().  Refcounted via
 * dev->enable_cnt: only the first caller actually enables the
 * hardware; on failure the count is rolled back.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back the refcount */
	return err;
}
1233
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Returns 0 on success or a negative errno from the enable backend.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}
1246
/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Returns 0 on success or a negative errno from the enable backend.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}
1259
/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	/* Enable both address spaces; refcounting lives in the backend. */
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1275
/*
 * Managed PCI resources. This manages device on/off, intx/msi/msix
 * on/off and BAR regions. pci_dev itself records msi/msix status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* don't disable on driver detach */
	unsigned int orig_intx:1;	/* INTx state to restore */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* BAR regions claimed via managed API */
};
1289
/*
 * pcim_release - devres release callback for managed PCI devices
 * @gendev: generic device whose resources are being torn down
 * @res: the pci_devres record
 *
 * Undoes everything the managed API set up: MSI/MSI-X, claimed BAR
 * regions, INTx state, and finally the device enable itself (unless
 * the device was pinned with pcim_pin_device()).
 */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	/* Release every BAR region the managed API claimed. */
	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1311
/*
 * get_pci_dr - find or create the pci_devres record for @pdev
 *
 * Returns the existing devres entry if one is attached, otherwise
 * allocates a zeroed one and registers it.  Returns NULL on allocation
 * failure.
 */
static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	/* devres_get() attaches new_dr (or returns a concurrently added one). */
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}
1325
1326static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1327{
1328 if (pci_is_managed(pdev))
1329 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1330 return NULL;
1331}
1332
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 *
 * Returns 0 on success, -ENOMEM if the devres record cannot be
 * allocated, or the pci_enable_device() error.  Idempotent: a second
 * call on an already-enabled managed device returns 0.
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		/* Mark managed so find_pci_dr()/pcim_release() take effect. */
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
1357
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	/* Calling this without a prior pcim_enable_device() is a bug. */
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
1375
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
/* Weak no-op stub; an arch providing its own symbol replaces it at link time. */
void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
1385
Rafael J. Wysockifa58d302009-01-07 13:03:42 +01001386static void do_pci_disable_device(struct pci_dev *dev)
1387{
1388 u16 pci_command;
1389
1390 pci_read_config_word(dev, PCI_COMMAND, &pci_command);
1391 if (pci_command & PCI_COMMAND_MASTER) {
1392 pci_command &= ~PCI_COMMAND_MASTER;
1393 pci_write_config_word(dev, PCI_COMMAND, pci_command);
1394 }
1395
1396 pcibios_disable_device(dev);
1397}
1398
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
1411
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Keep the managed-API bookkeeping in sync. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the last pci_disable_device() actually disables. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1438
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
/* Weak stub: reports "not supported" unless the arch provides its own. */
int __attribute__ ((weak)) pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}
1453
/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 *
 * Returns the arch backend's result (-EINVAL if unimplemented).
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
1466
1467/**
Rafael J. Wysocki58ff4632010-02-17 23:36:58 +01001468 * pci_check_pme_status - Check if given device has generated PME.
1469 * @dev: Device to check.
1470 *
1471 * Check the PME status of the device and if set, clear it and clear PME enable
1472 * (if set). Return 'true' if PME status and PME enable were both set or
1473 * 'false' otherwise.
1474 */
1475bool pci_check_pme_status(struct pci_dev *dev)
1476{
1477 int pmcsr_pos;
1478 u16 pmcsr;
1479 bool ret = false;
1480
1481 if (!dev->pm_cap)
1482 return false;
1483
1484 pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
1485 pci_read_config_word(dev, pmcsr_pos, &pmcsr);
1486 if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
1487 return false;
1488
1489 /* Clear PME status. */
1490 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1491 if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
1492 /* Disable PME to avoid interrupt flood. */
1493 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1494 ret = true;
1495 }
1496
1497 pci_write_config_word(dev, pmcsr_pos, pmcsr);
1498
1499 return ret;
1500}
1501
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.
 *
 * Always returns 0 so that, when used as a pci_walk_bus() callback, the
 * walk continues over the whole subtree.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		/* Report the wakeup and ask runtime PM to resume the device. */
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}
1521
1522/**
1523 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1524 * @bus: Top bus of the subtree to walk.
1525 */
1526void pci_pme_wakeup_bus(struct pci_bus *bus)
1527{
1528 if (bus)
Rafael J. Wysocki379021d2011-10-03 23:16:33 +02001529 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001530}
1531
1532/**
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001533 * pci_pme_capable - check the capability of PCI device to generate PME#
1534 * @dev: PCI device to handle.
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001535 * @state: PCI state from which device will issue PME#.
1536 */
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02001537bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001538{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001539 if (!dev->pm_cap)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001540 return false;
1541
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001542 return !!(dev->pme_support & (1 << state));
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001543}
1544
/*
 * pci_pme_list_scan - delayed-work handler that polls PME status
 * @work: the pci_pme_work item (unused)
 *
 * For devices whose PME# line cannot be trusted (dev->pme_poll), poll
 * the PME status bit and wake them if set; entries whose pme_poll flag
 * was cleared in the meantime are dropped from the list.  Re-arms
 * itself while the list is non-empty.  All list walking happens under
 * pci_pme_list_mutex.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		/* _safe variant: entries may be deleted while iterating. */
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1565
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* Kick the poller if it was idle before this entry. */
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			/* Remove this device's entry from the polling list. */
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
1629
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	/* System wakeup needs the user/driver opt-in; runtime wake does not */
	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* no native PME# from @state */
		error = runtime ? platform_pci_run_wake(dev, true) :
				  platform_pci_sleep_wake(dev, true);
		/* platform result matters only when native PME# was unusable */
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		/* disable: platform first, then native PME#, for symmetry */
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001693
1694/**
Rafael J. Wysocki0235c4f2008-08-18 21:38:00 +02001695 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1696 * @dev: PCI device to prepare
1697 * @enable: True to enable wake-up event generation; false to disable
1698 *
1699 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1700 * and this function allows them to set that up cleanly - pci_enable_wake()
1701 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1702 * ordering constraints.
1703 *
1704 * This function only returns error code if the device is not capable of
1705 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1706 * enable wake-up power for it.
1707 */
1708int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1709{
1710 return pci_pme_capable(dev, PCI_D3cold) ?
1711 pci_enable_wake(dev, PCI_D3cold, enable) :
1712 pci_enable_wake(dev, PCI_D3hot, enable);
1713}
1714
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			break;	/* keep the PCI_D3hot default */
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fallthrough: D1/D2 allowed, accept platform choice */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		/* no PM capability: D0 is the only state we can count on */
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			      && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1762
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* arm wake-up before the power-state change */
	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		/* transition failed: don't leave wake-up armed */
		pci_enable_wake(dev, target_state, false);

	return error;
}
1788
1789/**
Randy Dunlap443bd1c2008-07-21 09:27:18 -07001790 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
Rafael J. Wysocki404cc2d2008-07-07 03:35:26 +02001791 * @dev: Device to handle.
1792 *
Thomas Weber88393162010-03-16 11:47:56 +01001793 * Disable device's system wake-up capability and put it into D0.
Rafael J. Wysocki404cc2d2008-07-07 03:35:26 +02001794 */
1795int pci_back_from_sleep(struct pci_dev *dev)
1796{
1797 pci_enable_wake(dev, PCI_D0, false);
1798 return pci_set_power_state(dev, PCI_D0);
1799}
1800
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* arm runtime wake-up (if supported) before suspending */
	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	if (error)
		/* roll back the wake-up preparation on failure */
		__pci_enable_wake(dev, target_state, true, false);

	return error;
}
1825
/**
 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
 * @dev: Device to check.
 *
 * Return true if the device itself is capable of generating wake-up events
 * (through the platform or using the native PCIe PME) or if the device supports
 * PME and one of its upstream bridges can generate wake-up events.
 */
bool pci_dev_run_wake(struct pci_dev *dev)
{
	struct pci_bus *bus = dev->bus;

	/* the device itself can signal runtime wake-up */
	if (device_run_wake(&dev->dev))
		return true;

	/* without PME support, upstream bridges cannot help */
	if (!dev->pme_support)
		return false;

	/* check each upstream bridge on the way to the root bus */
	while (bus->parent) {
		struct pci_dev *bridge = bus->self;

		if (device_run_wake(&bridge->dev))
			return true;

		bus = bus->parent;
	}

	/* We have reached the root bus. */
	if (bus->bridge)
		return device_run_wake(bus->bridge);

	return false;
}
EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1860
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Probes the PCI PM capability, records D1/D2/PME support in @dev and
 * leaves PME# generation disabled.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	/* runtime PM is opt-in: drivers must call pm_runtime_allow() */
	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;

	/* record D1/D2 support unless quirked off for this device */
	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		/* PME is polled by default; see pci_pme_list_scan() */
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1928
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events). If @dev supports
 * platform wakeup events, set the device flag to indicate as much. This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	/* start with platform wake-up disabled */
	platform_pci_sleep_wake(dev, false);
}
1947
/* Link a saved-capability buffer into the device's saved_cap_space list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
1953
/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 *
 * Returns 0 on success (including the benign case where @dev does not have
 * capability @cap at all) and -ENOMEM if the buffer cannot be allocated.
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	/* header plus @size bytes of register payload */
	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
1980
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 *
 * Preallocates the PCI Express and PCI-X capability save buffers; failures
 * are logged but not fatal.
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	/* PCIe: room for PCI_EXP_SAVE_REGS 16-bit registers */
	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}
2000
/* Free every capability save buffer attached to @dev. */
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	/* _safe iteration: entries are freed while walking */
	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
2009
/**
 * pci_enable_ari - enable ARI forwarding if hardware supports it
 * @dev: the PCI device
 *
 * ARI forwarding is actually enabled in the upstream bridge of @dev;
 * only PCIe devices at devfn 0 are considered.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* nothing to do unless enabled, PCIe, and devfn 0 */
	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	/* the device must advertise the ARI extended capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	/* ARI is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(bridge);
	if (!pos)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
2047
/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev. @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* read-modify-write of Device Control 2 */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
2075
/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* clear only the requested IDO enable bits */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl &= ~PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl &= ~PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
2099
Jesse Barnes48a92a82011-01-10 12:46:36 -08002100/**
2101 * pci_enable_obff - enable optimized buffer flush/fill
2102 * @dev: PCI device
2103 * @type: type of signaling to use
2104 *
2105 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
2106 * signaling if possible, falling back to message signaling only if
2107 * WAKE# isn't supported. @type should indicate whether the PCIe link
2108 * be brought out of L0s or L1 to send the message. It should be either
2109 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
2110 *
2111 * If your device can benefit from receiving all messages, even at the
2112 * power cost of bringing the link back up from a low power state, use
2113 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
2114 * preferred type).
2115 *
2116 * RETURNS:
2117 * Zero on success, appropriate error number on failure.
2118 */
2119int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
2120{
2121 int pos;
2122 u32 cap;
2123 u16 ctrl;
2124 int ret;
2125
Myron Stowec463b8c2012-06-01 15:16:37 -06002126 /* OBFF is a PCIe cap v2 feature */
2127 pos = pci_pcie_cap2(dev);
Jesse Barnes48a92a82011-01-10 12:46:36 -08002128 if (!pos)
2129 return -ENOTSUPP;
2130
2131 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2132 if (!(cap & PCI_EXP_OBFF_MASK))
2133 return -ENOTSUPP; /* no OBFF support at all */
2134
2135 /* Make sure the topology supports OBFF as well */
2136 if (dev->bus) {
2137 ret = pci_enable_obff(dev->bus->self, type);
2138 if (ret)
2139 return ret;
2140 }
2141
2142 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2143 if (cap & PCI_EXP_OBFF_WAKE)
2144 ctrl |= PCI_EXP_OBFF_WAKE_EN;
2145 else {
2146 switch (type) {
2147 case PCI_EXP_OBFF_SIGNAL_L0:
2148 if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
2149 ctrl |= PCI_EXP_OBFF_MSGA_EN;
2150 break;
2151 case PCI_EXP_OBFF_SIGNAL_ALWAYS:
2152 ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
2153 ctrl |= PCI_EXP_OBFF_MSGB_EN;
2154 break;
2155 default:
2156 WARN(1, "bad OBFF signal type\n");
2157 return -ENOTSUPP;
2158 }
2159 }
2160 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2161
2162 return 0;
2163}
2164EXPORT_SYMBOL(pci_enable_obff);
2165
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	/* OBFF is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* clear only the WAKE# enable bit in Device Control 2 */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
2187
/**
 * pci_ltr_supported - check whether a device supports LTR
 * @dev: PCI device
 *
 * RETURNS:
 * True if @dev supports latency tolerance reporting, false otherwise.
 */
static bool pci_ltr_supported(struct pci_dev *dev)
{
	int pos;
	u32 cap;

	/* LTR is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return false;

	/* LTR support is advertised in Device Capabilities 2 */
	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);

	return cap & PCI_EXP_DEVCAP2_LTR;
}
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002209
2210/**
2211 * pci_enable_ltr - enable latency tolerance reporting
2212 * @dev: PCI device
2213 *
2214 * Enable LTR on @dev if possible, which means enabling it first on
2215 * upstream ports.
2216 *
2217 * RETURNS:
2218 * Zero on success, errno on failure.
2219 */
2220int pci_enable_ltr(struct pci_dev *dev)
2221{
2222 int pos;
2223 u16 ctrl;
2224 int ret;
2225
2226 if (!pci_ltr_supported(dev))
2227 return -ENOTSUPP;
2228
Myron Stowec463b8c2012-06-01 15:16:37 -06002229 /* LTR is a PCIe cap v2 feature */
2230 pos = pci_pcie_cap2(dev);
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002231 if (!pos)
2232 return -ENOTSUPP;
2233
2234 /* Only primary function can enable/disable LTR */
2235 if (PCI_FUNC(dev->devfn) != 0)
2236 return -EINVAL;
2237
2238 /* Enable upstream ports first */
2239 if (dev->bus) {
2240 ret = pci_enable_ltr(dev->bus->self);
2241 if (ret)
2242 return ret;
2243 }
2244
2245 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
2246 ctrl |= PCI_EXP_LTR_EN;
2247 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
2248
2249 return 0;
2250}
2251EXPORT_SYMBOL(pci_enable_ltr);
2252
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_ltr_supported(dev))
		return;

	/* LTR is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	/* clear the LTR enable bit in Device Control 2 */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
2279
/*
 * __pci_ltr_scale - reduce a latency to the LTR value/scale encoding.
 * @val: in/out latency; divided (rounding up) by 32 per step until it
 *       fits in the 10-bit LTR value field (<= 1023).
 *
 * Returns the number of scale steps applied.
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2290
2291/**
2292 * pci_set_ltr - set LTR latency values
2293 * @dev: PCI device
2294 * @snoop_lat_ns: snoop latency in nanoseconds
2295 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2296 *
2297 * Figure out the scale and set the LTR values accordingly.
2298 */
2299int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2300{
2301 int pos, ret, snoop_scale, nosnoop_scale;
2302 u16 val;
2303
2304 if (!pci_ltr_supported(dev))
2305 return -ENOTSUPP;
2306
2307 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2308 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2309
2310 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2311 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2312 return -EINVAL;
2313
2314 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2315 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2316 return -EINVAL;
2317
2318 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2319 if (!pos)
2320 return -ENOTSUPP;
2321
2322 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2323 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2324 if (ret != 4)
2325 return -EIO;
2326
2327 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2328 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2329 if (ret != 4)
2330 return -EIO;
2331
2332 return 0;
2333}
2334EXPORT_SYMBOL(pci_set_ltr);
2335
/* Non-zero once someone (e.g. an IOMMU user) has requested ACS. */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2345
/**
 * pci_enable_acs - enable ACS if hardware supports it
 * @dev: the PCI device
 *
 * No-op unless pci_request_acs() was called and @dev is a PCIe device
 * advertising the ACS extended capability.
 */
void pci_enable_acs(struct pci_dev *dev)
{
	int pos;
	u16 cap;
	u16 ctrl;

	if (!pci_acs_enable)
		return;

	if (!pci_is_pcie(dev))
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
	if (!pos)
		return;

	/* enable each ACS feature the hardware advertises in its cap */
	pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
	pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);

	/* Source Validation */
	ctrl |= (cap & PCI_ACS_SV);

	/* P2P Request Redirect */
	ctrl |= (cap & PCI_ACS_RR);

	/* P2P Completion Redirect */
	ctrl |= (cap & PCI_ACS_CR);

	/* Upstream Forwarding */
	ctrl |= (cap & PCI_ACS_UF);

	pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
2383
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge. This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards. For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	/* rotate the 1-based pin number by the slot number, modulo 4 */
	return (((pin - 1) + slot) % 4) + 1;
}
2406
/**
 * pci_get_interrupt_pin - find the INTx pin as seen at the root bus
 * @dev: the PCI device
 * @bridge: output; set to the uppermost device on the path to the root bus
 *	(the device itself if it already sits on a root bus)
 *
 * Walks up from @dev, swizzling the INTx pin at each bridge crossing.
 * Returns the swizzled pin number (1=INTA .. 4=INTD), or -1 if the
 * device does not use an interrupt pin.
 */
int
pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
{
	u8 pin;

	pin = dev->pin;
	if (!pin)
		return -1;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*bridge = dev;
	return pin;
}
2423
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	/* Slot number of the device directly on the root bus */
	return PCI_SLOT(dev->devfn);
}
2443
2444/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445 * pci_release_region - Release a PCI bar
2446 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2447 * @bar: BAR to release
2448 *
2449 * Releases the PCI I/O and memory resources previously reserved by a
2450 * successful call to pci_request_region. Call this function only
2451 * after all use of the PCI regions has ceased.
2452 */
2453void pci_release_region(struct pci_dev *pdev, int bar)
2454{
Tejun Heo9ac78492007-01-20 16:00:26 +09002455 struct pci_devres *dr;
2456
Linus Torvalds1da177e2005-04-16 15:20:36 -07002457 if (pci_resource_len(pdev, bar) == 0)
2458 return;
2459 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2460 release_region(pci_resource_start(pdev, bar),
2461 pci_resource_len(pdev, bar));
2462 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2463 release_mem_region(pci_resource_start(pdev, bar),
2464 pci_resource_len(pdev, bar));
Tejun Heo9ac78492007-01-20 16:00:26 +09002465
2466 dr = find_pci_dr(pdev);
2467 if (dr)
2468 dr->region_mask &= ~(1 << bar);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469}
2470
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	/* Unimplemented BARs have zero length; treat as trivially reserved */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the reservation so devres cleanup can release it later */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
2521
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	/* Non-exclusive: userspace /dev/mem and sysfs mappings stay allowed */
	return __pci_request_region(pdev, bar, res_name, 0);
}
2540
/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference with the _exclusive variant is that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int bar;

	/* Walk the six standard BARs, releasing each one selected in @bars */
	for (bar = 0; bar < 6; bar++) {
		if (bars & (1 << bar))
			pci_release_region(pdev, bar);
	}
}
2579
Arjan van de Vene8de1482008-10-22 19:55:31 -07002580int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
2581 const char *res_name, int excl)
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002582{
2583 int i;
2584
2585 for (i = 0; i < 6; i++)
2586 if (bars & (1 << i))
Arjan van de Vene8de1482008-10-22 19:55:31 -07002587 if (__pci_request_region(pdev, i, res_name, excl))
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09002588 goto err_out;
2589 return 0;
2590
2591err_out:
2592 while(--i >= 0)
2593 if (bars & (1 << i))
2594 pci_release_region(pdev, i);
2595
2596 return -EBUSY;
2597}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002598
Arjan van de Vene8de1482008-10-22 19:55:31 -07002599
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error.
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	/* Non-exclusive request: userspace mappings remain permitted */
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
2611
/*
 * Like pci_request_selected_regions(), but marks the regions
 * IORESOURCE_EXCLUSIVE so userspace cannot map them via /dev/mem
 * or sysfs.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
					   int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
					      IORESOURCE_EXCLUSIVE);
}
2618
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
2632
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
2650
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
2672
Ben Hutchings6a479072008-12-23 03:08:29 +00002673static void __pci_set_master(struct pci_dev *dev, bool enable)
2674{
2675 u16 old_cmd, cmd;
2676
2677 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2678 if (enable)
2679 cmd = old_cmd | PCI_COMMAND_MASTER;
2680 else
2681 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2682 if (cmd != old_cmd) {
2683 dev_dbg(&dev->dev, "%s bus mastering\n",
2684 enable ? "enabling" : "disabling");
2685 pci_write_config_word(dev, PCI_COMMAND, cmd);
2686 }
2687 dev->is_busmaster = enable;
2688}
Arjan van de Vene8de1482008-10-22 19:55:31 -07002689
/**
 * pcibios_set_master - enable PCI bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables PCI bus-mastering for the device.  This is the default
 * implementation.  Architecture specific implementations can override
 * this if necessary.
 */
void __weak pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
	if (pci_is_pcie(dev))
		return;

	/*
	 * Clamp the latency timer into [16, pcibios_max_latency]:
	 * raise very small values to 64 (capped at the maximum), lower
	 * oversized ones to the maximum, and leave in-range values alone.
	 */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat < 16)
		lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
	else if (lat > pcibios_max_latency)
		lat = pcibios_max_latency;
	else
		return;	/* already acceptable; no write needed */
	dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
}
2716
2717/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 * pci_set_master - enables bus-mastering for device dev
2719 * @dev: the PCI device to enable
2720 *
2721 * Enables bus-mastering on the device and calls pcibios_set_master()
2722 * to do the needed arch specific settings.
2723 */
Ben Hutchings6a479072008-12-23 03:08:29 +00002724void pci_set_master(struct pci_dev *dev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725{
Ben Hutchings6a479072008-12-23 03:08:29 +00002726 __pci_set_master(dev, true);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 pcibios_set_master(dev);
2728}
2729
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
2738
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	/* pci_cache_line_size == 0 means the arch provided no value */
	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back; a device that hardwires the register won't take it. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	/* pci_cache_line_size is in dwords; << 2 converts to bytes */
	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2776
#ifdef PCI_DISABLE_MWI
/*
 * Memory-Write-Invalidate support is compiled out on this platform:
 * provide no-op stubs that report success so callers need no #ifdefs.
 */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}

#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793
2794/**
2795 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2796 * @dev: the PCI device for which MWI is enabled
2797 *
Randy Dunlap694625c2007-07-09 11:55:54 -07002798 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 *
2800 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2801 */
2802int
2803pci_set_mwi(struct pci_dev *dev)
2804{
2805 int rc;
2806 u16 cmd;
2807
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002808 rc = pci_set_cacheline_size(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002809 if (rc)
2810 return rc;
2811
2812 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2813 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
Bjorn Helgaas80ccba12008-06-13 10:52:11 -06002814 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 cmd |= PCI_COMMAND_INVALIDATE;
2816 pci_write_config_word(dev, PCI_COMMAND, cmd);
2817 }
2818
2819 return 0;
2820}
2821
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
2836
2837/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002838 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2839 * @dev: the PCI device to disable
2840 *
2841 * Disables PCI Memory-Write-Invalidate transaction on the device
2842 */
2843void
2844pci_clear_mwi(struct pci_dev *dev)
2845{
2846 u16 cmd;
2847
2848 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2849 if (cmd & PCI_COMMAND_INVALIDATE) {
2850 cmd &= ~PCI_COMMAND_INVALIDATE;
2851 pci_write_config_word(dev, PCI_COMMAND, cmd);
2852 }
2853}
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002854#endif /* ! PCI_DISABLE_MWI */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002855
Brett M Russa04ce0f2005-08-15 15:23:41 -04002856/**
2857 * pci_intx - enables/disables PCI INTx for device dev
Randy Dunlap8f7020d2005-10-23 11:57:38 -07002858 * @pdev: the PCI device to operate on
2859 * @enable: boolean: whether to enable or disable PCI INTx
Brett M Russa04ce0f2005-08-15 15:23:41 -04002860 *
2861 * Enables/disables PCI INTx for device dev
2862 */
2863void
2864pci_intx(struct pci_dev *pdev, int enable)
2865{
2866 u16 pci_command, new;
2867
2868 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
2869
2870 if (enable) {
2871 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
2872 } else {
2873 new = pci_command | PCI_COMMAND_INTX_DISABLE;
2874 }
2875
2876 if (new != pci_command) {
Tejun Heo9ac78492007-01-20 16:00:26 +09002877 struct pci_devres *dr;
2878
Brett M Russ2fd9d742005-09-09 10:02:22 -07002879 pci_write_config_word(pdev, PCI_COMMAND, new);
Tejun Heo9ac78492007-01-20 16:00:26 +09002880
2881 dr = find_pci_dr(pdev);
2882 if (dr && !dr->restore_intx) {
2883 dr->restore_intx = 1;
2884 dr->orig_intx = !enable;
2885 }
Brett M Russa04ce0f2005-08-15 15:23:41 -04002886 }
2887}
2888
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	/* Serialize against other config-space accessors during the probe */
	pci_cfg_access_lock(dev);

	/* Toggle the INTX_DISABLE bit and see whether it sticks */
	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		/* Restore the original command word */
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
2925
/*
 * Atomically mask or unmask a device's INTx line, but only when consistent
 * with the interrupt status bit: when masking, the device must currently
 * have an interrupt pending; when unmasking, it must not.  Returns true
 * if the requested mask state was applied (or required no change).
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	/* pci_lock keeps the read-modify-write of COMMAND atomic */
	raw_spin_lock_irqsave(&pci_lock, flags);

	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	/* Status word lives in the upper 16 bits of the dword */
	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
2970
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
2984
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
2998
2999/**
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08003000 * pci_msi_off - disables any msi or msix capabilities
Randy Dunlap8d7d86e2007-03-16 19:55:52 -07003001 * @dev: the PCI device to operate on
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08003002 *
3003 * If you want to use msi see pci_enable_msi and friends.
3004 * This is a lower level primitive that allows us to disable
3005 * msi operation at the device level.
3006 */
3007void pci_msi_off(struct pci_dev *dev)
3008{
3009 int pos;
3010 u16 control;
3011
3012 pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
3013 if (pos) {
3014 pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
3015 control &= ~PCI_MSI_FLAGS_ENABLE;
3016 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
3017 }
3018 pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
3019 if (pos) {
3020 pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
3021 control &= ~PCI_MSIX_FLAGS_ENABLE;
3022 pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
3023 }
3024}
Michael S. Tsirkinb03214d2010-06-23 22:49:06 -06003025EXPORT_SYMBOL_GPL(pci_msi_off);
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08003026
/* Forward to the generic DMA API for the device's max segment size. */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
FUJITA Tomonori4d57cdf2008-02-04 22:27:55 -08003032
/* Forward to the generic DMA API for the device's segment boundary mask. */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
FUJITA Tomonori59fc67d2008-02-04 22:28:14 -08003038
/*
 * Reset a PCIe function via Function Level Reset (FLR).
 *
 * Returns -ENOTTY if the device has no PCIe capability or does not
 * advertise FLR support.  With @probe set, only reports whether this
 * reset method is available; otherwise waits (with exponential backoff,
 * up to 100+200+400 ms) for outstanding transactions to drain, then
 * initiates the FLR and sleeps 100 ms for the function to recover.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	/* Set the Initiate-FLR bit in Device Control to trigger the reset */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	msleep(100);

	return 0;
}
Sheng Yangd91cdc72008-11-11 17:17:47 +08003079
/*
 * Reset a function via the Advanced Features (AF) capability FLR.
 *
 * Returns -ENOTTY if the AF capability is absent or does not advertise
 * both Transaction Pending and FLR support.  With @probe set, only
 * reports availability; otherwise waits (with backoff) for pending
 * transactions, issues the AF FLR, and sleeps 100 ms for recovery.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
3117
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0.  If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	/* NO_SOFT_RESET means a D3hot->D0 transition won't reset the device */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	/* Force the device into D3hot ... */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	/* ... and back to D0, which performs the internal reset */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
3162
/*
 * Reset the device by toggling the secondary-bus-reset bit in its parent
 * bridge.  Only usable when @dev is the sole device on its bus, is not a
 * bridge itself, and has an upstream bridge; otherwise returns -ENOTTY.
 * With @probe set, only reports whether this method is available.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Refuse if any sibling shares the bus - they would be reset too */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Assert the secondary bus reset for 100 ms ... */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	/* ... then deassert and allow 100 ms for the device to recover */
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
3189
/*
 * Try each reset method in decreasing order of preference: device-specific
 * quirk, PCIe FLR, AF FLR, PM D3hot/D0 cycle, then secondary bus reset.
 * A method returning -ENOTTY means "not supported here, try the next one";
 * any other return (success or failure) is final.  Caller handles locking.
 */
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}
3216
/*
 * Locked wrapper around __pci_dev_reset().  For an actual reset (!probe),
 * blocks concurrent config-space access and takes the device lock so PM
 * suspend, driver probe, etc. cannot race with the reset; a pure probe
 * needs no locking.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	/* Release in reverse order of acquisition */
	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Resets a single function without saving or restoring any state.  The
 * function is presumed to be unused, and the device must be responsive
 * to PCI config space accesses.  After the reset the contents of config
 * space are undefined, so the caller must fully reinitialise the device
 * (MSI, bus mastering, BARs, I/O and memory decoding, etc.).
 *
 * Returns 0 if the device function was successfully reset or negative if
 * the device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003257
/**
 * __pci_reset_function_locked - reset a PCI device function while holding the @dev mutex lock
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3282
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Probes whether any of the single-function reset methods is usable for
 * @dev without actually performing a reset.  The device must be
 * responsive to PCI config space accesses.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	return pci_dev_reset(dev, 1);
}
3298
3299/**
Yu Zhao8c1c6992009-06-13 15:52:13 +08003300 * pci_reset_function - quiesce and reset a PCI device function
3301 * @dev: PCI device to reset
Sheng Yang8dd7f802008-10-21 17:38:25 +08003302 *
3303 * Some devices allow an individual function to be reset without affecting
3304 * other functions in the same device. The PCI device must be responsive
3305 * to PCI config space in order to use this function.
3306 *
3307 * This function does not just reset the PCI portion of a device, but
3308 * clears all the state associated with the device. This function differs
Yu Zhao8c1c6992009-06-13 15:52:13 +08003309 * from __pci_reset_function in that it saves and restores device state
Sheng Yang8dd7f802008-10-21 17:38:25 +08003310 * over the reset.
3311 *
Yu Zhao8c1c6992009-06-13 15:52:13 +08003312 * Returns 0 if the device function was successfully reset or negative if the
Sheng Yang8dd7f802008-10-21 17:38:25 +08003313 * device doesn't support resetting a single function.
3314 */
3315int pci_reset_function(struct pci_dev *dev)
3316{
Yu Zhao8c1c6992009-06-13 15:52:13 +08003317 int rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003318
Yu Zhao8c1c6992009-06-13 15:52:13 +08003319 rc = pci_dev_reset(dev, 1);
3320 if (rc)
3321 return rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003322
Sheng Yang8dd7f802008-10-21 17:38:25 +08003323 pci_save_state(dev);
3324
Yu Zhao8c1c6992009-06-13 15:52:13 +08003325 /*
3326 * both INTx and MSI are disabled after the Interrupt Disable bit
3327 * is set and the Bus Master bit is cleared.
3328 */
Sheng Yang8dd7f802008-10-21 17:38:25 +08003329 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
3330
Yu Zhao8c1c6992009-06-13 15:52:13 +08003331 rc = pci_dev_reset(dev, 0);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003332
3333 pci_restore_state(dev);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003334
Yu Zhao8c1c6992009-06-13 15:52:13 +08003335 return rc;
Sheng Yang8dd7f802008-10-21 17:38:25 +08003336}
3337EXPORT_SYMBOL_GPL(pci_reset_function);
3338
3339/**
Peter Orubad556ad42007-05-15 13:59:13 +02003340 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
3341 * @dev: PCI device to query
3342 *
3343 * Returns mmrbc: maximum designed memory read count in bytes
3344 * or appropriate error value.
3345 */
3346int pcix_get_max_mmrbc(struct pci_dev *dev)
3347{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003348 int cap;
Peter Orubad556ad42007-05-15 13:59:13 +02003349 u32 stat;
3350
3351 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3352 if (!cap)
3353 return -EINVAL;
3354
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003355 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
Peter Orubad556ad42007-05-15 13:59:13 +02003356 return -EINVAL;
3357
Dean Nelson25daeb52010-03-09 22:26:40 -05003358 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
Peter Orubad556ad42007-05-15 13:59:13 +02003359}
3360EXPORT_SYMBOL(pcix_get_max_mmrbc);
3361
3362/**
3363 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3364 * @dev: PCI device to query
3365 *
3366 * Returns mmrbc: maximum memory read count in bytes
3367 * or appropriate error value.
3368 */
3369int pcix_get_mmrbc(struct pci_dev *dev)
3370{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003371 int cap;
Dean Nelsonbdc2bda2010-03-09 22:26:48 -05003372 u16 cmd;
Peter Orubad556ad42007-05-15 13:59:13 +02003373
3374 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3375 if (!cap)
3376 return -EINVAL;
3377
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003378 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3379 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003380
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003381 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
Peter Orubad556ad42007-05-15 13:59:13 +02003382}
3383EXPORT_SYMBOL(pcix_get_mmrbc);
3384
3385/**
3386 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
3387 * @dev: PCI device to query
3388 * @mmrbc: maximum memory read count in bytes
3389 * valid values are 512, 1024, 2048, 4096
3390 *
3391 * If possible sets maximum memory read byte count, some bridges have erratas
3392 * that prevent this.
3393 */
3394int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
3395{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003396 int cap;
Dean Nelsonbdc2bda2010-03-09 22:26:48 -05003397 u32 stat, v, o;
3398 u16 cmd;
Peter Orubad556ad42007-05-15 13:59:13 +02003399
vignesh babu229f5af2007-08-13 18:23:14 +05303400 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003401 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003402
3403 v = ffs(mmrbc) - 10;
3404
3405 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3406 if (!cap)
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003407 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003408
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003409 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
3410 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003411
3412 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
3413 return -E2BIG;
3414
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003415 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3416 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003417
3418 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
3419 if (o != v) {
3420 if (v > o && dev->bus &&
3421 (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
3422 return -EIO;
3423
3424 cmd &= ~PCI_X_CMD_MAX_READ;
3425 cmd |= v << 2;
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003426 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
3427 return -EIO;
Peter Orubad556ad42007-05-15 13:59:13 +02003428 }
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003429 return 0;
Peter Orubad556ad42007-05-15 13:59:13 +02003430}
3431EXPORT_SYMBOL(pcix_set_mmrbc);
3432
3433/**
3434 * pcie_get_readrq - get PCI Express read request size
3435 * @dev: PCI device to query
3436 *
3437 * Returns maximum memory read request in bytes
3438 * or appropriate error value.
3439 */
3440int pcie_get_readrq(struct pci_dev *dev)
3441{
3442 int ret, cap;
3443 u16 ctl;
3444
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003445 cap = pci_pcie_cap(dev);
Peter Orubad556ad42007-05-15 13:59:13 +02003446 if (!cap)
3447 return -EINVAL;
3448
3449 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3450 if (!ret)
Julia Lawall93e75fa2010-08-05 22:23:16 +02003451 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
Peter Orubad556ad42007-05-15 13:59:13 +02003452
3453 return ret;
3454}
3455EXPORT_SYMBOL(pcie_get_readrq);
3456
3457/**
3458 * pcie_set_readrq - set PCI Express maximum memory read request
3459 * @dev: PCI device to query
Randy Dunlap42e61f42007-07-23 21:42:11 -07003460 * @rq: maximum memory read count in bytes
Peter Orubad556ad42007-05-15 13:59:13 +02003461 * valid values are 128, 256, 512, 1024, 2048, 4096
3462 *
Jon Masonc9b378c2011-06-28 18:26:25 -05003463 * If possible sets maximum memory read request in bytes
Peter Orubad556ad42007-05-15 13:59:13 +02003464 */
3465int pcie_set_readrq(struct pci_dev *dev, int rq)
3466{
3467 int cap, err = -EINVAL;
3468 u16 ctl, v;
3469
vignesh babu229f5af2007-08-13 18:23:14 +05303470 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
Peter Orubad556ad42007-05-15 13:59:13 +02003471 goto out;
3472
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003473 cap = pci_pcie_cap(dev);
Peter Orubad556ad42007-05-15 13:59:13 +02003474 if (!cap)
3475 goto out;
3476
3477 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3478 if (err)
3479 goto out;
Benjamin Herrenschmidta1c473a2011-10-14 14:56:15 -05003480 /*
3481 * If using the "performance" PCIe config, we clamp the
3482 * read rq size to the max packet size to prevent the
3483 * host bridge generating requests larger than we can
3484 * cope with
3485 */
3486 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
3487 int mps = pcie_get_mps(dev);
3488
3489 if (mps < 0)
3490 return mps;
3491 if (mps < rq)
3492 rq = mps;
3493 }
3494
3495 v = (ffs(rq) - 8) << 12;
Peter Orubad556ad42007-05-15 13:59:13 +02003496
3497 if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
3498 ctl &= ~PCI_EXP_DEVCTL_READRQ;
3499 ctl |= v;
Jon Masonc9b378c2011-06-28 18:26:25 -05003500 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
Peter Orubad556ad42007-05-15 13:59:13 +02003501 }
3502
3503out:
3504 return err;
3505}
3506EXPORT_SYMBOL(pcie_set_readrq);
3507
3508/**
Jon Masonb03e7492011-07-20 15:20:54 -05003509 * pcie_get_mps - get PCI Express maximum payload size
3510 * @dev: PCI device to query
3511 *
3512 * Returns maximum payload size in bytes
3513 * or appropriate error value.
3514 */
3515int pcie_get_mps(struct pci_dev *dev)
3516{
3517 int ret, cap;
3518 u16 ctl;
3519
3520 cap = pci_pcie_cap(dev);
3521 if (!cap)
3522 return -EINVAL;
3523
3524 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3525 if (!ret)
3526 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3527
3528 return ret;
3529}
3530
3531/**
3532 * pcie_set_mps - set PCI Express maximum payload size
3533 * @dev: PCI device to query
Randy Dunlap47c08f32011-08-20 11:49:43 -07003534 * @mps: maximum payload size in bytes
Jon Masonb03e7492011-07-20 15:20:54 -05003535 * valid values are 128, 256, 512, 1024, 2048, 4096
3536 *
3537 * If possible sets maximum payload size
3538 */
3539int pcie_set_mps(struct pci_dev *dev, int mps)
3540{
3541 int cap, err = -EINVAL;
3542 u16 ctl, v;
3543
3544 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
3545 goto out;
3546
3547 v = ffs(mps) - 8;
3548 if (v > dev->pcie_mpss)
3549 goto out;
3550 v <<= 5;
3551
3552 cap = pci_pcie_cap(dev);
3553 if (!cap)
3554 goto out;
3555
3556 err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3557 if (err)
3558 goto out;
3559
3560 if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
3561 ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
3562 ctl |= v;
3563 err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
3564 }
3565out:
3566 return err;
3567}
3568
3569/**
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003570 * pci_select_bars - Make BAR mask from the type of resource
Randy Dunlapf95d8822007-02-10 14:41:56 -08003571 * @dev: the PCI device for which BAR mask is made
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003572 * @flags: resource type mask to be selected
3573 *
3574 * This helper routine makes bar mask from the type of resource.
3575 */
3576int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3577{
3578 int i, bars = 0;
3579 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3580 if (pci_resource_flags(dev, i) & flags)
3581 bars |= (1 << i);
3582 return bars;
3583}
3584
Yu Zhao613e7ed2008-11-22 02:41:27 +08003585/**
3586 * pci_resource_bar - get position of the BAR associated with a resource
3587 * @dev: the PCI device
3588 * @resno: the resource number
3589 * @type: the BAR type to be filled in
3590 *
3591 * Returns BAR position in config space, or 0 if the BAR is invalid.
3592 */
3593int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
3594{
Yu Zhaod1b054d2009-03-20 11:25:11 +08003595 int reg;
3596
Yu Zhao613e7ed2008-11-22 02:41:27 +08003597 if (resno < PCI_ROM_RESOURCE) {
3598 *type = pci_bar_unknown;
3599 return PCI_BASE_ADDRESS_0 + 4 * resno;
3600 } else if (resno == PCI_ROM_RESOURCE) {
3601 *type = pci_bar_mem32;
3602 return dev->rom_base_reg;
Yu Zhaod1b054d2009-03-20 11:25:11 +08003603 } else if (resno < PCI_BRIDGE_RESOURCES) {
3604 /* device specific resource */
3605 reg = pci_iov_resource_bar(dev, resno, type);
3606 if (reg)
3607 return reg;
Yu Zhao613e7ed2008-11-22 02:41:27 +08003608 }
3609
Bjorn Helgaas865df572009-11-04 10:32:57 -07003610 dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
Yu Zhao613e7ed2008-11-22 02:41:27 +08003611 return 0;
3612}
3613
Mike Travis95a8b6e2010-02-02 14:38:13 -08003614/* Some architectures require additional programming to enable VGA */
3615static arch_set_vga_state_t arch_set_vga_state;
3616
3617void __init pci_register_set_vga_state(arch_set_vga_state_t func)
3618{
3619 arch_set_vga_state = func; /* NULL disables */
3620}
3621
3622static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003623 unsigned int command_bits, u32 flags)
Mike Travis95a8b6e2010-02-02 14:38:13 -08003624{
3625 if (arch_set_vga_state)
3626 return arch_set_vga_state(dev, decode, command_bits,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003627 flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003628 return 0;
3629}
3630
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003631/**
3632 * pci_set_vga_state - set VGA decode state on device and parents if requested
Randy Dunlap19eea632009-09-17 15:28:22 -07003633 * @dev: the PCI device
3634 * @decode: true = enable decoding, false = disable decoding
3635 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
Randy Dunlap3f37d622011-05-25 19:21:25 -07003636 * @flags: traverse ancestors and change bridges
Dave Airlie3448a192010-06-01 15:32:24 +10003637 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003638 */
3639int pci_set_vga_state(struct pci_dev *dev, bool decode,
Dave Airlie3448a192010-06-01 15:32:24 +10003640 unsigned int command_bits, u32 flags)
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003641{
3642 struct pci_bus *bus;
3643 struct pci_dev *bridge;
3644 u16 cmd;
Mike Travis95a8b6e2010-02-02 14:38:13 -08003645 int rc;
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003646
Dave Airlie3448a192010-06-01 15:32:24 +10003647 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003648
Mike Travis95a8b6e2010-02-02 14:38:13 -08003649 /* ARCH specific VGA enables */
Dave Airlie3448a192010-06-01 15:32:24 +10003650 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003651 if (rc)
3652 return rc;
3653
Dave Airlie3448a192010-06-01 15:32:24 +10003654 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3655 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3656 if (decode == true)
3657 cmd |= command_bits;
3658 else
3659 cmd &= ~command_bits;
3660 pci_write_config_word(dev, PCI_COMMAND, cmd);
3661 }
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003662
Dave Airlie3448a192010-06-01 15:32:24 +10003663 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003664 return 0;
3665
3666 bus = dev->bus;
3667 while (bus) {
3668 bridge = bus->self;
3669 if (bridge) {
3670 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3671 &cmd);
3672 if (decode == true)
3673 cmd |= PCI_BRIDGE_CTL_VGA;
3674 else
3675 cmd &= ~PCI_BRIDGE_CTL_VGA;
3676 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3677 cmd);
3678 }
3679 bus = bus->parent;
3680 }
3681 return 0;
3682}
3683
/*
 * Raw buffer for the "pci=resource_alignment=" kernel/sysfs parameter;
 * resource_alignment_lock serializes readers and writers of the buffer.
 */
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
Yuji Shimada32a9a6822009-03-16 17:13:39 +09003687
/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * Parses semicolon/comma-separated entries of the form
 * "[<order>@][<domain>:]<bus>:<slot>.<func>" from resource_alignment_param
 * and looks for an entry matching @dev.
 *
 * RETURNS: Resource alignment if it is specified.
 * Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	while (*p) {
		count = 0;
		/* Optional "<order>@" prefix selects a 2^order alignment */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
			p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Try "domain:bus:slot.func" first, then "bus:slot.func" */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			/* No explicit order means page alignment */
			if (align_order == -1) {
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
3744
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * A device is a reassignment target exactly when the user specified a
 * non-zero alignment for it via "pci=resource_alignment=".
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *	    or zero is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	return pci_specified_resource_alignment(dev) != 0;
}
3756
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	/* Host bridge windows must not be touched */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	/* Grow each MEM BAR to the requested alignment and mark it for
	 * reassignment (start = 0 means "unassigned" to the allocator).
	 */
	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
3818
Yuji Shimada32a9a6822009-03-16 17:13:39 +09003819ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3820{
3821 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3822 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3823 spin_lock(&resource_alignment_lock);
3824 strncpy(resource_alignment_param, buf, count);
3825 resource_alignment_param[count] = '\0';
3826 spin_unlock(&resource_alignment_lock);
3827 return count;
3828}
3829
3830ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3831{
3832 size_t count;
3833 spin_lock(&resource_alignment_lock);
3834 count = snprintf(buf, size, "%s", resource_alignment_param);
3835 spin_unlock(&resource_alignment_lock);
3836 return count;
3837}
3838
3839static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
3840{
3841 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
3842}
3843
3844static ssize_t pci_resource_alignment_store(struct bus_type *bus,
3845 const char *buf, size_t count)
3846{
3847 return pci_set_resource_alignment_param(buf, count);
3848}
3849
3850BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
3851 pci_resource_alignment_store);
3852
3853static int __init pci_resource_alignment_sysfs_init(void)
3854{
3855 return bus_create_file(&pci_bus_type,
3856 &bus_attr_resource_alignment);
3857}
3858
3859late_initcall(pci_resource_alignment_sysfs_init);
3860
/* Disable PCI domain (segment) support; all devices end up in domain 0 */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
3867
Andrew Patterson0ef5f8f2008-11-10 15:30:50 -07003868/**
3869 * pci_ext_cfg_enabled - can we access extended PCI config space?
3870 * @dev: The PCI device of the root bridge.
3871 *
3872 * Returns 1 if we can access PCI extended config space (offsets
3873 * greater than 0xff). This is the default implementation. Architecture
3874 * implementations can override this.
3875 */
3876int __attribute__ ((weak)) pci_ext_cfg_avail(struct pci_dev *dev)
3877{
3878 return 1;
3879}
3880
/* Default no-op implementation; architectures/platforms may override */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
3885
/*
 * pci_setup - parse the "pci=" kernel command line option
 *
 * Options are comma separated; whatever pcibios_setup() does not consume
 * is matched here.  Note that "realloc=" must be tested before the bare
 * "realloc" prefix, and that memparse() advances @str past size values.
 * Always returns 0 (unknown options are reported, not fatal).
 */
static int __init pci_setup(char *str)
{
	while (str) {
		char *k = strchr(str, ',');
		if (k)
			*k++ = 0;
		/* Give the arch a first look; it may consume the option */
		if (*str && (str = pcibios_setup(str)) && *str) {
			if (!strcmp(str, "nomsi")) {
				pci_no_msi();
			} else if (!strcmp(str, "noaer")) {
				pci_no_aer();
			} else if (!strncmp(str, "realloc=", 8)) {
				pci_realloc_get_opt(str + 8);
			} else if (!strncmp(str, "realloc", 7)) {
				pci_realloc_get_opt("on");
			} else if (!strcmp(str, "nodomains")) {
				pci_no_domains();
			} else if (!strncmp(str, "noari", 5)) {
				pcie_ari_disabled = true;
			} else if (!strncmp(str, "cbiosize=", 9)) {
				pci_cardbus_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "cbmemsize=", 10)) {
				pci_cardbus_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "resource_alignment=", 19)) {
				pci_set_resource_alignment_param(str + 19,
					strlen(str + 19));
			} else if (!strncmp(str, "ecrc=", 5)) {
				pcie_ecrc_get_policy(str + 5);
			} else if (!strncmp(str, "hpiosize=", 9)) {
				pci_hotplug_io_size = memparse(str + 9, &str);
			} else if (!strncmp(str, "hpmemsize=", 10)) {
				pci_hotplug_mem_size = memparse(str + 10, &str);
			} else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
				pcie_bus_config = PCIE_BUS_TUNE_OFF;
			} else if (!strncmp(str, "pcie_bus_safe", 13)) {
				pcie_bus_config = PCIE_BUS_SAFE;
			} else if (!strncmp(str, "pcie_bus_perf", 13)) {
				pcie_bus_config = PCIE_BUS_PERFORMANCE;
			} else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
				pcie_bus_config = PCIE_BUS_PEER2PEER;
			} else if (!strncmp(str, "pcie_scan_all", 13)) {
				pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
			} else {
				printk(KERN_ERR "PCI: Unknown option `%s'\n",
						str);
			}
		}
		str = k;
	}
	return 0;
}
early_param("pci", pci_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938
Tejun Heo0b62e132007-07-27 14:43:35 +09003939EXPORT_SYMBOL(pci_reenable_device);
Benjamin Herrenschmidtb7189892007-12-20 15:28:08 +11003940EXPORT_SYMBOL(pci_enable_device_io);
3941EXPORT_SYMBOL(pci_enable_device_mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003942EXPORT_SYMBOL(pci_enable_device);
Tejun Heo9ac78492007-01-20 16:00:26 +09003943EXPORT_SYMBOL(pcim_enable_device);
3944EXPORT_SYMBOL(pcim_pin_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945EXPORT_SYMBOL(pci_disable_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003946EXPORT_SYMBOL(pci_find_capability);
3947EXPORT_SYMBOL(pci_bus_find_capability);
3948EXPORT_SYMBOL(pci_release_regions);
3949EXPORT_SYMBOL(pci_request_regions);
Arjan van de Vene8de1482008-10-22 19:55:31 -07003950EXPORT_SYMBOL(pci_request_regions_exclusive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003951EXPORT_SYMBOL(pci_release_region);
3952EXPORT_SYMBOL(pci_request_region);
Arjan van de Vene8de1482008-10-22 19:55:31 -07003953EXPORT_SYMBOL(pci_request_region_exclusive);
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003954EXPORT_SYMBOL(pci_release_selected_regions);
3955EXPORT_SYMBOL(pci_request_selected_regions);
Arjan van de Vene8de1482008-10-22 19:55:31 -07003956EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957EXPORT_SYMBOL(pci_set_master);
Ben Hutchings6a479072008-12-23 03:08:29 +00003958EXPORT_SYMBOL(pci_clear_master);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959EXPORT_SYMBOL(pci_set_mwi);
Randy Dunlap694625c2007-07-09 11:55:54 -07003960EXPORT_SYMBOL(pci_try_set_mwi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003961EXPORT_SYMBOL(pci_clear_mwi);
Brett M Russa04ce0f2005-08-15 15:23:41 -04003962EXPORT_SYMBOL_GPL(pci_intx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003963EXPORT_SYMBOL(pci_assign_resource);
3964EXPORT_SYMBOL(pci_find_parent_resource);
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003965EXPORT_SYMBOL(pci_select_bars);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966
3967EXPORT_SYMBOL(pci_set_power_state);
3968EXPORT_SYMBOL(pci_save_state);
3969EXPORT_SYMBOL(pci_restore_state);
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02003970EXPORT_SYMBOL(pci_pme_capable);
Rafael J. Wysocki5a6c9b62008-08-08 00:14:24 +02003971EXPORT_SYMBOL(pci_pme_active);
Rafael J. Wysocki0235c4f2008-08-18 21:38:00 +02003972EXPORT_SYMBOL(pci_wake_from_d3);
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02003973EXPORT_SYMBOL(pci_target_state);
Rafael J. Wysocki404cc2d2008-07-07 03:35:26 +02003974EXPORT_SYMBOL(pci_prepare_to_sleep);
3975EXPORT_SYMBOL(pci_back_from_sleep);
Brian Kingf7bdd122007-04-06 16:39:36 -05003976EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);