blob: 28eb55b77ee9ba99fabe838f5f8dc08bfc884c5c [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 * PCI Bus Services, see include/linux/pci.h for further explanation.
3 *
4 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
5 * David Mosberger-Tang
6 *
7 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
8 */
9
10#include <linux/kernel.h>
11#include <linux/delay.h>
12#include <linux/init.h>
13#include <linux/pci.h>
David Brownell075c1772007-04-26 00:12:06 -070014#include <linux/pm.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090015#include <linux/slab.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070016#include <linux/module.h>
17#include <linux/spinlock.h>
Tim Schmielau4e57b682005-10-30 15:03:48 -080018#include <linux/string.h>
vignesh babu229f5af2007-08-13 18:23:14 +053019#include <linux/log2.h>
Shaohua Li7d715a62008-02-25 09:46:41 +080020#include <linux/pci-aspm.h>
Stephen Rothwellc300bd2fb2008-07-10 02:16:44 +020021#include <linux/pm_wakeup.h>
Sheng Yang8dd7f802008-10-21 17:38:25 +080022#include <linux/interrupt.h>
Yuji Shimada32a9a6822009-03-16 17:13:39 +090023#include <linux/device.h>
Rafael J. Wysockib67ea762010-02-17 23:44:09 +010024#include <linux/pm_runtime.h>
Bjorn Helgaas284f5f92012-04-30 15:21:02 -060025#include <asm-generic/pci-bridge.h>
Yuji Shimada32a9a6822009-03-16 17:13:39 +090026#include <asm/setup.h>
Greg KHbc56b9e2005-04-08 14:53:31 +090027#include "pci.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Alan Stern00240c32009-04-27 13:33:16 -040029const char *pci_power_names[] = {
30 "error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
31};
32EXPORT_SYMBOL_GPL(pci_power_names);
33
Rafael J. Wysocki93177a72010-01-02 22:57:24 +010034int isa_dma_bridge_buggy;
35EXPORT_SYMBOL(isa_dma_bridge_buggy);
36
37int pci_pci_problems;
38EXPORT_SYMBOL(pci_pci_problems);
39
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +010040unsigned int pci_pm_d3_delay;
41
Matthew Garrettdf17e622010-10-04 14:22:29 -040042static void pci_pme_list_scan(struct work_struct *work);
43
44static LIST_HEAD(pci_pme_list);
45static DEFINE_MUTEX(pci_pme_list_mutex);
46static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);
47
48struct pci_pme_device {
49 struct list_head list;
50 struct pci_dev *dev;
51};
52
53#define PME_TIMEOUT 1000 /* How long between PME checks */
54
Rafael J. Wysocki1ae861e2009-12-31 12:15:54 +010055static void pci_dev_d3_sleep(struct pci_dev *dev)
56{
57 unsigned int delay = dev->d3_delay;
58
59 if (delay < pci_pm_d3_delay)
60 delay = pci_pm_d3_delay;
61
62 msleep(delay);
63}
Linus Torvalds1da177e2005-04-16 15:20:36 -070064
Jeff Garzik32a2eea2007-10-11 16:57:27 -040065#ifdef CONFIG_PCI_DOMAINS
66int pci_domains_supported = 1;
67#endif
68
Atsushi Nemoto4516a612007-02-05 16:36:06 -080069#define DEFAULT_CARDBUS_IO_SIZE (256)
70#define DEFAULT_CARDBUS_MEM_SIZE (64*1024*1024)
71/* pci=cbmemsize=nnM,cbiosize=nn can override this */
72unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
73unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;
74
Eric W. Biederman28760482009-09-09 14:09:24 -070075#define DEFAULT_HOTPLUG_IO_SIZE (256)
76#define DEFAULT_HOTPLUG_MEM_SIZE (2*1024*1024)
77/* pci=hpmemsize=nnM,hpiosize=nn can override this */
78unsigned long pci_hotplug_io_size = DEFAULT_HOTPLUG_IO_SIZE;
79unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;
80
Jon Mason5f39e672011-10-03 09:50:20 -050081enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
Jon Masonb03e7492011-07-20 15:20:54 -050082
Jesse Barnesac1aa472009-10-26 13:20:44 -070083/*
84 * The default CLS is used if arch didn't set CLS explicitly and not
85 * all pci devices agree on the same value. Arch can override either
86 * the dfl or actual value as it sees fit. Don't forget this is
87 * measured in 32-bit words, not bytes.
88 */
Tejun Heo98e724c2009-10-08 18:59:53 +090089u8 pci_dfl_cache_line_size __devinitdata = L1_CACHE_BYTES >> 2;
Jesse Barnesac1aa472009-10-26 13:20:44 -070090u8 pci_cache_line_size;
91
Myron Stowe96c55902011-10-28 15:48:38 -060092/*
93 * If we set up a device for bus mastering, we need to check the latency
94 * timer as certain BIOSes forget to set it properly.
95 */
96unsigned int pcibios_max_latency = 255;
97
Rafael J. Wysocki6748dcc2012-03-01 00:06:33 +010098/* If set, the PCIe ARI capability will not be used. */
99static bool pcie_ari_disabled;
100
Linus Torvalds1da177e2005-04-16 15:20:36 -0700101/**
102 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
103 * @bus: pointer to PCI bus structure to search
104 *
105 * Given a PCI bus, returns the highest PCI bus number present in the set
106 * including the given PCI bus and its list of child PCI buses.
107 */
Sam Ravnborg96bde062007-03-26 21:53:30 -0800108unsigned char pci_bus_max_busnr(struct pci_bus* bus)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700109{
110 struct list_head *tmp;
111 unsigned char max, n;
112
Yinghai Lub918c622012-05-17 18:51:11 -0700113 max = bus->busn_res.end;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700114 list_for_each(tmp, &bus->children) {
115 n = pci_bus_max_busnr(pci_bus_b(tmp));
116 if(n > max)
117 max = n;
118 }
119 return max;
120}
Kristen Accardib82db5c2006-01-17 16:56:56 -0800121EXPORT_SYMBOL_GPL(pci_bus_max_busnr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700122
#ifdef CONFIG_HAS_IOMEM
/*
 * Map a whole memory BAR of @pdev into kernel virtual address space.
 * Returns NULL (with a warning) if the BAR is not a memory resource.
 */
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	/* Only memory BARs can be ioremapped; reject I/O port BARs. */
	if (WARN_ON(!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)))
		return NULL;

	return ioremap_nocache(pci_resource_start(pdev, bar),
			       pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);
#endif
138
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100139#define PCI_FIND_CAP_TTL 48
140
141static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
142 u8 pos, int cap, int *ttl)
Roland Dreier24a4e372005-10-28 17:35:34 -0700143{
144 u8 id;
Roland Dreier24a4e372005-10-28 17:35:34 -0700145
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100146 while ((*ttl)--) {
Roland Dreier24a4e372005-10-28 17:35:34 -0700147 pci_bus_read_config_byte(bus, devfn, pos, &pos);
148 if (pos < 0x40)
149 break;
150 pos &= ~3;
151 pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
152 &id);
153 if (id == 0xff)
154 break;
155 if (id == cap)
156 return pos;
157 pos += PCI_CAP_LIST_NEXT;
158 }
159 return 0;
160}
161
Michael Ellerman687d5fe2006-11-22 18:26:18 +1100162static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
163 u8 pos, int cap)
164{
165 int ttl = PCI_FIND_CAP_TTL;
166
167 return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
168}
169
Roland Dreier24a4e372005-10-28 17:35:34 -0700170int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
171{
172 return __pci_find_next_cap(dev->bus, dev->devfn,
173 pos + PCI_CAP_LIST_NEXT, cap);
174}
175EXPORT_SYMBOL_GPL(pci_find_next_capability);
176
Michael Ellermand3bac112006-11-22 18:26:16 +1100177static int __pci_bus_find_cap_start(struct pci_bus *bus,
178 unsigned int devfn, u8 hdr_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179{
180 u16 status;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700181
182 pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
183 if (!(status & PCI_STATUS_CAP_LIST))
184 return 0;
185
186 switch (hdr_type) {
187 case PCI_HEADER_TYPE_NORMAL:
188 case PCI_HEADER_TYPE_BRIDGE:
Michael Ellermand3bac112006-11-22 18:26:16 +1100189 return PCI_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700190 case PCI_HEADER_TYPE_CARDBUS:
Michael Ellermand3bac112006-11-22 18:26:16 +1100191 return PCI_CB_CAPABILITY_LIST;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192 default:
193 return 0;
194 }
Michael Ellermand3bac112006-11-22 18:26:16 +1100195
196 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197}
198
199/**
200 * pci_find_capability - query for devices' capabilities
201 * @dev: PCI device to query
202 * @cap: capability code
203 *
204 * Tell if a device supports a given PCI capability.
205 * Returns the address of the requested capability structure within the
206 * device's PCI configuration space or 0 in case the device does not
207 * support it. Possible values for @cap:
208 *
209 * %PCI_CAP_ID_PM Power Management
210 * %PCI_CAP_ID_AGP Accelerated Graphics Port
211 * %PCI_CAP_ID_VPD Vital Product Data
212 * %PCI_CAP_ID_SLOTID Slot Identification
213 * %PCI_CAP_ID_MSI Message Signalled Interrupts
214 * %PCI_CAP_ID_CHSWP CompactPCI HotSwap
215 * %PCI_CAP_ID_PCIX PCI-X
216 * %PCI_CAP_ID_EXP PCI Express
217 */
218int pci_find_capability(struct pci_dev *dev, int cap)
219{
Michael Ellermand3bac112006-11-22 18:26:16 +1100220 int pos;
221
222 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
223 if (pos)
224 pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);
225
226 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227}
228
229/**
230 * pci_bus_find_capability - query for devices' capabilities
231 * @bus: the PCI bus to query
232 * @devfn: PCI device to query
233 * @cap: capability code
234 *
235 * Like pci_find_capability() but works for pci devices that do not have a
236 * pci_dev structure set up yet.
237 *
238 * Returns the address of the requested capability structure within the
239 * device's PCI configuration space or 0 in case the device does not
240 * support it.
241 */
242int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
243{
Michael Ellermand3bac112006-11-22 18:26:16 +1100244 int pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700245 u8 hdr_type;
246
247 pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);
248
Michael Ellermand3bac112006-11-22 18:26:16 +1100249 pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
250 if (pos)
251 pos = __pci_find_next_cap(bus, devfn, pos, cap);
252
253 return pos;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254}
255
256/**
Myron Stowec463b8c2012-06-01 15:16:37 -0600257 * pci_pcie_cap2 - query for devices' PCI_CAP_ID_EXP v2 capability structure
258 * @dev: PCI device to check
259 *
260 * Like pci_pcie_cap() but also checks that the PCIe capability version is
261 * >= 2. Note that v1 capability structures could be sparse in that not
262 * all register fields were required. v2 requires the entire structure to
263 * be present size wise, while still allowing for non-implemented registers
264 * to exist but they must be hardwired to 0.
265 *
266 * Due to the differences in the versions of capability structures, one
267 * must be careful not to try and access non-existant registers that may
268 * exist in early versions - v1 - of Express devices.
269 *
270 * Returns the offset of the PCIe capability structure as long as the
271 * capability version is >= 2; otherwise 0 is returned.
272 */
273static int pci_pcie_cap2(struct pci_dev *dev)
274{
275 u16 flags;
276 int pos;
277
278 pos = pci_pcie_cap(dev);
279 if (pos) {
280 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);
281 if ((flags & PCI_EXP_FLAGS_VERS) < 2)
282 pos = 0;
283 }
284
285 return pos;
286}
287
288/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 * pci_find_ext_capability - Find an extended capability
290 * @dev: PCI device to query
291 * @cap: capability code
292 *
293 * Returns the address of the requested extended capability structure
294 * within the device's PCI configuration space or 0 if the device does
295 * not support it. Possible values for @cap:
296 *
297 * %PCI_EXT_CAP_ID_ERR Advanced Error Reporting
298 * %PCI_EXT_CAP_ID_VC Virtual Channel
299 * %PCI_EXT_CAP_ID_DSN Device Serial Number
300 * %PCI_EXT_CAP_ID_PWR Power Budgeting
301 */
302int pci_find_ext_capability(struct pci_dev *dev, int cap)
303{
304 u32 header;
Zhao, Yu557848c2008-10-13 19:18:07 +0800305 int ttl;
306 int pos = PCI_CFG_SPACE_SIZE;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307
Zhao, Yu557848c2008-10-13 19:18:07 +0800308 /* minimum 8 bytes per capability */
309 ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;
310
311 if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312 return 0;
313
314 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
315 return 0;
316
317 /*
318 * If we have no capabilities, this is indicated by cap ID,
319 * cap version and next pointer all being 0.
320 */
321 if (header == 0)
322 return 0;
323
324 while (ttl-- > 0) {
325 if (PCI_EXT_CAP_ID(header) == cap)
326 return pos;
327
328 pos = PCI_EXT_CAP_NEXT(header);
Zhao, Yu557848c2008-10-13 19:18:07 +0800329 if (pos < PCI_CFG_SPACE_SIZE)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 break;
331
332 if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
333 break;
334 }
335
336 return 0;
337}
Brice Goglin3a720d72006-05-23 06:10:01 -0400338EXPORT_SYMBOL_GPL(pci_find_ext_capability);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339
/*
 * Search from @pos for a Hypertransport capability whose type field
 * matches @ht_cap.  HT capabilities are carried inside PCI_CAP_ID_HT
 * entries; the type lives in bits of the byte at offset 3.  Returns the
 * offset of the matching PCI capability, or 0 if none is found or a
 * config read fails.
 */
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	/*
	 * SLAVE/HOST capability types are encoded in 3 bits; all other
	 * HT capability types use 5 bits (per the HT spec encoding).
	 */
	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	/*
	 * Note: ttl is shared across all __pci_find_next_cap_ttl() calls
	 * below, so the total traversal is bounded even though we loop.
	 */
	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		/* Keep walking from the next pointer of this HT capability. */
		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}
367/**
368 * pci_find_next_ht_capability - query a device's Hypertransport capabilities
369 * @dev: PCI device to query
370 * @pos: Position from which to continue searching
371 * @ht_cap: Hypertransport capability code
372 *
373 * To be used in conjunction with pci_find_ht_capability() to search for
374 * all capabilities matching @ht_cap. @pos should always be a value returned
375 * from pci_find_ht_capability().
376 *
377 * NB. To be 100% safe against broken PCI devices, the caller should take
378 * steps to avoid an infinite loop.
379 */
380int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
381{
382 return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
383}
384EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);
385
386/**
387 * pci_find_ht_capability - query a device's Hypertransport capabilities
388 * @dev: PCI device to query
389 * @ht_cap: Hypertransport capability code
390 *
391 * Tell if a device supports a given Hypertransport capability.
392 * Returns an address within the device's PCI configuration space
393 * or 0 in case the device does not support the request capability.
394 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
395 * which has a Hypertransport capability matching @ht_cap.
396 */
397int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
398{
399 int pos;
400
401 pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
402 if (pos)
403 pos = __pci_find_next_ht_cap(dev, pos, ht_cap);
404
405 return pos;
406}
407EXPORT_SYMBOL_GPL(pci_find_ht_capability);
408
/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure contains resources to be searched
 * @res: child resource record for which parent is sought
 *
 * For given resource region of given device, return the resource
 * region of parent bus the given region is contained in or where
 * it should be allocated from.
 */
struct resource *
pci_find_parent_resource(const struct pci_dev *dev, struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	int i;
	struct resource *best = NULL, *r;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		/*
		 * A zero res->start means the region is not yet allocated,
		 * so containment cannot be checked — any window of the
		 * right type is a candidate.
		 */
		if (res->start && !(res->start >= r->start && res->end <= r->end))
			continue;	/* Not contained */
		if ((res->flags ^ r->flags) & (IORESOURCE_IO | IORESOURCE_MEM))
			continue;	/* Wrong type */
		if (!((res->flags ^ r->flags) & IORESOURCE_PREFETCH))
			return r;	/* Exact match */
		/* We can't insert a non-prefetch resource inside a prefetchable parent .. */
		if (r->flags & IORESOURCE_PREFETCH)
			continue;
		/* .. but we can put a prefetchable resource inside a non-prefetchable one */
		if (!best)
			best = r;
	}
	/* No exact prefetch match; best non-prefetch window (or NULL). */
	return best;
}
443
444/**
John W. Linville064b53db2005-07-27 10:19:44 -0400445 * pci_restore_bars - restore a devices BAR values (e.g. after wake-up)
446 * @dev: PCI device to have its BARs restored
447 *
448 * Restore the BAR values for a given device, so as to make it
449 * accessible by its driver.
450 */
Adrian Bunkad6685992007-10-27 03:06:22 +0200451static void
John W. Linville064b53db2005-07-27 10:19:44 -0400452pci_restore_bars(struct pci_dev *dev)
453{
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800454 int i;
John W. Linville064b53db2005-07-27 10:19:44 -0400455
Yu Zhaobc5f5a82008-11-22 02:40:00 +0800456 for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
Yu Zhao14add802008-11-22 02:38:52 +0800457 pci_update_resource(dev, i);
John W. Linville064b53db2005-07-27 10:19:44 -0400458}
459
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200460static struct pci_platform_pm_ops *pci_platform_pm;
461
462int pci_set_platform_pm(struct pci_platform_pm_ops *ops)
463{
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200464 if (!ops->is_manageable || !ops->set_state || !ops->choose_state
465 || !ops->sleep_wake || !ops->can_wakeup)
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200466 return -EINVAL;
467 pci_platform_pm = ops;
468 return 0;
469}
470
471static inline bool platform_pci_power_manageable(struct pci_dev *dev)
472{
473 return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
474}
475
476static inline int platform_pci_set_power_state(struct pci_dev *dev,
477 pci_power_t t)
478{
479 return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
480}
481
482static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
483{
484 return pci_platform_pm ?
485 pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
486}
Randy Dunlap8f7020d2005-10-23 11:57:38 -0700487
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +0200488static inline bool platform_pci_can_wakeup(struct pci_dev *dev)
489{
490 return pci_platform_pm ? pci_platform_pm->can_wakeup(dev) : false;
491}
492
493static inline int platform_pci_sleep_wake(struct pci_dev *dev, bool enable)
494{
495 return pci_platform_pm ?
496 pci_platform_pm->sleep_wake(dev, enable) : -ENODEV;
497}
498
Rafael J. Wysockib67ea762010-02-17 23:44:09 +0100499static inline int platform_pci_run_wake(struct pci_dev *dev, bool enable)
500{
501 return pci_platform_pm ?
502 pci_platform_pm->run_wake(dev, enable) : -ENODEV;
503}
504
/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *                           given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* No PM capability means we cannot drive the transition natively. */
	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/* Validate current state:
	 * Can enter D0 from any state, but if we can only go deeper
	 * to sleep if we're already in a low power state
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		dev_err(&dev->dev, "invalid power transition "
			"(from state %d to %d)\n", dev->current_state, state);
		return -EINVAL;
	}

	/* check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	   || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/* If we're (effectively) in D3, force entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		/*
		 * Leaving D3hot without No_Soft_Reset set may reset the
		 * device, so remember to restore its BARs afterwards.
		 */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		 && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* enter specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* Mandatory power management transition delays */
	/* see PCI PM 1.1 5.6.1 table 18 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	/* Re-read PMCSR: the device may have refused the transition. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		dev_info(&dev->dev, "Refused to change power state, "
			"currently in D%d\n", dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	/* Let the ASPM layer re-evaluate the link after a state change. */
	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}
611
/**
 * pci_update_current_state - Read PCI power state of given device from its
 *                            PCI PM registers and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (dev->pm_cap) {
		u16 pmcsr;

		/*
		 * Configuration space is not accessible for device in
		 * D3cold, so just keep or set D3cold for safety
		 */
		if (dev->current_state == PCI_D3cold)
			return;
		if (state == PCI_D3cold) {
			dev->current_state = PCI_D3cold;
			return;
		}
		/* Otherwise trust the hardware's PMCSR PowerState field. */
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		/* No PM capability: fall back to the caller-supplied state. */
		dev->current_state = state;
	}
}
639
640/**
Rafael J. Wysockidb288c92012-07-05 15:20:00 -0600641 * pci_power_up - Put the given device into D0 forcibly
642 * @dev: PCI device to power up
643 */
644void pci_power_up(struct pci_dev *dev)
645{
646 if (platform_pci_power_manageable(dev))
647 platform_pci_set_power_state(dev, PCI_D0);
648
649 pci_raw_set_power_state(dev, PCI_D0);
650 pci_update_current_state(dev, PCI_D0);
651}
652
653/**
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100654 * pci_platform_power_transition - Use platform to change device power state
655 * @dev: PCI device to handle.
656 * @state: State to put the device into.
657 */
658static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
659{
660 int error;
661
662 if (platform_pci_power_manageable(dev)) {
663 error = platform_pci_set_power_state(dev, state);
664 if (!error)
665 pci_update_current_state(dev, state);
Ajaykumar Hotchandanib51306c2011-12-12 13:57:36 +0530666 /* Fall back to PCI_D0 if native PM is not supported */
667 if (!dev->pm_cap)
668 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100669 } else {
670 error = -ENODEV;
671 /* Fall back to PCI_D0 if native PM is not supported */
Rafael J. Wysockib3bad722009-05-17 20:17:06 +0200672 if (!dev->pm_cap)
673 dev->current_state = PCI_D0;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100674 }
675
676 return error;
677}
678
/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	/* Only a transition to D0 needs up-front platform work here. */
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays, see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for
		 * devices powered on/off by corresponding bridge,
		 * because have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the
			 * whole hierarchy may be powered on into
			 * D0uninitialized state, resume them to give
			 * them a chance to suspend again
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}
707
708/**
709 * __pci_dev_set_current_state - Set current state of a PCI device
710 * @dev: Device to handle
711 * @data: pointer to state to be set
712 */
713static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
714{
715 pci_power_t state = *(pci_power_t *)data;
716
717 dev->current_state = state;
718 return 0;
719}
720
721/**
722 * __pci_bus_set_current_state - Walk given bus and set current state of devices
723 * @bus: Top bus of the subtree to walk.
724 * @state: state to be set
725 */
726static void __pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
727{
728 if (bus)
729 pci_walk_bus(bus, __pci_dev_set_current_state, &state);
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100730}
731
732/**
733 * __pci_complete_power_transition - Complete power transition of a PCI device
734 * @dev: PCI device to handle.
735 * @state: State to put the device into.
736 *
737 * This function should not be called directly by device drivers.
738 */
739int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
740{
Huang Ying448bd852012-06-23 10:23:51 +0800741 int ret;
742
Rafael J. Wysockidb288c92012-07-05 15:20:00 -0600743 if (state <= PCI_D0)
Huang Ying448bd852012-06-23 10:23:51 +0800744 return -EINVAL;
745 ret = pci_platform_power_transition(dev, state);
746 /* Power off the bridge may power off the whole hierarchy */
747 if (!ret && state == PCI_D3cold)
748 __pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
749 return ret;
Rafael J. Wysocki0e5dd462009-03-26 22:51:40 +0100750}
751EXPORT_SYMBOL_GPL(__pci_complete_power_transition);
752
/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than putting
		 * it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	/* Platform-side preparation (and D3cold resume delays) first. */
	__pci_start_power_transition(dev, state);

	/* This device is quirked not to be put into D3, so
	   don't put it in D3 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put device in D3cold, we put device into D3hot in native
	 * way, then put device into D3cold with platform ops
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	/* A successful platform-side completion overrides a native error. */
	if (!__pci_complete_power_transition(dev, state))
		error = 0;
	/*
	 * When aspm_policy is "powersave" this call ensures
	 * that ASPM is configured.
	 */
	if (!error && dev->bus->self)
		pcie_aspm_powersave_config_link(dev->bus->self);

	return error;
}
814
815/**
Linus Torvalds1da177e2005-04-16 15:20:36 -0700816 * pci_choose_state - Choose the power state of a PCI device
817 * @dev: PCI device to be suspended
818 * @state: target sleep state for the whole system. This is the value
819 * that is passed to suspend() function.
820 *
821 * Returns PCI power state suitable for given device and given system
822 * message.
823 */
824
825pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
826{
Shaohua Liab826ca2007-07-20 10:03:22 +0800827 pci_power_t ret;
David Shaohua Li0f644742005-03-19 00:15:48 -0500828
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 if (!pci_find_capability(dev, PCI_CAP_ID_PM))
830 return PCI_D0;
831
Rafael J. Wysocki961d9122008-07-07 03:32:02 +0200832 ret = platform_pci_choose_state(dev);
833 if (ret != PCI_POWER_ERROR)
834 return ret;
Pavel Machekca078ba2005-09-03 15:56:57 -0700835
836 switch (state.event) {
837 case PM_EVENT_ON:
838 return PCI_D0;
839 case PM_EVENT_FREEZE:
David Brownellb887d2e2006-08-14 23:11:05 -0700840 case PM_EVENT_PRETHAW:
841 /* REVISIT both freeze and pre-thaw "should" use D0 */
Pavel Machekca078ba2005-09-03 15:56:57 -0700842 case PM_EVENT_SUSPEND:
Rafael J. Wysocki3a2d5b72008-02-23 19:13:25 +0100843 case PM_EVENT_HIBERNATE:
Pavel Machekca078ba2005-09-03 15:56:57 -0700844 return PCI_D3hot;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845 default:
Bjorn Helgaas80ccba12008-06-13 10:52:11 -0600846 dev_info(&dev->dev, "unrecognized suspend event %d\n",
847 state.event);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700848 BUG();
849 }
850 return PCI_D0;
851}
852
853EXPORT_SYMBOL(pci_choose_state);
854
/* Number of 16-bit control registers saved by pci_save_pcie_state():
 * DEVCTL, LNKCTL, SLTCTL, RTCTL plus DEVCTL2, LNKCTL2 and SLTCTL2. */
#define PCI_EXP_SAVE_REGS	7

/*
 * Which control registers a PCIe capability actually implements depends on
 * the capability version (PCI_EXP_FLAGS_VERS) and on the port type; v2+
 * capabilities expose all of them unconditionally.
 */
#define pcie_cap_has_devctl(type, flags)	1
#define pcie_cap_has_lnkctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_ENDPOINT ||	\
		  type == PCI_EXP_TYPE_LEG_END))
#define pcie_cap_has_sltctl(type, flags)		\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 ((type == PCI_EXP_TYPE_ROOT_PORT) ||	\
		  (type == PCI_EXP_TYPE_DOWNSTREAM &&	\
		   (flags & PCI_EXP_FLAGS_SLOT))))
#define pcie_cap_has_rtctl(type, flags)			\
		((flags & PCI_EXP_FLAGS_VERS) > 1 ||	\
		 (type == PCI_EXP_TYPE_ROOT_PORT ||	\
		  type == PCI_EXP_TYPE_RC_EC))
Yu Zhao1b6b8ce2009-04-09 14:57:39 +0800872
/*
 * pci_find_saved_cap - find the saved-state buffer for a capability
 * @pci_dev: device whose saved_cap_space list is searched
 * @cap: capability ID (PCI_CAP_ID_*)
 *
 * Returns the matching entry, or NULL if no buffer was allocated for @cap.
 */
static struct pci_cap_saved_state *pci_find_saved_cap(
	struct pci_dev *pci_dev, char cap)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos;

	hlist_for_each_entry(tmp, pos, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}
885
/*
 * pci_save_pcie_state - save the PCIe capability control registers
 * @dev: device to save
 *
 * Saves only the registers the capability implements (see the
 * pcie_cap_has_* macros).  The registers are packed into the save buffer
 * in a fixed order that pci_restore_pcie_state() must mirror exactly.
 * Returns 0 on success or when the device has no PCIe capability,
 * -ENOMEM if no save buffer was pre-allocated.
 */
static int pci_save_pcie_state(struct pci_dev *dev)
{
	int type, pos, i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	type = pci_pcie_type(dev);
	if (pcie_cap_has_devctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &cap[i++]);
	if (pcie_cap_has_lnkctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_LNKCTL, &cap[i++]);
	if (pcie_cap_has_sltctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_SLTCTL, &cap[i++]);
	if (pcie_cap_has_rtctl(type, flags))
		pci_read_config_word(dev, pos + PCI_EXP_RTCTL, &cap[i++]);

	/* The "2" register set only exists for v2+ capabilities. */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return 0;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_LNKCTL2, &cap[i++]);
	pci_read_config_word(dev, pos + PCI_EXP_SLTCTL2, &cap[i++]);
	return 0;
}
925
/*
 * pci_restore_pcie_state - write back the registers saved by
 * pci_save_pcie_state()
 * @dev: device to restore
 *
 * Consumes the save buffer in the same fixed order in which
 * pci_save_pcie_state() filled it; silently does nothing if there is no
 * saved state or no PCIe capability.
 */
static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0, pos, type;
	struct pci_cap_saved_state *save_state;
	u16 *cap;
	u16 flags;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
	if (!save_state || pos <= 0)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &flags);

	type = pci_pcie_type(dev);
	if (pcie_cap_has_devctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, cap[i++]);
	if (pcie_cap_has_lnkctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_LNKCTL, cap[i++]);
	if (pcie_cap_has_sltctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_SLTCTL, cap[i++]);
	if (pcie_cap_has_rtctl(type, flags))
		pci_write_config_word(dev, pos + PCI_EXP_RTCTL, cap[i++]);

	/* v2-only registers, matching the tail of pci_save_pcie_state() */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_LNKCTL2, cap[i++]);
	pci_write_config_word(dev, pos + PCI_EXP_SLTCTL2, cap[i++]);
}
959
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800960
/*
 * pci_save_pcix_state - save the PCI-X command register
 * @dev: device to save
 *
 * Returns 0 on success or when the device has no PCI-X capability,
 * -ENOMEM if no save buffer was pre-allocated.
 */
static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (pos <= 0)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		dev_err(&dev->dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	/* Only PCI_X_CMD is saved. */
	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}
981
982static void pci_restore_pcix_state(struct pci_dev *dev)
983{
984 int i = 0, pos;
985 struct pci_cap_saved_state *save_state;
986 u16 *cap;
987
988 save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
989 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
990 if (!save_state || pos <= 0)
991 return;
Alex Williamson24a4742f2011-05-10 10:02:11 -0600992 cap = (u16 *)&save_state->cap.data[0];
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800993
994 pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
Stephen Hemmingercc692a52006-11-08 16:17:15 -0800995}
996
997
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998/**
999 * pci_save_state - save the PCI configuration space of a device before suspending
1000 * @dev: - PCI device that we're dealing with
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 */
1002int
1003pci_save_state(struct pci_dev *dev)
1004{
1005 int i;
1006 /* XXX: 100% dword access ok here? */
1007 for (i = 0; i < 16; i++)
Kleber Sacilotto de Souza9e0b5b22009-11-25 00:55:51 -02001008 pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
Rafael J. Wysockiaa8c6c92009-01-16 21:54:43 +01001009 dev->state_saved = true;
Michael S. Tsirkinb56a5a22006-08-21 16:22:22 +03001010 if ((i = pci_save_pcie_state(dev)) != 0)
1011 return i;
Stephen Hemmingercc692a52006-11-08 16:17:15 -08001012 if ((i = pci_save_pcix_state(dev)) != 0)
1013 return i;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 return 0;
1015}
1016
/*
 * pci_restore_config_dword - write one saved config dword, verifying it took
 * @pdev: device being restored
 * @offset: config space offset (bytes)
 * @saved_val: value to restore
 * @retry: how many times to re-read and re-write before giving up
 *
 * Skips the write entirely if the register already holds @saved_val.
 * With @retry == 0 the value is written once without verification.
 */
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		dev_dbg(&pdev->dev, "restoring config space at offset "
			"%#x (was %#x, writing %#x)\n", offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		/* Give the device a moment before re-checking the write. */
		mdelay(1);
	}
}
1040
Rafael J. Wysockia6cb9ee2012-04-16 23:07:50 +02001041static void pci_restore_config_space_range(struct pci_dev *pdev,
1042 int start, int end, int retry)
Rafael J. Wysockiebfc5b82012-04-15 21:40:40 +02001043{
1044 int index;
1045
1046 for (index = end; index >= start; index--)
1047 pci_restore_config_dword(pdev, 4 * index,
1048 pdev->saved_config_space[index],
1049 retry);
1050}
1051
/*
 * pci_restore_config_space - restore the whole saved config-space snapshot
 * @pdev: device being restored
 *
 * For normal (type 0) headers the restore order matters: registers above
 * the BARs first, then the BARs (dwords 4-9, with retries), and the
 * command register range (dwords 0-3) last, so the device is not decoding
 * at stale addresses when it is re-enabled.
 */
static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}
1063
/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: - PCI device that we're dealing with
 *
 * No-op unless pci_save_state() ran first; clears state_saved on return,
 * so one save pairs with at most one restore.
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_ats_state(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
1085
Alex Williamsonffbdd3f2011-05-10 10:02:27 -06001086struct pci_saved_state {
1087 u32 config_space[16];
1088 struct pci_cap_saved_data cap[0];
1089};
1090
/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	struct hlist_node *pos;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	/* One extra pci_cap_saved_data serves as the zero-size terminator. */
	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	/* kzalloc zeroes the terminator entry for us. */
	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, pos, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);
1132
/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 *
 * Returns 0 on success (a NULL @state just clears state_saved), -EINVAL if
 * a capability block in @state has no matching buffer or a size mismatch.
 */
int pci_load_saved_state(struct pci_dev *dev, struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	/* Invalidate until the whole snapshot has been copied in. */
	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = pci_find_saved_cap(dev, cap->cap_nr);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		/* Advance past this variable-length block. */
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);
1167
1168/**
1169 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
1170 * and free the memory allocated for it.
1171 * @dev: PCI device that we're dealing with
1172 * @state: Pointer to saved state returned from pci_store_saved_state()
1173 */
1174int pci_load_and_free_saved_state(struct pci_dev *dev,
1175 struct pci_saved_state **state)
1176{
1177 int ret = pci_load_saved_state(dev, *state);
1178 kfree(*state);
1179 *state = NULL;
1180 return ret;
1181}
1182EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
1183
/*
 * do_pci_enable_device - low-level enable path shared by the enable API
 * @dev: device to enable
 * @bars: bitmask of resources to enable (bit i => resource i)
 *
 * Brings the device to D0, asks the arch code to enable the selected
 * resources, then runs enable-time fixups.  -EIO from the power-state
 * change is tolerated (device may lack PM support).
 */
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;
	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	return 0;
}
1198
/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Re-runs the low-level enable path (all resources) without touching
 * enable_cnt, and only if the device is already marked enabled.
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code, write proper resume handler and use it instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
1212
/*
 * __pci_enable_device_flags - enable the device for resources matching @flags
 * @dev: device to enable
 * @flags: IORESOURCE_IO and/or IORESOURCE_MEM
 *
 * Reference-counted: only the first caller actually enables the hardware;
 * on failure the count is rolled back.
 */
static int __pci_enable_device_flags(struct pci_dev *dev,
				     resource_size_t flags)
{
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call. So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_add_return(1, &dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	/* only skip sriov related */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);	/* roll back on failure */
	return err;
}
1247
/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_IO);
}

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable Memory resources. Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM);
}

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver. Ask low-level code
 * to enable I/O and memory. Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return __pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
1289
/*
 * Managed PCI resources. This manages device on/off, intx/msi/msix
 * on/off and BAR regions. pci_dev itself records msi/msix status, so
 * there's no need to track it separately. pci_devres is initialized
 * when a device is enabled using managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;		/* device enabled via pcim_enable_device() */
	unsigned int pinned:1;		/* don't disable the device on release */
	unsigned int orig_intx:1;	/* saved INTx setting, restored when restore_intx is set */
	unsigned int restore_intx:1;	/* restore orig_intx on release */
	u32 region_mask;		/* bit i set => region i to release */
};
1303
/*
 * pcim_release - devres release callback for managed PCI devices
 *
 * Undoes everything recorded in the pci_devres entry: MSI/MSI-X, requested
 * regions, INTx state, and finally the enable itself (unless pinned).
 */
static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	/* Pinned devices stay enabled after driver detach. */
	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}
1325
1326static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
1327{
1328 struct pci_devres *dr, *new_dr;
1329
1330 dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
1331 if (dr)
1332 return dr;
1333
1334 new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
1335 if (!new_dr)
1336 return NULL;
1337 return devres_get(&pdev->dev, new_dr, NULL, NULL);
1338}
1339
1340static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
1341{
1342 if (pci_is_managed(pdev))
1343 return devres_find(&pdev->dev, pcim_release, NULL, NULL);
1344 return NULL;
1345}
1346
/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().  Returns 0 on success (including when the
 * device was already managed-enabled), -ENOMEM if the devres entry cannot
 * be allocated, or the pci_enable_device() error.
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		/* Mark managed so pcim_release() will disable on detach. */
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
1371
/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev. Pinned device won't be disabled on
 * driver detach. @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	/* Calling this on a non-managed-enabled device is a caller bug. */
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
1389
/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device. This
 * is the default implementation. Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device (struct pci_dev *dev) {}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001399
/*
 * do_pci_disable_device - low-level disable shared by the disable paths
 * @dev: device to disable
 *
 * Clears bus mastering in the command register (only if it was set) and
 * lets the arch code release its resources.
 */
static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}
1412
/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}
1425
/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore. This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void
pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	/* Keep the managed-resource bookkeeping in sync, if any. */
	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	/* Only the last disable actually touches the hardware. */
	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
1452
/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCIe reset state for the device. This is the default
 * implementation. Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	/* No arch support by default. */
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
1480
/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set). Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	/* Single write clears status and (possibly) disables PME. */
	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}
1515
/**
 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
 * @dev: Device to handle.
 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
 *
 * Check if @dev has generated PME and queue a resume request for it in that
 * case.  Always returns 0 so pci_walk_bus() keeps iterating.
 */
static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
{
	if (pme_poll_reset && dev->pme_poll)
		dev->pme_poll = false;

	if (pci_check_pme_status(dev)) {
		pci_wakeup_event(dev);
		pm_request_resume(&dev->dev);
	}
	return 0;
}
1535
/**
 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
 * @bus: Top bus of the subtree to walk.
 */
void pci_pme_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		/* (void *)true: also reset each device's pme_poll flag */
		pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 *
 * Unconditionally signals a wakeup event and queues a resume request;
 * returns 0 so pci_walk_bus() keeps iterating.
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}
1567
1568/**
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001569 * pci_pme_capable - check the capability of PCI device to generate PME#
1570 * @dev: PCI device to handle.
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001571 * @state: PCI state from which device will issue PME#.
1572 */
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02001573bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001574{
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001575 if (!dev->pm_cap)
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001576 return false;
1577
Rafael J. Wysocki337001b2008-07-07 03:36:24 +02001578 return !!(dev->pme_support & (1 << state));
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001579}
1580
/*
 * Periodic work item: poll the PME status of every device on pci_pme_list
 * and drop entries whose device no longer needs polling.
 */
static void pci_pme_list_scan(struct work_struct *work)
{
	struct pci_pme_device *pme_dev, *n;

	mutex_lock(&pci_pme_list_mutex);
	if (!list_empty(&pci_pme_list)) {
		list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
			if (pme_dev->dev->pme_poll) {
				struct pci_dev *bridge;

				bridge = pme_dev->dev->bus->self;
				/*
				 * If bridge is in low power state, the
				 * configuration space of subordinate devices
				 * may be not accessible
				 */
				if (bridge && bridge->current_state != PCI_D0)
					continue;
				/* NULL cookie: leave the pme_poll flag set. */
				pci_pme_wakeup(pme_dev->dev, NULL);
			} else {
				/* Polling no longer wanted; drop the entry. */
				list_del(&pme_dev->list);
				kfree(pme_dev);
			}
		}
		/* Re-arm the poll while any device remains on the list. */
		if (!list_empty(&pci_pme_list))
			schedule_delayed_work(&pci_pme_work,
					      msecs_to_jiffies(PME_TIMEOUT));
	}
	mutex_unlock(&pci_pme_list_mutex);
}
1611
/**
 * pci_pme_active - enable or disable PCI device's PME# function
 * @dev: PCI device to handle.
 * @enable: 'true' to enable PME# generation; 'false' to disable it.
 *
 * The caller must verify that the device is capable of generating PME# before
 * calling this function with @enable equal to 'true'.
 */
void pci_pme_active(struct pci_dev *dev, bool enable)
{
	u16 pmcsr;

	if (!dev->pm_cap)
		return;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	/* Clear PME_Status by writing 1 to it and enable PME# */
	pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
	if (!enable)
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;

	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/* PCI (as opposed to PCIe) PME requires that the device have
	   its PME# line hooked up correctly. Not all hardware vendors
	   do this, so the PME never gets delivered and the device
	   remains asleep. The easiest way around this is to
	   periodically walk the list of suspended devices and check
	   whether any have their PME flag set. The assumption is that
	   we'll wake up often enough anyway that this won't be a huge
	   hit, and the power savings from the devices will still be a
	   win. */

	if (dev->pme_poll) {
		struct pci_pme_device *pme_dev;
		if (enable) {
			/* Add the device to the poll list. */
			pme_dev = kmalloc(sizeof(struct pci_pme_device),
					  GFP_KERNEL);
			if (!pme_dev)
				goto out;
			pme_dev->dev = dev;
			mutex_lock(&pci_pme_list_mutex);
			list_add(&pme_dev->list, &pci_pme_list);
			/* First entry: kick off the periodic poll work. */
			if (list_is_singular(&pci_pme_list))
				schedule_delayed_work(&pci_pme_work,
						      msecs_to_jiffies(PME_TIMEOUT));
			mutex_unlock(&pci_pme_list_mutex);
		} else {
			/* Remove this device's entry from the poll list. */
			mutex_lock(&pci_pme_list_mutex);
			list_for_each_entry(pme_dev, &pci_pme_list, list) {
				if (pme_dev->dev == dev) {
					list_del(&pme_dev->list);
					kfree(pme_dev);
					break;
				}
			}
			mutex_unlock(&pci_pme_list_mutex);
		}
	}

out:
	dev_dbg(&dev->dev, "PME# %s\n", enable ? "enabled" : "disabled");
}
1675
/**
 * __pci_enable_wake - enable PCI device as wakeup event source
 * @dev: PCI device affected
 * @state: PCI state from which device will issue wakeup events
 * @runtime: True if the events are to be generated at run time
 * @enable: True to enable event generation; false to disable
 *
 * This enables the device as a wakeup event source, or disables it.
 * When such events involves platform-specific hooks, those hooks are
 * called automatically by this routine.
 *
 * Devices with legacy power management (no standard PCI PM capabilities)
 * always require such platform hooks.
 *
 * RETURN VALUE:
 * 0 is returned on success
 * -EINVAL is returned if device is not supposed to wake up the system
 * Error code depending on the platform is returned if both the platform and
 * the native mechanism fail to enable the generation of wake-up events
 */
int __pci_enable_wake(struct pci_dev *dev, pci_power_t state,
		      bool runtime, bool enable)
{
	int ret = 0;

	/* System wakeup is only allowed if user space permitted it. */
	if (enable && !runtime && !device_may_wakeup(&dev->dev))
		return -EINVAL;

	/* Don't do the same thing twice in a row for one device. */
	if (!!enable == !!dev->wakeup_prepared)
		return 0;

	/*
	 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
	 * Anderson we should be doing PME# wake enable followed by ACPI wake
	 * enable. To disable wake-up we call the platform first, for symmetry.
	 */

	if (enable) {
		int error;

		if (pci_pme_capable(dev, state))
			pci_pme_active(dev, true);
		else
			ret = 1;	/* no native PME#: rely on the platform */
		error = runtime ? platform_pci_run_wake(dev, true) :
					platform_pci_sleep_wake(dev, true);
		/* The platform result only matters when PME# is unavailable. */
		if (ret)
			ret = error;
		if (!ret)
			dev->wakeup_prepared = true;
	} else {
		if (runtime)
			platform_pci_run_wake(dev, false);
		else
			platform_pci_sleep_wake(dev, false);
		pci_pme_active(dev, false);
		dev->wakeup_prepared = false;
	}

	return ret;
}
EXPORT_SYMBOL(__pci_enable_wake);
Rafael J. Wysockieb9d0fe2008-07-07 03:34:48 +02001739
1740/**
Rafael J. Wysocki0235c4f2008-08-18 21:38:00 +02001741 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
1742 * @dev: PCI device to prepare
1743 * @enable: True to enable wake-up event generation; false to disable
1744 *
1745 * Many drivers want the device to wake up the system from D3_hot or D3_cold
1746 * and this function allows them to set that up cleanly - pci_enable_wake()
1747 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
1748 * ordering constraints.
1749 *
1750 * This function only returns error code if the device is not capable of
1751 * generating PME# from both D3_hot and D3_cold, and the platform is unable to
1752 * enable wake-up power for it.
1753 */
1754int pci_wake_from_d3(struct pci_dev *dev, bool enable)
1755{
1756 return pci_pme_capable(dev, PCI_D3cold) ?
1757 pci_enable_wake(dev, PCI_D3cold, enable) :
1758 pci_enable_wake(dev, PCI_D3hot, enable);
1759}
1760
/**
 * pci_target_state - find an appropriate low power state for a given PCI dev
 * @dev: PCI device
 *
 * Use underlying platform code to find a supported low power state for @dev.
 * If the platform can't manage @dev, return the deepest state from which it
 * can generate wake events, based on any available PME info.
 */
pci_power_t pci_target_state(struct pci_dev *dev)
{
	pci_power_t target_state = PCI_D3hot;

	if (platform_pci_power_manageable(dev)) {
		/*
		 * Call the platform to choose the target state of the device
		 * and enable wake-up from this state if supported.
		 */
		pci_power_t state = platform_pci_choose_state(dev);

		switch (state) {
		case PCI_POWER_ERROR:
		case PCI_UNKNOWN:
			/* Keep the PCI_D3hot default. */
			break;
		case PCI_D1:
		case PCI_D2:
			if (pci_no_d1d2(dev))
				break;
			/* fall through: D1/D2 is usable on this device */
		default:
			target_state = state;
		}
	} else if (!dev->pm_cap) {
		/* No PM capability at all: the device stays in D0. */
		target_state = PCI_D0;
	} else if (device_may_wakeup(&dev->dev)) {
		/*
		 * Find the deepest state from which the device can generate
		 * wake-up events, make it the target state and enable device
		 * to generate PME#.
		 */
		if (dev->pme_support) {
			while (target_state
			       && !(dev->pme_support & (1 << target_state)))
				target_state--;
		}
	}

	return target_state;
}
1808
/**
 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
 * @dev: Device to handle.
 *
 * Choose the power state appropriate for the device depending on whether
 * it can wake up the system and/or is power manageable by the platform
 * (PCI_D3hot is the default) and put the device into that state.
 */
int pci_prepare_to_sleep(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* D3cold during system suspend/hibernate is not supported */
	if (target_state > PCI_D3hot)
		target_state = PCI_D3hot;

	/* Arm wake-up before changing the power state. */
	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));

	error = pci_set_power_state(dev, target_state);

	/* Undo the wake-up setup if the transition failed. */
	if (error)
		pci_enable_wake(dev, target_state, false);

	return error;
}
1838
/**
 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
 * @dev: Device to handle.
 *
 * Disable device's system wake-up capability and put it into D0.
 */
int pci_back_from_sleep(struct pci_dev *dev)
{
	/* Wake-up is no longer needed once the system is running. */
	pci_enable_wake(dev, PCI_D0, false);
	return pci_set_power_state(dev, PCI_D0);
}
1850
/**
 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
 * @dev: PCI device being suspended.
 *
 * Prepare @dev to generate wake-up events at run time and put it into a low
 * power state.
 */
int pci_finish_runtime_suspend(struct pci_dev *dev)
{
	pci_power_t target_state = pci_target_state(dev);
	int error;

	if (target_state == PCI_POWER_ERROR)
		return -EIO;

	/* Unlike system sleep, runtime suspend may use D3cold. */
	dev->runtime_d3cold = target_state == PCI_D3cold;

	__pci_enable_wake(dev, target_state, true, pci_dev_run_wake(dev));

	error = pci_set_power_state(dev, target_state);

	/* Roll back the wake-up setup and the D3cold flag on failure. */
	if (error) {
		__pci_enable_wake(dev, target_state, true, false);
		dev->runtime_d3cold = false;
	}

	return error;
}
1879
1880/**
Rafael J. Wysockib67ea762010-02-17 23:44:09 +01001881 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
1882 * @dev: Device to check.
1883 *
1884 * Return true if the device itself is cabable of generating wake-up events
1885 * (through the platform or using the native PCIe PME) or if the device supports
1886 * PME and one of its upstream bridges can generate wake-up events.
1887 */
1888bool pci_dev_run_wake(struct pci_dev *dev)
1889{
1890 struct pci_bus *bus = dev->bus;
1891
1892 if (device_run_wake(&dev->dev))
1893 return true;
1894
1895 if (!dev->pme_support)
1896 return false;
1897
1898 while (bus->parent) {
1899 struct pci_dev *bridge = bus->self;
1900
1901 if (device_run_wake(&bridge->dev))
1902 return true;
1903
1904 bus = bus->parent;
1905 }
1906
1907 /* We have reached the root bus. */
1908 if (bus->bridge)
1909 return device_run_wake(bus->bridge);
1910
1911 return false;
1912}
1913EXPORT_SYMBOL_GPL(pci_dev_run_wake);
1914
/**
 * pci_pm_init - Initialize PM functions of given PCI device
 * @dev: PCI device to handle.
 *
 * Reads the PCI PM capability and caches what it advertises (D1/D2 support,
 * PME# support mask) in @dev, leaving PME# generation disabled.
 */
void pci_pm_init(struct pci_dev *dev)
{
	int pm;
	u16 pmc;

	/* Runtime PM stays forbidden until user space allows it. */
	pm_runtime_forbid(&dev->dev);
	device_enable_async_suspend(&dev->dev);
	dev->wakeup_prepared = false;

	dev->pm_cap = 0;

	/* find PCI PM capability in list */
	pm = pci_find_capability(dev, PCI_CAP_ID_PM);
	if (!pm)
		return;
	/* Check device's ability to generate PME# */
	pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);

	if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
		dev_err(&dev->dev, "unsupported PM cap regs version (%u)\n",
			pmc & PCI_PM_CAP_VER_MASK);
		return;
	}

	dev->pm_cap = pm;
	dev->d3_delay = PCI_PM_D3_WAIT;
	dev->d3cold_delay = PCI_PM_D3COLD_WAIT;

	dev->d1_support = false;
	dev->d2_support = false;
	if (!pci_no_d1d2(dev)) {
		if (pmc & PCI_PM_CAP_D1)
			dev->d1_support = true;
		if (pmc & PCI_PM_CAP_D2)
			dev->d2_support = true;

		if (dev->d1_support || dev->d2_support)
			dev_printk(KERN_DEBUG, &dev->dev, "supports%s%s\n",
				   dev->d1_support ? " D1" : "",
				   dev->d2_support ? " D2" : "");
	}

	pmc &= PCI_PM_CAP_PME_MASK;
	if (pmc) {
		dev_printk(KERN_DEBUG, &dev->dev,
			 "PME# supported from%s%s%s%s%s\n",
			 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
			 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
			 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
			 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
			 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
		dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
		dev->pme_poll = true;
		/*
		 * Make device's PM flags reflect the wake-up capability, but
		 * let the user space enable it to wake up the system as needed.
		 */
		device_set_wakeup_capable(&dev->dev, true);
		/* Disable the PME# generation functionality */
		pci_pme_active(dev, false);
	} else {
		dev->pme_support = 0;
	}
}
1983
/**
 * platform_pci_wakeup_init - init platform wakeup if present
 * @dev: PCI device
 *
 * Some devices don't have PCI PM caps but can still generate wakeup
 * events through platform methods (like ACPI events). If @dev supports
 * platform wakeup events, set the device flag to indicate as much. This
 * may be redundant if the device also supports PCI PM caps, but double
 * initialization should be safe in that case.
 */
void platform_pci_wakeup_init(struct pci_dev *dev)
{
	if (!platform_pci_can_wakeup(dev))
		return;

	device_set_wakeup_capable(&dev->dev, true);
	/* Start with platform wake-up disabled until it is requested. */
	platform_pci_sleep_wake(dev, false);
}
2002
/* Link a preallocated capability save buffer into the device's list. */
static void pci_add_saved_cap(struct pci_dev *pci_dev,
	struct pci_cap_saved_state *new_cap)
{
	hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
}
2008
/**
 * pci_add_cap_save_buffer - allocate buffer for saving given capability registers
 * @dev: the PCI device
 * @cap: the capability to allocate the buffer for
 * @size: requested size of the buffer
 *
 * Returns 0 on success (or when @dev does not have @cap, in which case no
 * buffer is needed), -ENOMEM on allocation failure.
 */
static int pci_add_cap_save_buffer(
	struct pci_dev *dev, char cap, unsigned int size)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, cap);
	if (pos <= 0)
		return 0;

	/* The register save area follows the header (trailing array). */
	save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
	if (!save_state)
		return -ENOMEM;

	save_state->cap.cap_nr = cap;
	save_state->cap.size = size;
	pci_add_saved_cap(dev, save_state);

	return 0;
}
2035
/**
 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
 * @dev: the PCI device
 *
 * Preallocates save buffers for the PCI Express and PCI-X capabilities.
 * Allocation failure is not fatal here; it is only reported.
 */
void pci_allocate_cap_save_buffers(struct pci_dev *dev)
{
	int error;

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
					PCI_EXP_SAVE_REGS * sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI Express save buffer\n");

	error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
	if (error)
		dev_err(&dev->dev,
			"unable to preallocate PCI-X save buffer\n");
}
2055
/* Free every capability save buffer hanging off @dev. */
void pci_free_cap_save_buffers(struct pci_dev *dev)
{
	struct pci_cap_saved_state *tmp;
	struct hlist_node *pos, *n;

	hlist_for_each_entry_safe(tmp, pos, n, &dev->saved_cap_space, next)
		kfree(tmp);
}
2064
/**
 * pci_enable_ari - enable ARI forwarding if hardware support it
 * @dev: the PCI device
 *
 * Enables ARI forwarding in @dev's upstream bridge when @dev has the ARI
 * extended capability and the bridge advertises ARI forwarding support.
 */
void pci_enable_ari(struct pci_dev *dev)
{
	int pos;
	u32 cap;
	u16 ctrl;
	struct pci_dev *bridge;

	/* Only a PCIe function 0 can trigger enabling ARI. */
	if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
		return;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
	if (!pos)
		return;

	bridge = dev->bus->self;
	if (!bridge)
		return;

	/* ARI is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(bridge);
	if (!pos)
		return;

	pci_read_config_dword(bridge, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_ARI))
		return;

	/* Turn on ARI forwarding in the upstream bridge. */
	pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_DEVCTL2_ARI;
	pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl);

	bridge->ari_enabled = 1;
}
2102
/**
 * pci_enable_ido - enable ID-based Ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to enable
 *
 * Enable ID-based ordering on @dev. @type can contain the bits
 * %PCI_EXP_IDO_REQUEST and/or %PCI_EXP_IDO_COMPLETION to indicate
 * which types of transactions are allowed to be re-ordered.
 */
void pci_enable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Read-modify-write of Device Control 2. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl |= PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl |= PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_enable_ido);
2130
/**
 * pci_disable_ido - disable ID-based ordering on a device
 * @dev: the PCI device
 * @type: which types of IDO to disable
 *
 * Clears the requested IDO enable bits; other settings in Device Control 2
 * are preserved.
 */
void pci_disable_ido(struct pci_dev *dev, unsigned long type)
{
	int pos;
	u16 ctrl;

	/* ID-based Ordering is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (type & PCI_EXP_IDO_REQUEST)
		ctrl &= ~PCI_EXP_IDO_REQ_EN;
	if (type & PCI_EXP_IDO_COMPLETION)
		ctrl &= ~PCI_EXP_IDO_CMP_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ido);
2154
/**
 * pci_enable_obff - enable optimized buffer flush/fill
 * @dev: PCI device
 * @type: type of signaling to use
 *
 * Try to enable @type OBFF signaling on @dev. It will try using WAKE#
 * signaling if possible, falling back to message signaling only if
 * WAKE# isn't supported. @type should indicate whether the PCIe link
 * be brought out of L0s or L1 to send the message. It should be either
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS or %PCI_OBFF_SIGNAL_L0.
 *
 * If your device can benefit from receiving all messages, even at the
 * power cost of bringing the link back up from a low power state, use
 * %PCI_EXP_OBFF_SIGNAL_ALWAYS. Otherwise, use %PCI_OBFF_SIGNAL_L0 (the
 * preferred type).
 *
 * RETURNS:
 * Zero on success, appropriate error number on failure.
 */
int pci_enable_obff(struct pci_dev *dev, enum pci_obff_signal_type type)
{
	int pos;
	u32 cap;
	u16 ctrl;
	int ret;

	/* OBFF is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return -ENOTSUPP;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_OBFF_MASK))
		return -ENOTSUPP; /* no OBFF support at all */

	/* Make sure the topology supports OBFF as well */
	if (dev->bus->self) {
		/* Recursively enable OBFF on all upstream bridges first. */
		ret = pci_enable_obff(dev->bus->self, type);
		if (ret)
			return ret;
	}

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	if (cap & PCI_EXP_OBFF_WAKE)
		ctrl |= PCI_EXP_OBFF_WAKE_EN;
	else {
		/* No WAKE# support: fall back to message signaling. */
		switch (type) {
		case PCI_EXP_OBFF_SIGNAL_L0:
			if (!(ctrl & PCI_EXP_OBFF_WAKE_EN))
				ctrl |= PCI_EXP_OBFF_MSGA_EN;
			break;
		case PCI_EXP_OBFF_SIGNAL_ALWAYS:
			ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
			ctrl |= PCI_EXP_OBFF_MSGB_EN;
			break;
		default:
			WARN(1, "bad OBFF signal type\n");
			return -ENOTSUPP;
		}
	}
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_obff);
2220
/**
 * pci_disable_obff - disable optimized buffer flush/fill
 * @dev: PCI device
 *
 * Disable OBFF on @dev.
 */
void pci_disable_obff(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	/* OBFF is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Clear the WAKE# enable bit; other DEVCTL2 bits are preserved. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_OBFF_WAKE_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_obff);
2242
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002243/**
2244 * pci_ltr_supported - check whether a device supports LTR
2245 * @dev: PCI device
2246 *
2247 * RETURNS:
2248 * True if @dev supports latency tolerance reporting, false otherwise.
2249 */
Myron Stowec32823f2012-06-01 15:16:25 -06002250static bool pci_ltr_supported(struct pci_dev *dev)
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002251{
2252 int pos;
2253 u32 cap;
2254
Myron Stowec463b8c2012-06-01 15:16:37 -06002255 /* LTR is a PCIe cap v2 feature */
2256 pos = pci_pcie_cap2(dev);
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002257 if (!pos)
2258 return false;
2259
2260 pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP2, &cap);
2261
2262 return cap & PCI_EXP_DEVCAP2_LTR;
2263}
Jesse Barnes51c2e0a2011-01-14 08:53:04 -08002264
/**
 * pci_enable_ltr - enable latency tolerance reporting
 * @dev: PCI device
 *
 * Enable LTR on @dev if possible, which means enabling it first on
 * upstream ports.
 *
 * RETURNS:
 * Zero on success, errno on failure.
 */
int pci_enable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;
	int ret;

	if (!pci_ltr_supported(dev))
		return -ENOTSUPP;

	/* LTR is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return -ENOTSUPP;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return -EINVAL;

	/* Enable upstream ports first */
	if (dev->bus->self) {
		/* Recurses up to the root port. */
		ret = pci_enable_ltr(dev->bus->self);
		if (ret)
			return ret;
	}

	/* Read-modify-write of Device Control 2. */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl |= PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);

	return 0;
}
EXPORT_SYMBOL(pci_enable_ltr);
2307
/**
 * pci_disable_ltr - disable latency tolerance reporting
 * @dev: PCI device
 *
 * Clears the LTR enable bit; only the primary function may do so.
 */
void pci_disable_ltr(struct pci_dev *dev)
{
	int pos;
	u16 ctrl;

	if (!pci_ltr_supported(dev))
		return;

	/* LTR is a PCIe cap v2 feature */
	pos = pci_pcie_cap2(dev);
	if (!pos)
		return;

	/* Only primary function can enable/disable LTR */
	if (PCI_FUNC(dev->devfn) != 0)
		return;

	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL2, &ctrl);
	ctrl &= ~PCI_EXP_LTR_EN;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL2, ctrl);
}
EXPORT_SYMBOL(pci_disable_ltr);
2334
/*
 * Reduce *val until it fits in the 10-bit LTR value field, counting how
 * many 32x scale steps were needed (rounding each division up).
 */
static int __pci_ltr_scale(int *val)
{
	int scale;

	for (scale = 0; *val > 1023; scale++)
		*val = (*val + 31) / 32;

	return scale;
}
2345
2346/**
2347 * pci_set_ltr - set LTR latency values
2348 * @dev: PCI device
2349 * @snoop_lat_ns: snoop latency in nanoseconds
2350 * @nosnoop_lat_ns: nosnoop latency in nanoseconds
2351 *
2352 * Figure out the scale and set the LTR values accordingly.
2353 */
2354int pci_set_ltr(struct pci_dev *dev, int snoop_lat_ns, int nosnoop_lat_ns)
2355{
2356 int pos, ret, snoop_scale, nosnoop_scale;
2357 u16 val;
2358
2359 if (!pci_ltr_supported(dev))
2360 return -ENOTSUPP;
2361
2362 snoop_scale = __pci_ltr_scale(&snoop_lat_ns);
2363 nosnoop_scale = __pci_ltr_scale(&nosnoop_lat_ns);
2364
2365 if (snoop_lat_ns > PCI_LTR_VALUE_MASK ||
2366 nosnoop_lat_ns > PCI_LTR_VALUE_MASK)
2367 return -EINVAL;
2368
2369 if ((snoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)) ||
2370 (nosnoop_scale > (PCI_LTR_SCALE_MASK >> PCI_LTR_SCALE_SHIFT)))
2371 return -EINVAL;
2372
2373 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_LTR);
2374 if (!pos)
2375 return -ENOTSUPP;
2376
2377 val = (snoop_scale << PCI_LTR_SCALE_SHIFT) | snoop_lat_ns;
2378 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_SNOOP_LAT, val);
2379 if (ret != 4)
2380 return -EIO;
2381
2382 val = (nosnoop_scale << PCI_LTR_SCALE_SHIFT) | nosnoop_lat_ns;
2383 ret = pci_write_config_word(dev, pos + PCI_LTR_MAX_NOSNOOP_LAT, val);
2384 if (ret != 4)
2385 return -EIO;
2386
2387 return 0;
2388}
2389EXPORT_SYMBOL(pci_set_ltr);
2390
/* Set by pci_request_acs(); checked by pci_enable_acs() before touching hw */
static int pci_acs_enable;

/**
 * pci_request_acs - ask for ACS to be enabled if supported
 *
 * Only records the request; the actual programming happens later,
 * when pci_enable_acs() runs for each device.
 */
void pci_request_acs(void)
{
	pci_acs_enable = 1;
}
2400
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002401/**
Allen Kayae21ee62009-10-07 10:27:17 -07002402 * pci_enable_acs - enable ACS if hardware support it
2403 * @dev: the PCI device
2404 */
2405void pci_enable_acs(struct pci_dev *dev)
2406{
2407 int pos;
2408 u16 cap;
2409 u16 ctrl;
2410
Chris Wright5d990b62009-12-04 12:15:21 -08002411 if (!pci_acs_enable)
2412 return;
2413
Kenji Kaneshige5f4d91a2009-11-11 14:36:17 +09002414 if (!pci_is_pcie(dev))
Allen Kayae21ee62009-10-07 10:27:17 -07002415 return;
2416
2417 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2418 if (!pos)
2419 return;
2420
2421 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2422 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2423
2424 /* Source Validation */
2425 ctrl |= (cap & PCI_ACS_SV);
2426
2427 /* P2P Request Redirect */
2428 ctrl |= (cap & PCI_ACS_RR);
2429
2430 /* P2P Completion Redirect */
2431 ctrl |= (cap & PCI_ACS_CR);
2432
2433 /* Upstream Forwarding */
2434 ctrl |= (cap & PCI_ACS_UF);
2435
2436 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2437}
2438
/**
 * pci_acs_enabled - test ACS against required flags for a given device
 * @pdev: device to test
 * @acs_flags: required PCI ACS flags
 *
 * Return true if the device supports the provided flags.  Automatically
 * filters out flags that are not implemented on multifunction devices.
 */
bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
{
	int pos, ret;
	u16 ctrl;

	/* Device-specific quirks get the first say; >= 0 is definitive */
	ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
	if (ret >= 0)
		return ret > 0;

	/* Conventional PCI has no ACS capability */
	if (!pci_is_pcie(pdev))
		return false;

	/* Filter out flags not applicable to multifunction */
	if (pdev->multifunction)
		acs_flags &= (PCI_ACS_RR | PCI_ACS_CR |
			      PCI_ACS_EC | PCI_ACS_DT);

	/*
	 * Only downstream ports, root ports and multifunction devices are
	 * checked against the ACS control register; any other device type
	 * is treated as passing by default.
	 */
	if (pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM ||
	    pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT ||
	    pdev->multifunction) {
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
		if (!pos)
			return false;

		pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
		if ((ctrl & acs_flags) != acs_flags)
			return false;
	}

	return true;
}
2478
2479/**
2480 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2481 * @start: starting downstream device
2482 * @end: ending upstream device or NULL to search to the root bus
2483 * @acs_flags: required flags
2484 *
2485 * Walk up a device tree from start to end testing PCI ACS support. If
2486 * any step along the way does not support the required flags, return false.
2487 */
2488bool pci_acs_path_enabled(struct pci_dev *start,
2489 struct pci_dev *end, u16 acs_flags)
2490{
2491 struct pci_dev *pdev, *parent = start;
2492
2493 do {
2494 pdev = parent;
2495
2496 if (!pci_acs_enabled(pdev, acs_flags))
2497 return false;
2498
2499 if (pci_is_root_bus(pdev->bus))
2500 return (end == NULL);
2501
2502 parent = pdev->bus->self;
2503 } while (pdev != end);
2504
2505 return true;
2506}
2507
/**
 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
 * @dev: the PCI device
 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device behind one level of bridge.  This is
 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
 * behind bridges on add-in cards.  For devices with ARI enabled, the slot
 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
 * the PCI Express Base Specification, Revision 2.1)
 */
u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
{
	int slot;

	if (pci_ari_enabled(dev->bus))
		slot = 0;
	else
		slot = PCI_SLOT(dev->devfn);

	/* Rotate the 1-based pin number by the slot number, modulo 4 */
	return (((pin - 1) + slot) % 4) + 1;
}
2530
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531int
2532pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
2533{
2534 u8 pin;
2535
Kristen Accardi514d2072005-11-02 16:24:39 -08002536 pin = dev->pin;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 if (!pin)
2538 return -1;
Bjorn Helgaas878f2e52008-12-09 16:11:46 -07002539
Kenji Kaneshige8784fd42009-05-26 16:07:33 +09002540 while (!pci_is_root_bus(dev->bus)) {
Bjorn Helgaas57c2cf72008-12-11 11:24:23 -07002541 pin = pci_swizzle_interrupt_pin(dev, pin);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002542 dev = dev->bus->self;
2543 }
2544 *bridge = dev;
2545 return pin;
2546}
2547
/**
 * pci_common_swizzle - swizzle INTx all the way to root bridge
 * @dev: the PCI device
 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
 *
 * Perform INTx swizzling for a device.  This traverses through all PCI-to-PCI
 * bridges all the way up to a PCI root bus.  On return, *@pinp holds the
 * swizzled pin and the return value is the slot of the root-bus bridge.
 */
u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
{
	u8 pin = *pinp;

	while (!pci_is_root_bus(dev->bus)) {
		pin = pci_swizzle_interrupt_pin(dev, pin);
		dev = dev->bus->self;
	}
	*pinp = pin;
	return PCI_SLOT(dev->devfn);
}
2567
2568/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 * pci_release_region - Release a PCI bar
2570 * @pdev: PCI device whose resources were previously reserved by pci_request_region
2571 * @bar: BAR to release
2572 *
2573 * Releases the PCI I/O and memory resources previously reserved by a
2574 * successful call to pci_request_region. Call this function only
2575 * after all use of the PCI regions has ceased.
2576 */
2577void pci_release_region(struct pci_dev *pdev, int bar)
2578{
Tejun Heo9ac78492007-01-20 16:00:26 +09002579 struct pci_devres *dr;
2580
Linus Torvalds1da177e2005-04-16 15:20:36 -07002581 if (pci_resource_len(pdev, bar) == 0)
2582 return;
2583 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
2584 release_region(pci_resource_start(pdev, bar),
2585 pci_resource_len(pdev, bar));
2586 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
2587 release_mem_region(pci_resource_start(pdev, bar),
2588 pci_resource_len(pdev, bar));
Tejun Heo9ac78492007-01-20 16:00:26 +09002589
2590 dr = find_pci_dr(pdev);
2591 if (dr)
2592 dr->region_mask &= ~(1 << bar);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002593}
2594
/**
 * __pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 * @exclusive: whether the region access is exclusive or not
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * If @exclusive is set, then the region is marked so that userspace
 * is explicitly not allowed to map the resource via /dev/mem or
 * sysfs MMIO access.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
static int __pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
									int exclusive)
{
	struct pci_devres *dr;

	/* An unimplemented BAR needs no reservation */
	if (pci_resource_len(pdev, bar) == 0)
		return 0;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
		if (!request_region(pci_resource_start(pdev, bar),
			    pci_resource_len(pdev, bar), res_name))
			goto err_out;
	}
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
		if (!__request_mem_region(pci_resource_start(pdev, bar),
					pci_resource_len(pdev, bar), res_name,
					exclusive))
			goto err_out;
	}

	/* Record the BAR so a managed (devres) release can undo this */
	dr = find_pci_dr(pdev);
	if (dr)
		dr->region_mask |= 1 << bar;

	return 0;

err_out:
	dev_warn(&pdev->dev, "BAR %d: can't reserve %pR\n", bar,
		 &pdev->resource[bar]);
	return -EBUSY;
}
2645
/**
 * pci_request_region - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * This is the non-exclusive variant; see pci_request_region_exclusive()
 * to additionally block userspace mappings of the region.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, 0);
}
2664
/**
 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
 * @pdev: PCI device whose resources are to be reserved
 * @bar: BAR to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark the PCI region associated with PCI device @pdev BAR @bar as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 *
 * The key difference that _exclusive makes is that userspace is
 * explicitly not allowed to map the resource via /dev/mem or
 * sysfs.
 */
int pci_request_region_exclusive(struct pci_dev *pdev, int bar, const char *res_name)
{
	return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
}
/**
 * pci_release_selected_regions - Release selected PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved
 * @bars: Bitmask of BARs to be released
 *
 * Release selected PCI I/O and memory resources previously reserved.
 * Call this function only after all use of the PCI regions has ceased.
 */
void pci_release_selected_regions(struct pci_dev *pdev, int bars)
{
	int bar;

	/* Walk the six standard BARs, releasing those selected in @bars */
	for (bar = 0; bar < 6; bar++) {
		if (bars & (1 << bar))
			pci_release_region(pdev, bar);
	}
}
2703
/*
 * Request every BAR selected in @bars via __pci_request_region().  On
 * failure, roll back the BARs already acquired so the caller is left
 * holding nothing.  @excl is passed through as the exclusivity flag.
 */
int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name, int excl)
{
	int i;

	for (i = 0; i < 6; i++)
		if (bars & (1 << i))
			if (__pci_request_region(pdev, i, res_name, excl))
				goto err_out;
	return 0;

err_out:
	/* Release only the BARs acquired before the failure (indices < i) */
	while(--i >= 0)
		if (bars & (1 << i))
			pci_release_region(pdev, i);

	return -EBUSY;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722
Arjan van de Vene8de1482008-10-22 19:55:31 -07002723
/**
 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Returns 0 on success, or %EBUSY on error; on failure no BARs remain
 * reserved.
 */
int pci_request_selected_regions(struct pci_dev *pdev, int bars,
				 const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name, 0);
}
2735
/**
 * pci_request_selected_regions_exclusive - Reserve selected BARs exclusively
 * @pdev: PCI device whose resources are to be reserved
 * @bars: Bitmask of BARs to be requested
 * @res_name: Name to be associated with resource
 *
 * Like pci_request_selected_regions(), but also marks the regions so
 * userspace cannot map them via /dev/mem or sysfs.
 */
int pci_request_selected_regions_exclusive(struct pci_dev *pdev,
				 int bars, const char *res_name)
{
	return __pci_request_selected_regions(pdev, bars, res_name,
				IORESOURCE_EXCLUSIVE);
}
2742
/**
 * pci_release_regions - Release reserved PCI I/O and memory resources
 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
 *
 * Releases all PCI I/O and memory resources previously reserved by a
 * successful call to pci_request_regions.  Call this function only
 * after all use of the PCI regions has ceased.
 */

void pci_release_regions(struct pci_dev *pdev)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	pci_release_selected_regions(pdev, (1 << 6) - 1);
}
2756
/**
 * pci_request_regions - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
}
2774
/**
 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
 * @pdev: PCI device whose resources are to be reserved
 * @res_name: Name to be associated with resource.
 *
 * Mark all PCI regions associated with PCI device @pdev as
 * being reserved by owner @res_name.  Do not access any
 * address inside the PCI regions unless this call returns
 * successfully.
 *
 * pci_request_regions_exclusive() will mark the region so that
 * /dev/mem and the sysfs MMIO access will not be allowed.
 *
 * Returns 0 on success, or %EBUSY on error.  A warning
 * message is also printed on failure.
 */
int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
{
	/* (1 << 6) - 1 selects all six standard BARs */
	return pci_request_selected_regions_exclusive(pdev,
					((1 << 6) - 1), res_name);
}
2796
Ben Hutchings6a479072008-12-23 03:08:29 +00002797static void __pci_set_master(struct pci_dev *dev, bool enable)
2798{
2799 u16 old_cmd, cmd;
2800
2801 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
2802 if (enable)
2803 cmd = old_cmd | PCI_COMMAND_MASTER;
2804 else
2805 cmd = old_cmd & ~PCI_COMMAND_MASTER;
2806 if (cmd != old_cmd) {
2807 dev_dbg(&dev->dev, "%s bus mastering\n",
2808 enable ? "enabling" : "disabling");
2809 pci_write_config_word(dev, PCI_COMMAND, cmd);
2810 }
2811 dev->is_busmaster = enable;
2812}
Arjan van de Vene8de1482008-10-22 19:55:31 -07002813
/**
 * pcibios_setup - process "pci=" kernel boot arguments
 * @str: string used to pass in "pci=" kernel boot arguments
 *
 * Process kernel boot arguments.  This is the default implementation.
 * Architecture specific implementations can override this as necessary.
 * The default consumes nothing and hands the string back unchanged.
 */
char * __weak __init pcibios_setup(char *str)
{
	return str;
}
2825
2826/**
Myron Stowe96c55902011-10-28 15:48:38 -06002827 * pcibios_set_master - enable PCI bus-mastering for device dev
2828 * @dev: the PCI device to enable
2829 *
2830 * Enables PCI bus-mastering for the device. This is the default
2831 * implementation. Architecture specific implementations can override
2832 * this if necessary.
2833 */
2834void __weak pcibios_set_master(struct pci_dev *dev)
2835{
2836 u8 lat;
2837
Myron Stowef6766782011-10-28 15:49:20 -06002838 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
2839 if (pci_is_pcie(dev))
2840 return;
2841
Myron Stowe96c55902011-10-28 15:48:38 -06002842 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
2843 if (lat < 16)
2844 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
2845 else if (lat > pcibios_max_latency)
2846 lat = pcibios_max_latency;
2847 else
2848 return;
2849 dev_printk(KERN_DEBUG, &dev->dev, "setting latency timer to %d\n", lat);
2850 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
2851}
2852
/**
 * pci_set_master - enables bus-mastering for device dev
 * @dev: the PCI device to enable
 *
 * Enables bus-mastering on the device and calls pcibios_set_master()
 * to do the needed arch specific settings.
 */
void pci_set_master(struct pci_dev *dev)
{
	__pci_set_master(dev, true);
	/* arch hook, e.g. for latency timer programming on legacy PCI */
	pcibios_set_master(dev);
}
2865
/**
 * pci_clear_master - disables bus-mastering for device dev
 * @dev: the PCI device to disable
 *
 * Clears the Bus Master bit and records the state; no arch hook is
 * invoked on the disable path.
 */
void pci_clear_master(struct pci_dev *dev)
{
	__pci_set_master(dev, false);
}
2874
/**
 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
 * @dev: the PCI device for which MWI is to be enabled
 *
 * Helper function for pci_set_mwi.
 * Originally copied from drivers/net/acenic.c.
 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_set_cacheline_size(struct pci_dev *dev)
{
	u8 cacheline_size;

	if (!pci_cache_line_size)
		return -EINVAL;

	/* Validate current setting: the PCI_CACHE_LINE_SIZE must be
	   equal to or multiple of the right value. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size >= pci_cache_line_size &&
	    (cacheline_size % pci_cache_line_size) == 0)
		return 0;

	/* Write the correct value. */
	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
	/* Read it back; hardware may ignore unsupported values. */
	pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
	if (cacheline_size == pci_cache_line_size)
		return 0;

	/* << 2 presumably converts register units to bytes -- the register
	   appears to be kept in dword units; confirm against PCI spec. */
	dev_printk(KERN_DEBUG, &dev->dev, "cache line size of %d is not "
		   "supported\n", pci_cache_line_size << 2);

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
2912
2913#ifdef PCI_DISABLE_MWI
/* MWI support compiled out (PCI_DISABLE_MWI): all helpers are no-ops
 * that report success so callers need no conditional code. */
int pci_set_mwi(struct pci_dev *dev)
{
	return 0;
}

int pci_try_set_mwi(struct pci_dev *dev)
{
	return 0;
}

void pci_clear_mwi(struct pci_dev *dev)
{
}
2927
2928#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07002929
2930/**
2931 * pci_set_mwi - enables memory-write-invalidate PCI transaction
2932 * @dev: the PCI device for which MWI is enabled
2933 *
Randy Dunlap694625c2007-07-09 11:55:54 -07002934 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 *
2936 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
2937 */
2938int
2939pci_set_mwi(struct pci_dev *dev)
2940{
2941 int rc;
2942 u16 cmd;
2943
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002944 rc = pci_set_cacheline_size(dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 if (rc)
2946 return rc;
2947
2948 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2949 if (! (cmd & PCI_COMMAND_INVALIDATE)) {
Bjorn Helgaas80ccba12008-06-13 10:52:11 -06002950 dev_dbg(&dev->dev, "enabling Mem-Wr-Inval\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 cmd |= PCI_COMMAND_INVALIDATE;
2952 pci_write_config_word(dev, PCI_COMMAND, cmd);
2953 }
2954
2955 return 0;
2956}
2957
/**
 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
 * @dev: the PCI device for which MWI is enabled
 *
 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
 * Callers are not required to check the return value.
 *
 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
 */
int pci_try_set_mwi(struct pci_dev *dev)
{
	return pci_set_mwi(dev);
}
2972
2973/**
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
2975 * @dev: the PCI device to disable
2976 *
2977 * Disables PCI Memory-Write-Invalidate transaction on the device
2978 */
2979void
2980pci_clear_mwi(struct pci_dev *dev)
2981{
2982 u16 cmd;
2983
2984 pci_read_config_word(dev, PCI_COMMAND, &cmd);
2985 if (cmd & PCI_COMMAND_INVALIDATE) {
2986 cmd &= ~PCI_COMMAND_INVALIDATE;
2987 pci_write_config_word(dev, PCI_COMMAND, cmd);
2988 }
2989}
Matthew Wilcoxedb2d972006-10-10 08:01:21 -06002990#endif /* ! PCI_DISABLE_MWI */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002991
Brett M Russa04ce0f2005-08-15 15:23:41 -04002992/**
2993 * pci_intx - enables/disables PCI INTx for device dev
Randy Dunlap8f7020d2005-10-23 11:57:38 -07002994 * @pdev: the PCI device to operate on
2995 * @enable: boolean: whether to enable or disable PCI INTx
Brett M Russa04ce0f2005-08-15 15:23:41 -04002996 *
2997 * Enables/disables PCI INTx for device dev
2998 */
2999void
3000pci_intx(struct pci_dev *pdev, int enable)
3001{
3002 u16 pci_command, new;
3003
3004 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3005
3006 if (enable) {
3007 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3008 } else {
3009 new = pci_command | PCI_COMMAND_INTX_DISABLE;
3010 }
3011
3012 if (new != pci_command) {
Tejun Heo9ac78492007-01-20 16:00:26 +09003013 struct pci_devres *dr;
3014
Brett M Russ2fd9d742005-09-09 10:02:22 -07003015 pci_write_config_word(pdev, PCI_COMMAND, new);
Tejun Heo9ac78492007-01-20 16:00:26 +09003016
3017 dr = find_pci_dr(pdev);
3018 if (dr && !dr->restore_intx) {
3019 dr->restore_intx = 1;
3020 dr->orig_intx = !enable;
3021 }
Brett M Russa04ce0f2005-08-15 15:23:41 -04003022 }
3023}
3024
/**
 * pci_intx_mask_supported - probe for INTx masking support
 * @dev: the PCI device to operate on
 *
 * Check if the device dev supports INTx masking via the config space
 * command word.  Probes by flipping %PCI_COMMAND_INTX_DISABLE and
 * reading it back; the original command word is restored before return.
 */
bool pci_intx_mask_supported(struct pci_dev *dev)
{
	bool mask_supported = false;
	u16 orig, new;

	/* Quirked devices are known to misbehave; never probe them */
	if (dev->broken_intx_masking)
		return false;

	/* Serialize against concurrent config-space access */
	pci_cfg_access_lock(dev);

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	pci_write_config_word(dev, PCI_COMMAND,
			      orig ^ PCI_COMMAND_INTX_DISABLE);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	/*
	 * There's no way to protect against hardware bugs or detect them
	 * reliably, but as long as we know what the value should be, let's
	 * go ahead and check it.
	 */
	if ((new ^ orig) & ~PCI_COMMAND_INTX_DISABLE) {
		dev_err(&dev->dev, "Command register changed from "
			"0x%x to 0x%x: driver or hardware bug?\n", orig, new);
	} else if ((new ^ orig) & PCI_COMMAND_INTX_DISABLE) {
		mask_supported = true;
		/* Undo the probe write */
		pci_write_config_word(dev, PCI_COMMAND, orig);
	}

	pci_cfg_access_unlock(dev);
	return mask_supported;
}
EXPORT_SYMBOL_GPL(pci_intx_mask_supported);
3064
/*
 * Atomically (under pci_lock) inspect the INTx pending status and update
 * the INTx disable bit to match the caller's intent.
 *
 * @mask: true to mask (on a pending interrupt), false to unmask (when no
 *        interrupt is pending).
 *
 * Returns true if the mask state was updated; false when the pending
 * state did not match the requested transition and nothing was written.
 */
static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
{
	struct pci_bus *bus = dev->bus;
	bool mask_updated = true;
	u32 cmd_status_dword;
	u16 origcmd, newcmd;
	unsigned long flags;
	bool irq_pending;

	/*
	 * We do a single dword read to retrieve both command and status.
	 * Document assumptions that make this possible.
	 */
	BUILD_BUG_ON(PCI_COMMAND % 4);
	BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);

	raw_spin_lock_irqsave(&pci_lock, flags);

	/* Low half: command register; high half: status register */
	bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);

	irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;

	/*
	 * Check interrupt status register to see whether our device
	 * triggered the interrupt (when masking) or the next IRQ is
	 * already pending (when unmasking).
	 */
	if (mask != irq_pending) {
		mask_updated = false;
		goto done;
	}

	origcmd = cmd_status_dword;
	newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
	if (mask)
		newcmd |= PCI_COMMAND_INTX_DISABLE;
	if (newcmd != origcmd)
		bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);

done:
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return mask_updated;
}
3109
/**
 * pci_check_and_mask_intx - mask INTx on pending interrupt
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, mask it and
 * return true in that case.  False is returned if no interrupt was
 * pending.
 */
bool pci_check_and_mask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, true);
}
EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3123
/**
 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
 * @dev: the PCI device to operate on
 *
 * Check if the device dev has its INTx line asserted, unmask it if not
 * and return true.  False is returned and the mask remains active if
 * there was still an interrupt pending.
 */
bool pci_check_and_unmask_intx(struct pci_dev *dev)
{
	return pci_check_and_set_intx_mask(dev, false);
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
3137
/**
 * pci_msi_off - disables any msi or msix capabilities
 * @dev: the PCI device to operate on
 *
 * If you want to use msi see pci_enable_msi and friends.
 * This is a lower level primitive that allows us to disable
 * msi operation at the device level.
 */
void pci_msi_off(struct pci_dev *dev)
{
	int pos;
	u16 control;

	/* Clear the MSI enable flag, if the capability is present */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSI_FLAGS, &control);
		control &= ~PCI_MSI_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
	}
	/* Likewise for MSI-X */
	pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (pos) {
		pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
		control &= ~PCI_MSIX_FLAGS_ENABLE;
		pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
	}
}
EXPORT_SYMBOL_GPL(pci_msi_off);
Eric W. Biedermanf5f2b132007-03-05 00:30:07 -08003165
/* Thin PCI wrapper around dma_set_max_seg_size() on the embedded device */
int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
{
	return dma_set_max_seg_size(&dev->dev, size);
}
EXPORT_SYMBOL(pci_set_dma_max_seg_size);
FUJITA Tomonori4d57cdf2008-02-04 22:27:55 -08003171
/* Thin PCI wrapper around dma_set_seg_boundary() on the embedded device */
int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
{
	return dma_set_seg_boundary(&dev->dev, mask);
}
EXPORT_SYMBOL(pci_set_dma_seg_boundary);
FUJITA Tomonori59fc67d2008-02-04 22:28:14 -08003177
/*
 * Perform (or, with @probe set, only test for) a PCIe Function Level
 * Reset.  Returns -ENOTTY when the device has no PCIe capability or does
 * not advertise FLR; 0 otherwise.
 */
static int pcie_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u32 cap;
	u16 status, control;

	pos = pci_pcie_cap(dev);
	if (!pos)
		return -ENOTTY;

	pci_read_config_dword(dev, pos + PCI_EXP_DEVCAP, &cap);
	if (!(cap & PCI_EXP_DEVCAP_FLR))
		return -ENOTTY;

	/* Caller only wanted to know whether FLR is available */
	if (probe)
		return 0;

	/* Wait for Transaction Pending bit clean */
	/* Polls up to four times, sleeping 100/200/400 ms between tries */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos + PCI_EXP_DEVSTA, &status);
		if (!(status & PCI_EXP_DEVSTA_TRPND))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	/* Initiate the FLR, then allow the mandatory recovery time */
	pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &control);
	control |= PCI_EXP_DEVCTL_BCR_FLR;
	pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, control);

	msleep(100);

	return 0;
}
Sheng Yangd91cdc72008-11-11 17:17:47 +08003218
/*
 * pci_af_flr - reset a function via the Advanced Features capability
 * @dev: PCI device to reset
 * @probe: if set, only check whether AF FLR is supported; do not reset
 *
 * Returns 0 on success (or, when @probe is set, when AF FLR is supported),
 * -ENOTTY if the AF capability, its FLR, or Transaction Pending support
 * is absent.
 */
static int pci_af_flr(struct pci_dev *dev, int probe)
{
	int i;
	int pos;
	u8 cap;
	u8 status;

	pos = pci_find_capability(dev, PCI_CAP_ID_AF);
	if (!pos)
		return -ENOTTY;

	/* Both Transaction Pending and FLR support are required. */
	pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
	if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
		return -ENOTTY;

	if (probe)
		return 0;

	/*
	 * Wait for Transaction Pending bit clean.
	 * Poll up to four times with increasing delays: 0, 100, 200, 400 ms.
	 */
	for (i = 0; i < 4; i++) {
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_byte(dev, pos + PCI_AF_STATUS, &status);
		if (!(status & PCI_AF_STATUS_TP))
			goto clear;
	}

	dev_err(&dev->dev, "transaction is not cleared; "
			"proceeding with reset anyway\n");

clear:
	/* Trigger the reset, then give the device 100 ms to recover. */
	pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
	msleep(100);

	return 0;
}
3256
/**
 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
 * @dev: Device to reset.
 * @probe: If set, only check if the device can be reset this way.
 *
 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
 * unset, it will be reinitialized internally when going from PCI_D3hot to
 * PCI_D0. If that's the case and the device is not in a low-power state
 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
 *
 * NOTE: This causes the caller to sleep for twice the device power transition
 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
 * by default (i.e. unless the @dev's d3_delay field has a different value).
 * Moreover, only devices in D0 can be reset by this function.
 */
static int pci_pm_reset(struct pci_dev *dev, int probe)
{
	u16 csr;

	if (!dev->pm_cap)
		return -ENOTTY;

	/* Devices advertising NO_SOFT_RESET keep state across D3hot->D0. */
	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
	if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
		return -ENOTTY;

	if (probe)
		return 0;

	if (dev->current_state != PCI_D0)
		return -EINVAL;

	/* D0 -> D3hot, then sleep for the device's transition delay. */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D3hot;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	/* D3hot -> D0; the round trip causes the internal reset. */
	csr &= ~PCI_PM_CTRL_STATE_MASK;
	csr |= PCI_D0;
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
	pci_dev_d3_sleep(dev);

	return 0;
}
3301
/*
 * pci_parent_bus_reset - reset a device via secondary bus reset upstream
 * @dev: PCI device to reset
 * @probe: if set, only check whether this method is applicable
 *
 * Only applicable when @dev is not on a root bus, is not itself a bridge
 * (no subordinate bus), has a parent bridge, and is the sole device on
 * its bus — the bus reset would otherwise affect siblings too.
 */
static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
	u16 ctrl;
	struct pci_dev *pdev;

	if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
		return -ENOTTY;

	/* Refuse if any sibling shares the bus. */
	list_for_each_entry(pdev, &dev->bus->devices, bus_list)
		if (pdev != dev)
			return -ENOTTY;

	if (probe)
		return 0;

	/* Assert the secondary bus reset on the parent bridge for 100 ms... */
	pci_read_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, &ctrl);
	ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	/* ...then deassert and allow another 100 ms for recovery. */
	ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev->bus->self, PCI_BRIDGE_CONTROL, ctrl);
	msleep(100);

	return 0;
}
3328
/*
 * __pci_dev_reset - try each available function-reset method in turn
 * @dev: PCI device to reset
 * @probe: if set, only check whether some method is available
 *
 * Methods are attempted from most to least specific.  Each returns
 * -ENOTTY when it does not apply to @dev, in which case the next one
 * is tried; any other result (success or failure) is final.
 * Caller is responsible for any locking (see pci_dev_reset()).
 */
static int __pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	might_sleep();

	/* 1. Device-specific (quirk) reset. */
	rc = pci_dev_specific_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	/* 2. PCIe Function Level Reset. */
	rc = pcie_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	/* 3. Advanced Features FLR. */
	rc = pci_af_flr(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	/* 4. Power Management D0->D3hot->D0 cycle. */
	rc = pci_pm_reset(dev, probe);
	if (rc != -ENOTTY)
		goto done;

	/* 5. Secondary bus reset via the parent bridge. */
	rc = pci_parent_bus_reset(dev, probe);
done:
	return rc;
}
3355
/*
 * pci_dev_reset - reset a device, serialized against config access and
 * driver binding
 * @dev: PCI device to reset
 * @probe: if set, only check whether the device can be reset
 *
 * For a real reset (!probe), config space access is blocked and the
 * device lock is held across the reset; the locks are released in
 * reverse order afterwards.
 */
static int pci_dev_reset(struct pci_dev *dev, int probe)
{
	int rc;

	if (!probe) {
		pci_cfg_access_lock(dev);
		/* block PM suspend, driver probe, etc. */
		device_lock(&dev->dev);
	}

	rc = __pci_dev_reset(dev, probe);

	if (!probe) {
		device_unlock(&dev->dev);
		pci_cfg_access_unlock(dev);
	}
	return rc;
}
/**
 * __pci_reset_function - reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function(struct pci_dev *dev)
{
	/* probe == 0: perform a real reset, with internal locking. */
	return pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function);
Sheng Yang8dd7f802008-10-21 17:38:25 +08003396
/**
 * __pci_reset_function_locked - reset a PCI device function while holding
 * the @dev mutex lock.
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * The device function is presumed to be unused and the caller is holding
 * the device mutex lock when this function is called.
 * Resetting the device will make the contents of PCI configuration space
 * random, so any caller of this must be prepared to reinitialise the
 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
 * etc.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int __pci_reset_function_locked(struct pci_dev *dev)
{
	/* Bypass pci_dev_reset()'s locking; the caller already holds it. */
	return __pci_dev_reset(dev, 0);
}
EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
3421
/**
 * pci_probe_reset_function - check whether the device can be safely reset
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * Returns 0 if the device function can be reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_probe_reset_function(struct pci_dev *dev)
{
	/* probe == 1: only query availability; nothing is reset. */
	return pci_dev_reset(dev, 1);
}
3437
/**
 * pci_reset_function - quiesce and reset a PCI device function
 * @dev: PCI device to reset
 *
 * Some devices allow an individual function to be reset without affecting
 * other functions in the same device.  The PCI device must be responsive
 * to PCI config space in order to use this function.
 *
 * This function does not just reset the PCI portion of a device, but
 * clears all the state associated with the device.  This function differs
 * from __pci_reset_function in that it saves and restores device state
 * over the reset.
 *
 * Returns 0 if the device function was successfully reset or negative if the
 * device doesn't support resetting a single function.
 */
int pci_reset_function(struct pci_dev *dev)
{
	int rc;

	/* Bail out early if no reset method is available for this device. */
	rc = pci_dev_reset(dev, 1);
	if (rc)
		return rc;

	pci_save_state(dev);

	/*
	 * both INTx and MSI are disabled after the Interrupt Disable bit
	 * is set and the Bus Master bit is cleared.
	 */
	pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);

	rc = pci_dev_reset(dev, 0);

	/* Restore the state saved above regardless of the reset outcome. */
	pci_restore_state(dev);

	return rc;
}
EXPORT_SYMBOL_GPL(pci_reset_function);
3477
/**
 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
 * @dev: PCI device to query
 *
 * Returns mmrbc: maximum designed memory read count in bytes
 * or appropriate error value.
 */
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
	int cap;
	u32 stat;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Decode the field (>> 21) into bytes: 512 << field, i.e. 512..4096. */
	return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
3500
3501/**
3502 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
3503 * @dev: PCI device to query
3504 *
3505 * Returns mmrbc: maximum memory read count in bytes
3506 * or appropriate error value.
3507 */
3508int pcix_get_mmrbc(struct pci_dev *dev)
3509{
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003510 int cap;
Dean Nelsonbdc2bda2010-03-09 22:26:48 -05003511 u16 cmd;
Peter Orubad556ad42007-05-15 13:59:13 +02003512
3513 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
3514 if (!cap)
3515 return -EINVAL;
3516
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003517 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
3518 return -EINVAL;
Peter Orubad556ad42007-05-15 13:59:13 +02003519
Dean Nelson7c9e2b12010-03-09 22:26:55 -05003520 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
Peter Orubad556ad42007-05-15 13:59:13 +02003521}
3522EXPORT_SYMBOL(pcix_get_mmrbc);
3523
/**
 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
 * @dev: PCI device to query
 * @mmrbc: maximum memory read count in bytes
 *	valid values are 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read byte count, some bridges have erratas
 * that prevent this.
 */
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
	int cap;
	u32 stat, v, o;
	u16 cmd;

	if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
		return -EINVAL;

	/* Encode 512..4096 bytes as field value 0..3 (ffs(512) == 10). */
	v = ffs(mmrbc) - 10;

	cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!cap)
		return -EINVAL;

	if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
		return -EINVAL;

	/* Refuse requests beyond the device's designed maximum. */
	if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
		return -E2BIG;

	if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
		return -EINVAL;

	/* o = current setting; only touch hardware if it actually changes. */
	o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
	if (o != v) {
		/* Buggy buses must never have mmrbc raised (bridge erratum). */
		if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
			return -EIO;

		cmd &= ~PCI_X_CMD_MAX_READ;
		cmd |= v << 2;
		if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
			return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
3570
3571/**
3572 * pcie_get_readrq - get PCI Express read request size
3573 * @dev: PCI device to query
3574 *
3575 * Returns maximum memory read request in bytes
3576 * or appropriate error value.
3577 */
3578int pcie_get_readrq(struct pci_dev *dev)
3579{
3580 int ret, cap;
3581 u16 ctl;
3582
Kenji Kaneshige06a1cba2009-11-11 14:30:56 +09003583 cap = pci_pcie_cap(dev);
Peter Orubad556ad42007-05-15 13:59:13 +02003584 if (!cap)
3585 return -EINVAL;
3586
3587 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3588 if (!ret)
Julia Lawall93e75fa2010-08-05 22:23:16 +02003589 ret = 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
Peter Orubad556ad42007-05-15 13:59:13 +02003590
3591 return ret;
3592}
3593EXPORT_SYMBOL(pcie_get_readrq);
3594
/**
 * pcie_set_readrq - set PCI Express maximum memory read request
 * @dev: PCI device to query
 * @rq: maximum memory read count in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum memory read request in bytes
 */
int pcie_set_readrq(struct pci_dev *dev, int rq)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
		goto out;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;
	/*
	 * If using the "performance" PCIe config, we clamp the
	 * read rq size to the max packet size to prevent the
	 * host bridge generating requests larger than we can
	 * cope with
	 */
	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		int mps = pcie_get_mps(dev);

		/* Negative mps is an error code; propagate it directly. */
		if (mps < 0)
			return mps;
		if (mps < rq)
			rq = mps;
	}

	/* Encode 128..4096 bytes as field value 0..5, shifted into place. */
	v = (ffs(rq) - 8) << 12;

	/* Only write back when the setting actually changes. */
	if ((ctl & PCI_EXP_DEVCTL_READRQ) != v) {
		ctl &= ~PCI_EXP_DEVCTL_READRQ;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}

out:
	return err;
}
EXPORT_SYMBOL(pcie_set_readrq);
3645
3646/**
Jon Masonb03e7492011-07-20 15:20:54 -05003647 * pcie_get_mps - get PCI Express maximum payload size
3648 * @dev: PCI device to query
3649 *
3650 * Returns maximum payload size in bytes
3651 * or appropriate error value.
3652 */
3653int pcie_get_mps(struct pci_dev *dev)
3654{
3655 int ret, cap;
3656 u16 ctl;
3657
3658 cap = pci_pcie_cap(dev);
3659 if (!cap)
3660 return -EINVAL;
3661
3662 ret = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
3663 if (!ret)
3664 ret = 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3665
3666 return ret;
3667}
3668
/**
 * pcie_set_mps - set PCI Express maximum payload size
 * @dev: PCI device to query
 * @mps: maximum payload size in bytes
 *    valid values are 128, 256, 512, 1024, 2048, 4096
 *
 * If possible sets maximum payload size
 */
int pcie_set_mps(struct pci_dev *dev, int mps)
{
	int cap, err = -EINVAL;
	u16 ctl, v;

	if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
		goto out;

	/* Encode bytes as field value 0..5; reject above the device's MPSS. */
	v = ffs(mps) - 8;
	if (v > dev->pcie_mpss)
		goto out;
	v <<= 5;

	cap = pci_pcie_cap(dev);
	if (!cap)
		goto out;

	err = pci_read_config_word(dev, cap + PCI_EXP_DEVCTL, &ctl);
	if (err)
		goto out;

	/* Only write back when the setting actually changes. */
	if ((ctl & PCI_EXP_DEVCTL_PAYLOAD) != v) {
		ctl &= ~PCI_EXP_DEVCTL_PAYLOAD;
		ctl |= v;
		err = pci_write_config_word(dev, cap + PCI_EXP_DEVCTL, ctl);
	}
out:
	return err;
}
3706
3707/**
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003708 * pci_select_bars - Make BAR mask from the type of resource
Randy Dunlapf95d8822007-02-10 14:41:56 -08003709 * @dev: the PCI device for which BAR mask is made
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09003710 * @flags: resource type mask to be selected
3711 *
3712 * This helper routine makes bar mask from the type of resource.
3713 */
3714int pci_select_bars(struct pci_dev *dev, unsigned long flags)
3715{
3716 int i, bars = 0;
3717 for (i = 0; i < PCI_NUM_RESOURCES; i++)
3718 if (pci_resource_flags(dev, i) & flags)
3719 bars |= (1 << i);
3720 return bars;
3721}
3722
/**
 * pci_resource_bar - get position of the BAR associated with a resource
 * @dev: the PCI device
 * @resno: the resource number
 * @type: the BAR type to be filled in
 *
 * Returns BAR position in config space, or 0 if the BAR is invalid.
 */
int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
{
	int reg;

	if (resno < PCI_ROM_RESOURCE) {
		/* Standard BARs: 4 bytes each, starting at BAR0. */
		*type = pci_bar_unknown;
		return PCI_BASE_ADDRESS_0 + 4 * resno;
	} else if (resno == PCI_ROM_RESOURCE) {
		*type = pci_bar_mem32;
		return dev->rom_base_reg;
	} else if (resno < PCI_BRIDGE_RESOURCES) {
		/* device specific resource (e.g. SR-IOV VF BARs) */
		reg = pci_iov_resource_bar(dev, resno, type);
		if (reg)
			return reg;
	}

	dev_err(&dev->dev, "BAR %d: invalid resource\n", resno);
	return 0;
}
3751
/* Some architectures require additional programming to enable VGA */
/* Hook installed by the architecture at init time; NULL means none. */
static arch_set_vga_state_t arch_set_vga_state;

void __init pci_register_set_vga_state(arch_set_vga_state_t func)
{
	arch_set_vga_state = func;	/* NULL disables */
}
3759
3760static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003761 unsigned int command_bits, u32 flags)
Mike Travis95a8b6e2010-02-02 14:38:13 -08003762{
3763 if (arch_set_vga_state)
3764 return arch_set_vga_state(dev, decode, command_bits,
Dave Airlie7ad35cf2011-05-25 14:00:49 +10003765 flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003766 return 0;
3767}
3768
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003769/**
3770 * pci_set_vga_state - set VGA decode state on device and parents if requested
Randy Dunlap19eea632009-09-17 15:28:22 -07003771 * @dev: the PCI device
3772 * @decode: true = enable decoding, false = disable decoding
3773 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
Randy Dunlap3f37d622011-05-25 19:21:25 -07003774 * @flags: traverse ancestors and change bridges
Dave Airlie3448a192010-06-01 15:32:24 +10003775 * CHANGE_BRIDGE_ONLY / CHANGE_BRIDGE
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003776 */
3777int pci_set_vga_state(struct pci_dev *dev, bool decode,
Dave Airlie3448a192010-06-01 15:32:24 +10003778 unsigned int command_bits, u32 flags)
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003779{
3780 struct pci_bus *bus;
3781 struct pci_dev *bridge;
3782 u16 cmd;
Mike Travis95a8b6e2010-02-02 14:38:13 -08003783 int rc;
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003784
Dave Airlie3448a192010-06-01 15:32:24 +10003785 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) & (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003786
Mike Travis95a8b6e2010-02-02 14:38:13 -08003787 /* ARCH specific VGA enables */
Dave Airlie3448a192010-06-01 15:32:24 +10003788 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
Mike Travis95a8b6e2010-02-02 14:38:13 -08003789 if (rc)
3790 return rc;
3791
Dave Airlie3448a192010-06-01 15:32:24 +10003792 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
3793 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3794 if (decode == true)
3795 cmd |= command_bits;
3796 else
3797 cmd &= ~command_bits;
3798 pci_write_config_word(dev, PCI_COMMAND, cmd);
3799 }
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003800
Dave Airlie3448a192010-06-01 15:32:24 +10003801 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
Benjamin Herrenschmidtdeb2d2e2009-08-11 15:52:06 +10003802 return 0;
3803
3804 bus = dev->bus;
3805 while (bus) {
3806 bridge = bus->self;
3807 if (bridge) {
3808 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
3809 &cmd);
3810 if (decode == true)
3811 cmd |= PCI_BRIDGE_CTL_VGA;
3812 else
3813 cmd &= ~PCI_BRIDGE_CTL_VGA;
3814 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
3815 cmd);
3816 }
3817 bus = bus->parent;
3818 }
3819 return 0;
3820}
3821
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
/* Raw value of the 'pci=resource_alignment=' boot/sysfs parameter. */
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
/* Serializes readers/writers of resource_alignment_param. */
static DEFINE_SPINLOCK(resource_alignment_lock);

/**
 * pci_specified_resource_alignment - get resource alignment specified by user.
 * @dev: the PCI device to get
 *
 * RETURNS: Resource alignment if it is specified.
 * Zero if it is not specified.
 */
resource_size_t pci_specified_resource_alignment(struct pci_dev *dev)
{
	int seg, bus, slot, func, align_order, count;
	resource_size_t align = 0;
	char *p;

	spin_lock(&resource_alignment_lock);
	p = resource_alignment_param;
	/*
	 * Parse entries of the form "[<order>@][<seg>:]<bus>:<slot>.<func>",
	 * separated by ';' or ',', until one matches @dev.
	 */
	while (*p) {
		count = 0;
		/* Optional "<align_order>@" prefix; -1 means "not given". */
		if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
		    p[count] == '@') {
			p += count + 1;
		} else {
			align_order = -1;
		}
		/* Try the domain-qualified form first, then bus:slot.func. */
		if (sscanf(p, "%x:%x:%x.%x%n",
			&seg, &bus, &slot, &func, &count) != 4) {
			seg = 0;
			if (sscanf(p, "%x:%x.%x%n",
					&bus, &slot, &func, &count) != 3) {
				/* Invalid format */
				printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
					p);
				break;
			}
		}
		p += count;
		if (seg == pci_domain_nr(dev->bus) &&
			bus == dev->bus->number &&
			slot == PCI_SLOT(dev->devfn) &&
			func == PCI_FUNC(dev->devfn)) {
			if (align_order == -1) {
				/* No order given: default to page alignment. */
				align = PAGE_SIZE;
			} else {
				align = 1 << align_order;
			}
			/* Found */
			break;
		}
		if (*p != ';' && *p != ',') {
			/* End of param or invalid format */
			break;
		}
		p++;
	}
	spin_unlock(&resource_alignment_lock);
	return align;
}
3882
/**
 * pci_is_reassigndev - check if specified PCI is target device to reassign
 * @dev: the PCI device to check
 *
 * RETURNS: non-zero for PCI device is a target device to reassign,
 *          or zero is not.
 */
int pci_is_reassigndev(struct pci_dev *dev)
{
	/* A device is a reassignment target iff the user gave it an alignment. */
	return pci_specified_resource_alignment(dev) != 0;
}
3894
/*
 * This function disables memory decoding and releases memory resources
 * of the device specified by kernel's boot parameter 'pci=resource_alignment='.
 * It also rounds up size to specified alignment.
 * Later on, the kernel will assign page-aligned memory resource back
 * to the device.
 */
void pci_reassigndev_resource_alignment(struct pci_dev *dev)
{
	int i;
	struct resource *r;
	resource_size_t align, size;
	u16 command;

	if (!pci_is_reassigndev(dev))
		return;

	/* Host bridge resources cannot be reassigned. */
	if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
		dev_warn(&dev->dev,
			"Can't reassign resources to host bridge.\n");
		return;
	}

	dev_info(&dev->dev,
		"Disabling memory decoding and releasing memory resources.\n");
	pci_read_config_word(dev, PCI_COMMAND, &command);
	command &= ~PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, command);

	/* Release each memory BAR and round its size up to the alignment. */
	align = pci_specified_resource_alignment(dev);
	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
		r = &dev->resource[i];
		if (!(r->flags & IORESOURCE_MEM))
			continue;
		size = resource_size(r);
		if (size < align) {
			size = align;
			dev_info(&dev->dev,
				"Rounding up size of resource #%d to %#llx.\n",
				i, (unsigned long long)size);
		}
		/* start = 0 marks the resource as released/unassigned. */
		r->end = size - 1;
		r->start = 0;
	}
	/* Need to disable bridge's resource window,
	 * to enable the kernel to reassign new resource
	 * window later on.
	 */
	if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
	    (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
			r = &dev->resource[i];
			if (!(r->flags & IORESOURCE_MEM))
				continue;
			r->end = resource_size(r) - 1;
			r->start = 0;
		}
		pci_disable_bridge_window(dev);
	}
}
3956
Yuji Shimada32a9a6822009-03-16 17:13:39 +09003957ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
3958{
3959 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
3960 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
3961 spin_lock(&resource_alignment_lock);
3962 strncpy(resource_alignment_param, buf, count);
3963 resource_alignment_param[count] = '\0';
3964 spin_unlock(&resource_alignment_lock);
3965 return count;
3966}
3967
3968ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
3969{
3970 size_t count;
3971 spin_lock(&resource_alignment_lock);
3972 count = snprintf(buf, size, "%s", resource_alignment_param);
3973 spin_unlock(&resource_alignment_lock);
3974 return count;
3975}
3976
/* sysfs show handler for the bus-level 'resource_alignment' attribute. */
static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
{
	return pci_get_resource_alignment_param(buf, PAGE_SIZE);
}

/* sysfs store handler for the bus-level 'resource_alignment' attribute. */
static ssize_t pci_resource_alignment_store(struct bus_type *bus,
					const char *buf, size_t count)
{
	return pci_set_resource_alignment_param(buf, count);
}

/* Declares bus_attr_resource_alignment (mode 0644) for the PCI bus type. */
BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
	pci_resource_alignment_store);

/* Create /sys/bus/pci/resource_alignment late in boot. */
static int __init pci_resource_alignment_sysfs_init(void)
{
	return bus_create_file(&pci_bus_type,
		&bus_attr_resource_alignment);
}

late_initcall(pci_resource_alignment_sysfs_init);
3998
/* Disable PCI domain (segment) support; no-op when domains are compiled out. */
static void __devinit pci_no_domains(void)
{
#ifdef CONFIG_PCI_DOMAINS
	pci_domains_supported = 0;
#endif
}
4005
/**
 * pci_ext_cfg_avail - can we access extended PCI config space?
 * @dev: The PCI device of the root bridge.
 *
 * Returns 1 if we can access PCI extended config space (offsets
 * greater than 0xff). This is the default implementation. Architecture
 * implementations can override this.
 */
int __weak pci_ext_cfg_avail(struct pci_dev *dev)
{
	return 1;
}
4018
/* Default no-op CardBus fixup; architectures may override this __weak stub. */
void __weak pci_fixup_cardbus(struct pci_bus *bus)
{
}
EXPORT_SYMBOL(pci_fixup_cardbus);
4023
Al Viroad04d312008-11-22 17:37:14 +00004024static int __init pci_setup(char *str)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025{
4026 while (str) {
4027 char *k = strchr(str, ',');
4028 if (k)
4029 *k++ = 0;
4030 if (*str && (str = pcibios_setup(str)) && *str) {
Matthew Wilcox309e57d2006-03-05 22:33:34 -07004031 if (!strcmp(str, "nomsi")) {
4032 pci_no_msi();
Randy Dunlap7f785762007-10-05 13:17:58 -07004033 } else if (!strcmp(str, "noaer")) {
4034 pci_no_aer();
Yinghai Lub55438f2012-02-23 19:23:30 -08004035 } else if (!strncmp(str, "realloc=", 8)) {
4036 pci_realloc_get_opt(str + 8);
Ram Paif483d392011-07-07 11:19:10 -07004037 } else if (!strncmp(str, "realloc", 7)) {
Yinghai Lub55438f2012-02-23 19:23:30 -08004038 pci_realloc_get_opt("on");
Jeff Garzik32a2eea2007-10-11 16:57:27 -04004039 } else if (!strcmp(str, "nodomains")) {
4040 pci_no_domains();
Rafael J. Wysocki6748dcc2012-03-01 00:06:33 +01004041 } else if (!strncmp(str, "noari", 5)) {
4042 pcie_ari_disabled = true;
Atsushi Nemoto4516a612007-02-05 16:36:06 -08004043 } else if (!strncmp(str, "cbiosize=", 9)) {
4044 pci_cardbus_io_size = memparse(str + 9, &str);
4045 } else if (!strncmp(str, "cbmemsize=", 10)) {
4046 pci_cardbus_mem_size = memparse(str + 10, &str);
Yuji Shimada32a9a6822009-03-16 17:13:39 +09004047 } else if (!strncmp(str, "resource_alignment=", 19)) {
4048 pci_set_resource_alignment_param(str + 19,
4049 strlen(str + 19));
Andrew Patterson43c16402009-04-22 16:52:09 -06004050 } else if (!strncmp(str, "ecrc=", 5)) {
4051 pcie_ecrc_get_policy(str + 5);
Eric W. Biederman28760482009-09-09 14:09:24 -07004052 } else if (!strncmp(str, "hpiosize=", 9)) {
4053 pci_hotplug_io_size = memparse(str + 9, &str);
4054 } else if (!strncmp(str, "hpmemsize=", 10)) {
4055 pci_hotplug_mem_size = memparse(str + 10, &str);
Jon Mason5f39e672011-10-03 09:50:20 -05004056 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
4057 pcie_bus_config = PCIE_BUS_TUNE_OFF;
Jon Masonb03e7492011-07-20 15:20:54 -05004058 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
4059 pcie_bus_config = PCIE_BUS_SAFE;
4060 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
4061 pcie_bus_config = PCIE_BUS_PERFORMANCE;
Jon Mason5f39e672011-10-03 09:50:20 -05004062 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
4063 pcie_bus_config = PCIE_BUS_PEER2PEER;
Bjorn Helgaas284f5f92012-04-30 15:21:02 -06004064 } else if (!strncmp(str, "pcie_scan_all", 13)) {
4065 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
Matthew Wilcox309e57d2006-03-05 22:33:34 -07004066 } else {
4067 printk(KERN_ERR "PCI: Unknown option `%s'\n",
4068 str);
4069 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004070 }
4071 str = k;
4072 }
Andi Kleen0637a702006-09-26 10:52:41 +02004073 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004074}
Andi Kleen0637a702006-09-26 10:52:41 +02004075early_param("pci", pci_setup);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
Tejun Heo0b62e132007-07-27 14:43:35 +09004077EXPORT_SYMBOL(pci_reenable_device);
Benjamin Herrenschmidtb7189892007-12-20 15:28:08 +11004078EXPORT_SYMBOL(pci_enable_device_io);
4079EXPORT_SYMBOL(pci_enable_device_mem);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080EXPORT_SYMBOL(pci_enable_device);
Tejun Heo9ac78492007-01-20 16:00:26 +09004081EXPORT_SYMBOL(pcim_enable_device);
4082EXPORT_SYMBOL(pcim_pin_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083EXPORT_SYMBOL(pci_disable_device);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084EXPORT_SYMBOL(pci_find_capability);
4085EXPORT_SYMBOL(pci_bus_find_capability);
4086EXPORT_SYMBOL(pci_release_regions);
4087EXPORT_SYMBOL(pci_request_regions);
Arjan van de Vene8de1482008-10-22 19:55:31 -07004088EXPORT_SYMBOL(pci_request_regions_exclusive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004089EXPORT_SYMBOL(pci_release_region);
4090EXPORT_SYMBOL(pci_request_region);
Arjan van de Vene8de1482008-10-22 19:55:31 -07004091EXPORT_SYMBOL(pci_request_region_exclusive);
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09004092EXPORT_SYMBOL(pci_release_selected_regions);
4093EXPORT_SYMBOL(pci_request_selected_regions);
Arjan van de Vene8de1482008-10-22 19:55:31 -07004094EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004095EXPORT_SYMBOL(pci_set_master);
Ben Hutchings6a479072008-12-23 03:08:29 +00004096EXPORT_SYMBOL(pci_clear_master);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004097EXPORT_SYMBOL(pci_set_mwi);
Randy Dunlap694625c2007-07-09 11:55:54 -07004098EXPORT_SYMBOL(pci_try_set_mwi);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099EXPORT_SYMBOL(pci_clear_mwi);
Brett M Russa04ce0f2005-08-15 15:23:41 -04004100EXPORT_SYMBOL_GPL(pci_intx);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101EXPORT_SYMBOL(pci_assign_resource);
4102EXPORT_SYMBOL(pci_find_parent_resource);
Hidetoshi Setoc87deff2006-12-18 10:31:06 +09004103EXPORT_SYMBOL(pci_select_bars);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104
4105EXPORT_SYMBOL(pci_set_power_state);
4106EXPORT_SYMBOL(pci_save_state);
4107EXPORT_SYMBOL(pci_restore_state);
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02004108EXPORT_SYMBOL(pci_pme_capable);
Rafael J. Wysocki5a6c9b62008-08-08 00:14:24 +02004109EXPORT_SYMBOL(pci_pme_active);
Rafael J. Wysocki0235c4f2008-08-18 21:38:00 +02004110EXPORT_SYMBOL(pci_wake_from_d3);
Rafael J. Wysockie5899e12008-07-19 14:39:24 +02004111EXPORT_SYMBOL(pci_target_state);
Rafael J. Wysocki404cc2d2008-07-07 03:35:26 +02004112EXPORT_SYMBOL(pci_prepare_to_sleep);
4113EXPORT_SYMBOL(pci_back_from_sleep);
Brian Kingf7bdd122007-04-06 16:39:36 -05004114EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);