// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Bus Services, see include/linux/pci.h for further explanation.
 *
 * Copyright 1993 -- 1997 Drew Eckhardt, Frederic Potter,
 * David Mosberger-Tang
 *
 * Copyright 1997 -- 2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/acpi.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmi.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/logic_pio.h>
#include <linux/pci-aspm.h>
#include <linux/pm_wakeup.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
#include <linux/pci-ats.h>
#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"

const char *pci_power_names[] = {
	"error", "D0", "D1", "D2", "D3hot", "D3cold", "unknown",
};
EXPORT_SYMBOL_GPL(pci_power_names);

int isa_dma_bridge_buggy;
EXPORT_SYMBOL(isa_dma_bridge_buggy);

int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);

unsigned int pci_pm_d3_delay;

static void pci_pme_list_scan(struct work_struct *work);

static LIST_HEAD(pci_pme_list);
static DEFINE_MUTEX(pci_pme_list_mutex);
static DECLARE_DELAYED_WORK(pci_pme_work, pci_pme_list_scan);

struct pci_pme_device {
	struct list_head list;
	struct pci_dev *dev;
};

#define PME_TIMEOUT 1000	/* How long between PME checks */

static void pci_dev_d3_sleep(struct pci_dev *dev)
{
	unsigned int delay = dev->d3_delay;

	if (delay < pci_pm_d3_delay)
		delay = pci_pm_d3_delay;

	if (delay)
		msleep(delay);
}

#ifdef CONFIG_PCI_DOMAINS
int pci_domains_supported = 1;
#endif

#define DEFAULT_CARDBUS_IO_SIZE		(256)
#define DEFAULT_CARDBUS_MEM_SIZE	(64*1024*1024)
/* pci=cbmemsize=nnM,cbiosize=nn can override this */
unsigned long pci_cardbus_io_size = DEFAULT_CARDBUS_IO_SIZE;
unsigned long pci_cardbus_mem_size = DEFAULT_CARDBUS_MEM_SIZE;

#define DEFAULT_HOTPLUG_IO_SIZE		(256)
#define DEFAULT_HOTPLUG_MEM_SIZE	(2*1024*1024)
/* pci=hpmemsize=nnM,hpiosize=nn can override this */
unsigned long pci_hotplug_io_size  = DEFAULT_HOTPLUG_IO_SIZE;
unsigned long pci_hotplug_mem_size = DEFAULT_HOTPLUG_MEM_SIZE;

#define DEFAULT_HOTPLUG_BUS_SIZE	1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;

enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;

/*
 * The default CLS is used if the arch didn't set CLS explicitly and not
 * all PCI devices agree on the same value.  The arch can override either
 * the default or the actual value as it sees fit.  Don't forget this is
 * measured in 32-bit words, not bytes.
 */
u8 pci_dfl_cache_line_size = L1_CACHE_BYTES >> 2;
u8 pci_cache_line_size;

/*
 * If we set up a device for bus mastering, we need to check the latency
 * timer as certain BIOSes forget to set it properly.
 */
unsigned int pcibios_max_latency = 255;

/* If set, the PCIe ARI capability will not be used. */
static bool pcie_ari_disabled;

/* If set, the PCIe ATS capability will not be used. */
static bool pcie_ats_disabled;

bool pci_ats_disabled(void)
{
	return pcie_ats_disabled;
}

/* Disable bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_disable;
/* Force bridge_d3 for all PCIe ports */
static bool pci_bridge_d3_force;

static int __init pcie_port_pm_setup(char *str)
{
	if (!strcmp(str, "off"))
		pci_bridge_d3_disable = true;
	else if (!strcmp(str, "force"))
		pci_bridge_d3_force = true;
	return 1;
}
__setup("pcie_port_pm=", pcie_port_pm_setup);

/* Time to wait after a reset for a device to become responsive */
#define PCIE_RESET_READY_POLL_MS 60000

/**
 * pci_bus_max_busnr - returns maximum PCI bus number of given bus' children
 * @bus: pointer to PCI bus structure to search
 *
 * Given a PCI bus, returns the highest PCI bus number present in the set
 * including the given PCI bus and its list of child PCI buses.
 */
unsigned char pci_bus_max_busnr(struct pci_bus *bus)
{
	struct pci_bus *tmp;
	unsigned char max, n;

	max = bus->busn_res.end;
	list_for_each_entry(tmp, &bus->children, node) {
		n = pci_bus_max_busnr(tmp);
		if (n > max)
			max = n;
	}
	return max;
}
EXPORT_SYMBOL_GPL(pci_bus_max_busnr);

#ifdef CONFIG_HAS_IOMEM
void __iomem *pci_ioremap_bar(struct pci_dev *pdev, int bar)
{
	struct resource *res = &pdev->resource[bar];

	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (res->flags & IORESOURCE_UNSET || !(res->flags & IORESOURCE_MEM)) {
		pci_warn(pdev, "can't ioremap BAR %d: %pR\n", bar, res);
		return NULL;
	}
	return ioremap_nocache(res->start, resource_size(res));
}
EXPORT_SYMBOL_GPL(pci_ioremap_bar);

void __iomem *pci_ioremap_wc_bar(struct pci_dev *pdev, int bar)
{
	/*
	 * Make sure the BAR is actually a memory resource, not an IO resource
	 */
	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {
		WARN_ON(1);
		return NULL;
	}
	return ioremap_wc(pci_resource_start(pdev, bar),
			  pci_resource_len(pdev, bar));
}
EXPORT_SYMBOL_GPL(pci_ioremap_wc_bar);
#endif

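/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * driver mapping BAR 0 during probe.  "foo_probe" and FOO_CTRL are made-up
 * names and error handling is abbreviated.
 *
 *	static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int err;
 *
 *		err = pci_enable_device_mem(pdev);
 *		if (err)
 *			return err;
 *
 *		regs = pci_ioremap_bar(pdev, 0);
 *		if (!regs)
 *			return -ENOMEM;
 *
 *		writel(1, regs + FOO_CTRL);
 *		return 0;
 *	}
 */
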
static int __pci_find_next_cap_ttl(struct pci_bus *bus, unsigned int devfn,
				   u8 pos, int cap, int *ttl)
{
	u8 id;
	u16 ent;

	pci_bus_read_config_byte(bus, devfn, pos, &pos);

	while ((*ttl)--) {
		if (pos < 0x40)
			break;
		pos &= ~3;
		pci_bus_read_config_word(bus, devfn, pos, &ent);

		id = ent & 0xff;
		if (id == 0xff)
			break;
		if (id == cap)
			return pos;
		pos = (ent >> 8);
	}
	return 0;
}

static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn,
			       u8 pos, int cap)
{
	int ttl = PCI_FIND_CAP_TTL;

	return __pci_find_next_cap_ttl(bus, devfn, pos, cap, &ttl);
}

int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
{
	return __pci_find_next_cap(dev->bus, dev->devfn,
				   pos + PCI_CAP_LIST_NEXT, cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_capability);

static int __pci_bus_find_cap_start(struct pci_bus *bus,
				    unsigned int devfn, u8 hdr_type)
{
	u16 status;

	pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
	if (!(status & PCI_STATUS_CAP_LIST))
		return 0;

	switch (hdr_type) {
	case PCI_HEADER_TYPE_NORMAL:
	case PCI_HEADER_TYPE_BRIDGE:
		return PCI_CAPABILITY_LIST;
	case PCI_HEADER_TYPE_CARDBUS:
		return PCI_CB_CAPABILITY_LIST;
	}

	return 0;
}

/**
 * pci_find_capability - query for devices' capabilities
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Tell if a device supports a given PCI capability.
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.  Possible values for @cap:
 *
 *  %PCI_CAP_ID_PM	Power Management
 *  %PCI_CAP_ID_AGP	Accelerated Graphics Port
 *  %PCI_CAP_ID_VPD	Vital Product Data
 *  %PCI_CAP_ID_SLOTID	Slot Identification
 *  %PCI_CAP_ID_MSI	Message Signalled Interrupts
 *  %PCI_CAP_ID_CHSWP	CompactPCI HotSwap
 *  %PCI_CAP_ID_PCIX	PCI-X
 *  %PCI_CAP_ID_EXP	PCI Express
 */
int pci_find_capability(struct pci_dev *dev, int cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_cap(dev->bus, dev->devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_find_capability);

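/*
 * Usage sketch (illustrative only): checking for the Power Management
 * capability and reading its PMC register via the returned offset.
 *
 *	int pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
 *	u16 pmc;
 *
 *	if (pm)
 *		pci_read_config_word(pdev, pm + PCI_PM_PMC, &pmc);
 */
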
/**
 * pci_bus_find_capability - query for devices' capabilities
 * @bus: the PCI bus to query
 * @devfn: PCI device to query
 * @cap: capability code
 *
 * Like pci_find_capability() but works for PCI devices that do not have a
 * pci_dev structure set up yet.
 *
 * Returns the address of the requested capability structure within the
 * device's PCI configuration space or 0 in case the device does not
 * support it.
 */
int pci_bus_find_capability(struct pci_bus *bus, unsigned int devfn, int cap)
{
	int pos;
	u8 hdr_type;

	pci_bus_read_config_byte(bus, devfn, PCI_HEADER_TYPE, &hdr_type);

	pos = __pci_bus_find_cap_start(bus, devfn, hdr_type & 0x7f);
	if (pos)
		pos = __pci_find_next_cap(bus, devfn, pos, cap);

	return pos;
}
EXPORT_SYMBOL(pci_bus_find_capability);

/**
 * pci_find_next_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @start: address at which to start looking (0 to start at beginning of list)
 * @cap: capability code
 *
 * Returns the address of the next matching extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Some capabilities can occur several times, e.g., the
 * vendor-specific capability, and this provides a way to find them all.
 */
int pci_find_next_ext_capability(struct pci_dev *dev, int start, int cap)
{
	u32 header;
	int ttl;
	int pos = PCI_CFG_SPACE_SIZE;

	/* minimum 8 bytes per capability */
	ttl = (PCI_CFG_SPACE_EXP_SIZE - PCI_CFG_SPACE_SIZE) / 8;

	if (dev->cfg_size <= PCI_CFG_SPACE_SIZE)
		return 0;

	if (start)
		pos = start;

	if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
		return 0;

	/*
	 * If we have no capabilities, this is indicated by cap ID,
	 * cap version and next pointer all being 0.
	 */
	if (header == 0)
		return 0;

	while (ttl-- > 0) {
		if (PCI_EXT_CAP_ID(header) == cap && pos != start)
			return pos;

		pos = PCI_EXT_CAP_NEXT(header);
		if (pos < PCI_CFG_SPACE_SIZE)
			break;

		if (pci_read_config_dword(dev, pos, &header) != PCIBIOS_SUCCESSFUL)
			break;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(pci_find_next_ext_capability);

/**
 * pci_find_ext_capability - Find an extended capability
 * @dev: PCI device to query
 * @cap: capability code
 *
 * Returns the address of the requested extended capability structure
 * within the device's PCI configuration space or 0 if the device does
 * not support it.  Possible values for @cap:
 *
 *  %PCI_EXT_CAP_ID_ERR	Advanced Error Reporting
 *  %PCI_EXT_CAP_ID_VC	Virtual Channel
 *  %PCI_EXT_CAP_ID_DSN	Device Serial Number
 *  %PCI_EXT_CAP_ID_PWR	Power Budgeting
 */
int pci_find_ext_capability(struct pci_dev *dev, int cap)
{
	return pci_find_next_ext_capability(dev, 0, cap);
}
EXPORT_SYMBOL_GPL(pci_find_ext_capability);

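/*
 * Usage sketch (illustrative only): locating the AER extended capability
 * and reading the uncorrectable error status register through it.
 *
 *	int aer = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
 *	u32 uncor;
 *
 *	if (aer)
 *		pci_read_config_dword(pdev, aer + PCI_ERR_UNCOR_STATUS, &uncor);
 */
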
static int __pci_find_next_ht_cap(struct pci_dev *dev, int pos, int ht_cap)
{
	int rc, ttl = PCI_FIND_CAP_TTL;
	u8 cap, mask;

	if (ht_cap == HT_CAPTYPE_SLAVE || ht_cap == HT_CAPTYPE_HOST)
		mask = HT_3BIT_CAP_MASK;
	else
		mask = HT_5BIT_CAP_MASK;

	pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn, pos,
				      PCI_CAP_ID_HT, &ttl);
	while (pos) {
		rc = pci_read_config_byte(dev, pos + 3, &cap);
		if (rc != PCIBIOS_SUCCESSFUL)
			return 0;

		if ((cap & mask) == ht_cap)
			return pos;

		pos = __pci_find_next_cap_ttl(dev->bus, dev->devfn,
					      pos + PCI_CAP_LIST_NEXT,
					      PCI_CAP_ID_HT, &ttl);
	}

	return 0;
}

/**
 * pci_find_next_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @pos: Position from which to continue searching
 * @ht_cap: HyperTransport capability code
 *
 * To be used in conjunction with pci_find_ht_capability() to search for
 * all capabilities matching @ht_cap. @pos should always be a value returned
 * from pci_find_ht_capability().
 *
 * NB. To be 100% safe against broken PCI devices, the caller should take
 * steps to avoid an infinite loop.
 */
int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap)
{
	return __pci_find_next_ht_cap(dev, pos + PCI_CAP_LIST_NEXT, ht_cap);
}
EXPORT_SYMBOL_GPL(pci_find_next_ht_capability);

/**
 * pci_find_ht_capability - query a device's HyperTransport capabilities
 * @dev: PCI device to query
 * @ht_cap: HyperTransport capability code
 *
 * Tell if a device supports a given HyperTransport capability.
 * Returns an address within the device's PCI configuration space
 * or 0 in case the device does not support the requested capability.
 * The address points to the PCI capability, of type PCI_CAP_ID_HT,
 * which has a HyperTransport capability matching @ht_cap.
 */
int pci_find_ht_capability(struct pci_dev *dev, int ht_cap)
{
	int pos;

	pos = __pci_bus_find_cap_start(dev->bus, dev->devfn, dev->hdr_type);
	if (pos)
		pos = __pci_find_next_ht_cap(dev, pos, ht_cap);

	return pos;
}
EXPORT_SYMBOL_GPL(pci_find_ht_capability);

/**
 * pci_find_parent_resource - return resource region of parent bus of given region
 * @dev: PCI device structure containing the resources to be searched
 * @res: child resource record for which the parent is sought
 *
 * For a given resource region of a given device, return the resource region
 * of the parent bus that contains the given region.
 */
struct resource *pci_find_parent_resource(const struct pci_dev *dev,
					  struct resource *res)
{
	const struct pci_bus *bus = dev->bus;
	struct resource *r;
	int i;

	pci_bus_for_each_resource(bus, r, i) {
		if (!r)
			continue;
		if (resource_contains(r, res)) {

			/*
			 * If the window is prefetchable but the BAR is
			 * not, the allocator made a mistake.
			 */
			if (r->flags & IORESOURCE_PREFETCH &&
			    !(res->flags & IORESOURCE_PREFETCH))
				return NULL;

			/*
			 * If we're below a transparent bridge, there may
			 * be both a positively-decoded aperture and a
			 * subtractively-decoded region that contain the BAR.
			 * We want the positively-decoded one, so this depends
			 * on pci_bus_for_each_resource() giving us those
			 * first.
			 */
			return r;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(pci_find_parent_resource);

/**
 * pci_find_resource - Return matching PCI device resource
 * @dev: PCI device to query
 * @res: Resource to look for
 *
 * Goes over standard PCI resources (BARs) and checks if the given resource
 * is partially or fully contained in any of them. In that case the
 * matching resource is returned, %NULL otherwise.
 */
struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res)
{
	int i;

	for (i = 0; i < PCI_ROM_RESOURCE; i++) {
		struct resource *r = &dev->resource[i];

		if (r->start && resource_contains(r, res))
			return r;
	}

	return NULL;
}
EXPORT_SYMBOL(pci_find_resource);

/**
 * pci_find_pcie_root_port - return PCIe Root Port
 * @dev: PCI device to query
 *
 * Traverse up the parent chain and return the PCIe Root Port PCI Device
 * for a given PCI Device.
 */
struct pci_dev *pci_find_pcie_root_port(struct pci_dev *dev)
{
	struct pci_dev *bridge, *highest_pcie_bridge = dev;

	bridge = pci_upstream_bridge(dev);
	while (bridge && pci_is_pcie(bridge)) {
		highest_pcie_bridge = bridge;
		bridge = pci_upstream_bridge(bridge);
	}

	if (pci_pcie_type(highest_pcie_bridge) != PCI_EXP_TYPE_ROOT_PORT)
		return NULL;

	return highest_pcie_bridge;
}
EXPORT_SYMBOL(pci_find_pcie_root_port);

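/*
 * Usage sketch (illustrative only): finding the Root Port above a device
 * and reading its link status register.
 *
 *	struct pci_dev *rp = pci_find_pcie_root_port(pdev);
 *	u16 lnksta;
 *
 *	if (rp)
 *		pcie_capability_read_word(rp, PCI_EXP_LNKSTA, &lnksta);
 */
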
/**
 * pci_wait_for_pending - wait for @mask bit(s) to clear in status word @pos
 * @dev: the PCI device to operate on
 * @pos: config space offset of status word
 * @mask: mask of bit(s) to care about in status word
 *
 * Return 1 when mask bit(s) in status word clear, 0 otherwise.
 */
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask)
{
	int i;

	/* Wait for the Transaction Pending bit to clear */
	for (i = 0; i < 4; i++) {
		u16 status;
		if (i)
			msleep((1 << (i - 1)) * 100);

		pci_read_config_word(dev, pos, &status);
		if (!(status & mask))
			return 1;
	}

	return 0;
}

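/*
 * Usage sketch (illustrative only): polling the PCIe Transaction Pending
 * bit before a reset, the pattern used by callers of this helper.  The
 * retries sleep 100, 200 and 400 ms, so the total wait is bounded.
 *
 *	if (!pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
 *				  PCI_EXP_DEVSTA_TRPND))
 *		pci_err(dev, "transactions still pending; resetting anyway\n");
 */
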
/**
 * pci_restore_bars - restore a device's BAR values (e.g. after wake-up)
 * @dev: PCI device to have its BARs restored
 *
 * Restore the BAR values for a given device, so as to make it
 * accessible by its driver.
 */
static void pci_restore_bars(struct pci_dev *dev)
{
	int i;

	for (i = 0; i < PCI_BRIDGE_RESOURCES; i++)
		pci_update_resource(dev, i);
}

static const struct pci_platform_pm_ops *pci_platform_pm;

int pci_set_platform_pm(const struct pci_platform_pm_ops *ops)
{
	if (!ops->is_manageable || !ops->set_state || !ops->get_state ||
	    !ops->choose_state || !ops->set_wakeup || !ops->need_resume)
		return -EINVAL;
	pci_platform_pm = ops;
	return 0;
}

static inline bool platform_pci_power_manageable(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->is_manageable(dev) : false;
}

static inline int platform_pci_set_power_state(struct pci_dev *dev,
					       pci_power_t t)
{
	return pci_platform_pm ? pci_platform_pm->set_state(dev, t) : -ENOSYS;
}

static inline pci_power_t platform_pci_get_power_state(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->get_state(dev) : PCI_UNKNOWN;
}

static inline pci_power_t platform_pci_choose_state(struct pci_dev *dev)
{
	return pci_platform_pm ?
		pci_platform_pm->choose_state(dev) : PCI_POWER_ERROR;
}

static inline int platform_pci_set_wakeup(struct pci_dev *dev, bool enable)
{
	return pci_platform_pm ?
		pci_platform_pm->set_wakeup(dev, enable) : -ENODEV;
}

static inline bool platform_pci_need_resume(struct pci_dev *dev)
{
	return pci_platform_pm ? pci_platform_pm->need_resume(dev) : false;
}

/**
 * pci_raw_set_power_state - Use PCI PM registers to set the power state of
 *			     given PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if device already is in the requested state.
 * 0 if device's power state has been successfully changed.
 */
static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	u16 pmcsr;
	bool need_restore = false;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	if (!dev->pm_cap)
		return -EIO;

	if (state < PCI_D0 || state > PCI_D3hot)
		return -EINVAL;

	/*
	 * Validate the current state: we can enter D0 from any state, but
	 * we can only go deeper into a low-power state if we're already in
	 * one.
	 */
	if (state != PCI_D0 && dev->current_state <= PCI_D3cold
	    && dev->current_state > state) {
		pci_err(dev, "invalid power transition (from state %d to %d)\n",
			dev->current_state, state);
		return -EINVAL;
	}

	/* Check if this device supports the desired state */
	if ((state == PCI_D1 && !dev->d1_support)
	    || (state == PCI_D2 && !dev->d2_support))
		return -EIO;

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);

	/*
	 * If we're (effectively) in D3, force the entire word to 0.
	 * This doesn't affect PME_Status, disables PME_En, and
	 * sets PowerState to 0.
	 */
	switch (dev->current_state) {
	case PCI_D0:
	case PCI_D1:
	case PCI_D2:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= state;
		break;
	case PCI_D3hot:
	case PCI_D3cold:
	case PCI_UNKNOWN: /* Boot-up */
		if ((pmcsr & PCI_PM_CTRL_STATE_MASK) == PCI_D3hot
		    && !(pmcsr & PCI_PM_CTRL_NO_SOFT_RESET))
			need_restore = true;
		/* Fall-through: force to D0 */
	default:
		pmcsr = 0;
		break;
	}

	/* Enter the specified state */
	pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);

	/*
	 * Mandatory power management transition delays; see
	 * PCI PM 1.1, section 5.6.1, table 18.
	 */
	if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
		pci_dev_d3_sleep(dev);
	else if (state == PCI_D2 || dev->current_state == PCI_D2)
		udelay(PCI_PM_D2_DELAY);

	pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
	dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	if (dev->current_state != state && printk_ratelimit())
		pci_info(dev, "Refused to change power state, currently in D%d\n",
			 dev->current_state);

	/*
	 * According to section 5.4.1 of the "PCI BUS POWER MANAGEMENT
	 * INTERFACE SPECIFICATION, REV. 1.2", a device transitioning
	 * from D3hot to D0 _may_ perform an internal reset, thereby
	 * going to "D0 Uninitialized" rather than "D0 Initialized".
	 * For example, at least some versions of the 3c905B and the
	 * 3c556B exhibit this behaviour.
	 *
	 * At least some laptop BIOSen (e.g. the Thinkpad T21) leave
	 * devices in a D3hot state at boot.  Consequently, we need to
	 * restore at least the BARs so that the device will be
	 * accessible to its driver.
	 */
	if (need_restore)
		pci_restore_bars(dev);

	if (dev->bus->self)
		pcie_aspm_pm_state_change(dev->bus->self);

	return 0;
}

/**
 * pci_update_current_state - Read power state of given device and cache it
 * @dev: PCI device to handle.
 * @state: State to cache in case the device doesn't have the PM capability
 *
 * The power state is read from the PMCSR register, which however is
 * inaccessible in D3cold.  The platform firmware is therefore queried first
 * to detect accessibility of the register.  In case the platform firmware
 * reports an incorrect state or the device isn't power manageable by the
 * platform at all, we try to detect D3cold by testing accessibility of the
 * vendor ID in config space.
 */
void pci_update_current_state(struct pci_dev *dev, pci_power_t state)
{
	if (platform_pci_get_power_state(dev) == PCI_D3cold ||
	    !pci_device_is_present(dev)) {
		dev->current_state = PCI_D3cold;
	} else if (dev->pm_cap) {
		u16 pmcsr;

		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	} else {
		dev->current_state = state;
	}
}

/**
 * pci_power_up - Put the given device into D0 forcibly
 * @dev: PCI device to power up
 */
void pci_power_up(struct pci_dev *dev)
{
	if (platform_pci_power_manageable(dev))
		platform_pci_set_power_state(dev, PCI_D0);

	pci_raw_set_power_state(dev, PCI_D0);
	pci_update_current_state(dev, PCI_D0);
}

/**
 * pci_platform_power_transition - Use platform to change device power state
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int error;

	if (platform_pci_power_manageable(dev)) {
		error = platform_pci_set_power_state(dev, state);
		if (!error)
			pci_update_current_state(dev, state);
	} else
		error = -ENODEV;

	if (error && !dev->pm_cap) /* Fall back to PCI_D0 */
		dev->current_state = PCI_D0;

	return error;
}

/**
 * pci_wakeup - Wake up a PCI device
 * @pci_dev: Device to handle.
 * @ign: ignored parameter
 */
static int pci_wakeup(struct pci_dev *pci_dev, void *ign)
{
	pci_wakeup_event(pci_dev);
	pm_request_resume(&pci_dev->dev);
	return 0;
}

/**
 * pci_wakeup_bus - Walk given bus and wake up devices on it
 * @bus: Top bus of the subtree to walk.
 */
void pci_wakeup_bus(struct pci_bus *bus)
{
	if (bus)
		pci_walk_bus(bus, pci_wakeup, NULL);
}

/**
 * __pci_start_power_transition - Start power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 */
static void __pci_start_power_transition(struct pci_dev *dev, pci_power_t state)
{
	if (state == PCI_D0) {
		pci_platform_power_transition(dev, PCI_D0);
		/*
		 * Mandatory power management transition delays; see
		 * PCI Express Base Specification Revision 2.0 Section
		 * 6.6.1: Conventional Reset.  Do not delay for devices
		 * powered on/off by the corresponding bridge, because we
		 * have already delayed for the bridge.
		 */
		if (dev->runtime_d3cold) {
			if (dev->d3cold_delay)
				msleep(dev->d3cold_delay);
			/*
			 * When powering on a bridge from D3cold, the whole
			 * hierarchy may be powered on into the
			 * D0uninitialized state; resume the devices below
			 * to give them a chance to suspend again.
			 */
			pci_wakeup_bus(dev->subordinate);
		}
	}
}

/**
 * __pci_dev_set_current_state - Set current state of a PCI device
 * @dev: Device to handle
 * @data: pointer to state to be set
 */
static int __pci_dev_set_current_state(struct pci_dev *dev, void *data)
{
	pci_power_t state = *(pci_power_t *)data;

	dev->current_state = state;
	return 0;
}

/**
 * pci_bus_set_current_state - Walk given bus and set current state of devices
 * @bus: Top bus of the subtree to walk.
 * @state: state to be set
 */
void pci_bus_set_current_state(struct pci_bus *bus, pci_power_t state)
{
	if (bus)
		pci_walk_bus(bus, __pci_dev_set_current_state, &state);
}

/**
 * __pci_complete_power_transition - Complete power transition of a PCI device
 * @dev: PCI device to handle.
 * @state: State to put the device into.
 *
 * This function should not be called directly by device drivers.
 */
int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state)
{
	int ret;

	if (state <= PCI_D0)
		return -EINVAL;
	ret = pci_platform_power_transition(dev, state);
	/* Powering off a bridge may power off the whole hierarchy below it */
	if (!ret && state == PCI_D3cold)
		pci_bus_set_current_state(dev->subordinate, PCI_D3cold);
	return ret;
}
EXPORT_SYMBOL_GPL(__pci_complete_power_transition);

/**
 * pci_set_power_state - Set the power state of a PCI device
 * @dev: PCI device to handle.
 * @state: PCI power state (D0, D1, D2, D3hot) to put the device into.
 *
 * Transition a device to a new power state, using the platform firmware and/or
 * the device's PCI PM registers.
 *
 * RETURN VALUE:
 * -EINVAL if the requested state is invalid.
 * -EIO if device does not support PCI PM or its PM capabilities register has a
 * wrong version, or device doesn't support the requested state.
 * 0 if the transition is to D1 or D2 but D1 and D2 are not supported.
 * 0 if device already is in the requested state.
 * 0 if the transition is to D3 but D3 is not supported.
 * 0 if device's power state has been successfully changed.
 */
int pci_set_power_state(struct pci_dev *dev, pci_power_t state)
{
	int error;

	/* Bound the state we're entering */
	if (state > PCI_D3cold)
		state = PCI_D3cold;
	else if (state < PCI_D0)
		state = PCI_D0;
	else if ((state == PCI_D1 || state == PCI_D2) && pci_no_d1d2(dev))
		/*
		 * If the device or the parent bridge do not support PCI PM,
		 * ignore the request if we're doing anything other than
		 * putting it into D0 (which would only happen on boot).
		 */
		return 0;

	/* Check if we're already there */
	if (dev->current_state == state)
		return 0;

	__pci_start_power_transition(dev, state);

	/*
	 * This device is quirked not to be put into D3, so don't put it
	 * in D3.
	 */
	if (state >= PCI_D3hot && (dev->dev_flags & PCI_DEV_FLAGS_NO_D3))
		return 0;

	/*
	 * To put the device in D3cold, we put the device into D3hot the
	 * native way, then put the device into D3cold with platform ops.
	 */
	error = pci_raw_set_power_state(dev, state > PCI_D3hot ?
					PCI_D3hot : state);

	if (!__pci_complete_power_transition(dev, state))
		error = 0;

	return error;
}
EXPORT_SYMBOL(pci_set_power_state);

/**
 * pci_choose_state - Choose the power state of a PCI device
 * @dev: PCI device to be suspended
 * @state: target sleep state for the whole system. This is the value
 *	that is passed to suspend() function.
 *
 * Returns PCI power state suitable for given device and given system
 * message.
 */
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state)
{
	pci_power_t ret;

	if (!dev->pm_cap)
		return PCI_D0;

	ret = platform_pci_choose_state(dev);
	if (ret != PCI_POWER_ERROR)
		return ret;

	switch (state.event) {
	case PM_EVENT_ON:
		return PCI_D0;
	case PM_EVENT_FREEZE:
	case PM_EVENT_PRETHAW:
		/* REVISIT both freeze and pre-thaw "should" use D0 */
	case PM_EVENT_SUSPEND:
	case PM_EVENT_HIBERNATE:
		return PCI_D3hot;
	default:
		pci_info(dev, "unrecognized suspend event %d\n",
			 state.event);
		BUG();
	}
	return PCI_D0;
}
EXPORT_SYMBOL(pci_choose_state);

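/*
 * Usage sketch (illustrative only): a hypothetical legacy-PM suspend hook
 * combining pci_save_state(), pci_choose_state() and pci_set_power_state().
 *
 *	static int foo_suspend(struct pci_dev *pdev, pm_message_t msg)
 *	{
 *		pci_save_state(pdev);
 *		pci_disable_device(pdev);
 *		return pci_set_power_state(pdev, pci_choose_state(pdev, msg));
 *	}
 */
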
#define PCI_EXP_SAVE_REGS	7

static struct pci_cap_saved_state *_pci_find_saved_cap(struct pci_dev *pci_dev,
						       u16 cap, bool extended)
{
	struct pci_cap_saved_state *tmp;

	hlist_for_each_entry(tmp, &pci_dev->saved_cap_space, next) {
		if (tmp->cap.cap_extended == extended && tmp->cap.cap_nr == cap)
			return tmp;
	}
	return NULL;
}

struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap)
{
	return _pci_find_saved_cap(dev, cap, false);
}

struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev, u16 cap)
{
	return _pci_find_saved_cap(dev, cap, true);
}

static int pci_save_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	if (!pci_is_pcie(dev))
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_RTCTL, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_LNKCTL2, &cap[i++]);
	pcie_capability_read_word(dev, PCI_EXP_SLTCTL2, &cap[i++]);

	return 0;
}

static void pci_restore_pcie_state(struct pci_dev *dev)
{
	int i = 0;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_EXP);
	if (!save_state)
		return;

	cap = (u16 *)&save_state->cap.data[0];
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_RTCTL, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_DEVCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_LNKCTL2, cap[i++]);
	pcie_capability_write_word(dev, PCI_EXP_SLTCTL2, cap[i++]);
}

static int pci_save_pcix_state(struct pci_dev *dev)
{
	int pos;
	struct pci_cap_saved_state *save_state;

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return 0;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	if (!save_state) {
		pci_err(dev, "buffer not found in %s\n", __func__);
		return -ENOMEM;
	}

	pci_read_config_word(dev, pos + PCI_X_CMD,
			     (u16 *)save_state->cap.data);

	return 0;
}

static void pci_restore_pcix_state(struct pci_dev *dev)
{
	int i = 0, pos;
	struct pci_cap_saved_state *save_state;
	u16 *cap;

	save_state = pci_find_saved_cap(dev, PCI_CAP_ID_PCIX);
	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!save_state || !pos)
		return;
	cap = (u16 *)&save_state->cap.data[0];

	pci_write_config_word(dev, pos + PCI_X_CMD, cap[i++]);
}

/**
 * pci_save_state - save the PCI configuration space of a device before suspending
 * @dev: PCI device that we're dealing with
 */
int pci_save_state(struct pci_dev *dev)
{
	int i;
	/* XXX: 100% dword access ok here? */
	for (i = 0; i < 16; i++)
		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
	dev->state_saved = true;

	i = pci_save_pcie_state(dev);
	if (i != 0)
		return i;

	i = pci_save_pcix_state(dev);
	if (i != 0)
		return i;

	return pci_save_vc_state(dev);
}
EXPORT_SYMBOL(pci_save_state);

static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
				     u32 saved_val, int retry)
{
	u32 val;

	pci_read_config_dword(pdev, offset, &val);
	if (val == saved_val)
		return;

	for (;;) {
		pci_dbg(pdev, "restoring config space at offset %#x (was %#x, writing %#x)\n",
			offset, val, saved_val);
		pci_write_config_dword(pdev, offset, saved_val);
		if (retry-- <= 0)
			return;

		pci_read_config_dword(pdev, offset, &val);
		if (val == saved_val)
			return;

		mdelay(1);
	}
}

static void pci_restore_config_space_range(struct pci_dev *pdev,
					   int start, int end, int retry)
{
	int index;

	for (index = end; index >= start; index--)
		pci_restore_config_dword(pdev, 4 * index,
					 pdev->saved_config_space[index],
					 retry);
}

static void pci_restore_config_space(struct pci_dev *pdev)
{
	if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
		pci_restore_config_space_range(pdev, 10, 15, 0);
		/* Restore BARs before the command register. */
		pci_restore_config_space_range(pdev, 4, 9, 10);
		pci_restore_config_space_range(pdev, 0, 3, 0);
	} else {
		pci_restore_config_space_range(pdev, 0, 15, 0);
	}
}

/**
 * pci_restore_state - Restore the saved state of a PCI device
 * @dev: PCI device that we're dealing with
 */
void pci_restore_state(struct pci_dev *dev)
{
	if (!dev->state_saved)
		return;

	/* PCI Express register must be restored first */
	pci_restore_pcie_state(dev);
	pci_restore_pasid_state(dev);
	pci_restore_pri_state(dev);
	pci_restore_ats_state(dev);
	pci_restore_vc_state(dev);

	pci_cleanup_aer_error_status_regs(dev);

	pci_restore_config_space(dev);

	pci_restore_pcix_state(dev);
	pci_restore_msi_state(dev);

	/* Restore ACS and IOV configuration state */
	pci_enable_acs(dev);
	pci_restore_iov_state(dev);

	dev->state_saved = false;
}
EXPORT_SYMBOL(pci_restore_state);

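/*
 * Usage sketch (illustrative only): the matching hypothetical resume hook
 * for the suspend example above, bringing the device back to D0 and
 * restoring the state saved at suspend time.
 *
 *	static int foo_resume(struct pci_dev *pdev)
 *	{
 *		pci_set_power_state(pdev, PCI_D0);
 *		pci_restore_state(pdev);
 *		return pci_enable_device(pdev);
 *	}
 */
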
struct pci_saved_state {
	u32 config_space[16];
	struct pci_cap_saved_data cap[0];
};

/**
 * pci_store_saved_state - Allocate and return an opaque struct containing
 *			   the device saved state.
 * @dev: PCI device that we're dealing with
 *
 * Return NULL if no state or error.
 */
struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev)
{
	struct pci_saved_state *state;
	struct pci_cap_saved_state *tmp;
	struct pci_cap_saved_data *cap;
	size_t size;

	if (!dev->state_saved)
		return NULL;

	size = sizeof(*state) + sizeof(struct pci_cap_saved_data);

	hlist_for_each_entry(tmp, &dev->saved_cap_space, next)
		size += sizeof(struct pci_cap_saved_data) + tmp->cap.size;

	state = kzalloc(size, GFP_KERNEL);
	if (!state)
		return NULL;

	memcpy(state->config_space, dev->saved_config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	hlist_for_each_entry(tmp, &dev->saved_cap_space, next) {
		size_t len = sizeof(struct pci_cap_saved_data) + tmp->cap.size;
		memcpy(cap, &tmp->cap, len);
		cap = (struct pci_cap_saved_data *)((u8 *)cap + len);
	}
	/* Empty cap_save terminates list */

	return state;
}
EXPORT_SYMBOL_GPL(pci_store_saved_state);

/**
 * pci_load_saved_state - Reload the provided save state into struct pci_dev.
 * @dev: PCI device that we're dealing with
 * @state: Saved state returned from pci_store_saved_state()
 */
int pci_load_saved_state(struct pci_dev *dev,
			 struct pci_saved_state *state)
{
	struct pci_cap_saved_data *cap;

	dev->state_saved = false;

	if (!state)
		return 0;

	memcpy(dev->saved_config_space, state->config_space,
	       sizeof(state->config_space));

	cap = state->cap;
	while (cap->size) {
		struct pci_cap_saved_state *tmp;

		tmp = _pci_find_saved_cap(dev, cap->cap_nr, cap->cap_extended);
		if (!tmp || tmp->cap.size != cap->size)
			return -EINVAL;

		memcpy(tmp->cap.data, cap->data, tmp->cap.size);
		cap = (struct pci_cap_saved_data *)((u8 *)cap +
		       sizeof(struct pci_cap_saved_data) + cap->size);
	}

	dev->state_saved = true;
	return 0;
}
EXPORT_SYMBOL_GPL(pci_load_saved_state);

/**
 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
 *				   and free the memory allocated for it.
 * @dev: PCI device that we're dealing with
 * @state: Pointer to saved state returned from pci_store_saved_state()
 */
int pci_load_and_free_saved_state(struct pci_dev *dev,
				  struct pci_saved_state **state)
{
	int ret = pci_load_saved_state(dev, *state);
	kfree(*state);
	*state = NULL;
	return ret;
}
EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);

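/*
 * Usage sketch (illustrative only): snapshotting config space around an
 * operation that may clobber it, then rolling the snapshot back.
 *
 *	struct pci_saved_state *snap;
 *
 *	pci_save_state(pdev);
 *	snap = pci_store_saved_state(pdev);
 *
 *	(operation that may clobber config space goes here)
 *
 *	pci_load_and_free_saved_state(pdev, &snap);
 *	pci_restore_state(pdev);
 */
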
int __weak pcibios_enable_device(struct pci_dev *dev, int bars)
{
	return pci_enable_resources(dev, bars);
}

static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
	int err;
	struct pci_dev *bridge;
	u16 cmd;
	u8 pin;

	err = pci_set_power_state(dev, PCI_D0);
	if (err < 0 && err != -EIO)
		return err;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pcie_aspm_powersave_config_link(bridge);

	err = pcibios_enable_device(dev, bars);
	if (err < 0)
		return err;
	pci_fixup_device(pci_fixup_enable, dev);

	if (dev->msi_enabled || dev->msix_enabled)
		return 0;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
	if (pin) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & PCI_COMMAND_INTX_DISABLE)
			pci_write_config_word(dev, PCI_COMMAND,
					      cmd & ~PCI_COMMAND_INTX_DISABLE);
	}

	return 0;
}

/**
 * pci_reenable_device - Resume abandoned device
 * @dev: PCI device to be resumed
 *
 * Note this function is a backend of pci_default_resume and is not supposed
 * to be called by normal code; write a proper resume handler and use that
 * instead.
 */
int pci_reenable_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		return do_pci_enable_device(dev, (1 << PCI_NUM_RESOURCES) - 1);
	return 0;
}
EXPORT_SYMBOL(pci_reenable_device);

static void pci_enable_bridge(struct pci_dev *dev)
{
	struct pci_dev *bridge;
	int retval;

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	if (pci_is_enabled(dev)) {
		if (!dev->is_busmaster)
			pci_set_master(dev);
		return;
	}

	retval = pci_enable_device(dev);
	if (retval)
		pci_err(dev, "Error enabling bridge (%d), continuing\n",
			retval);
	pci_set_master(dev);
}

static int pci_enable_device_flags(struct pci_dev *dev, unsigned long flags)
{
	struct pci_dev *bridge;
	int err;
	int i, bars = 0;

	/*
	 * Power state could be unknown at this point, either due to a fresh
	 * boot or a device removal call.  So get the current power state
	 * so that things like MSI message writing will behave as expected
	 * (e.g. if the device really is in D0 at enable time).
	 */
	if (dev->pm_cap) {
		u16 pmcsr;
		pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
		dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
	}

	if (atomic_inc_return(&dev->enable_cnt) > 1)
		return 0;		/* already enabled */

	bridge = pci_upstream_bridge(dev);
	if (bridge)
		pci_enable_bridge(bridge);

	/* Only skip SR-IOV-related resources */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);
	for (i = PCI_BRIDGE_RESOURCES; i < DEVICE_COUNT_RESOURCE; i++)
		if (dev->resource[i].flags & flags)
			bars |= (1 << i);

	err = do_pci_enable_device(dev, bars);
	if (err < 0)
		atomic_dec(&dev->enable_cnt);
	return err;
}

/**
 * pci_enable_device_io - Initialize a device for use with IO space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_io(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device_io);

/**
 * pci_enable_device_mem - Initialize a device for use with Memory space
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable Memory resources.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM);
}
EXPORT_SYMBOL(pci_enable_device_mem);

/**
 * pci_enable_device - Initialize device before it's used by a driver.
 * @dev: PCI device to be initialized
 *
 * Initialize device before it's used by a driver.  Ask low-level code
 * to enable I/O and memory.  Wake up the device if it was suspended.
 * Beware, this function can fail.
 *
 * Note we don't actually enable the device many times if we call
 * this function repeatedly (we just increment the count).
 */
int pci_enable_device(struct pci_dev *dev)
{
	return pci_enable_device_flags(dev, IORESOURCE_MEM | IORESOURCE_IO);
}
EXPORT_SYMBOL(pci_enable_device);

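/*
 * Usage sketch (illustrative only): the classic unmanaged probe sequence
 * built on pci_enable_device().  The "foo" driver name is hypothetical.
 *
 *	err = pci_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pci_request_regions(pdev, "foo");
 *	if (err) {
 *		pci_disable_device(pdev);
 *		return err;
 *	}
 *	pci_set_master(pdev);
 */
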
/*
 * Managed PCI resources.  This manages device on/off, INTx/MSI/MSI-X
 * on/off and BAR regions.  pci_dev itself records MSI/MSI-X status, so
 * there's no need to track it separately.  pci_devres is initialized
 * when a device is enabled using the managed PCI device enable interface.
 */
struct pci_devres {
	unsigned int enabled:1;
	unsigned int pinned:1;
	unsigned int orig_intx:1;
	unsigned int restore_intx:1;
	unsigned int mwi:1;
	u32 region_mask;
};

static void pcim_release(struct device *gendev, void *res)
{
	struct pci_dev *dev = to_pci_dev(gendev);
	struct pci_devres *this = res;
	int i;

	if (dev->msi_enabled)
		pci_disable_msi(dev);
	if (dev->msix_enabled)
		pci_disable_msix(dev);

	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
		if (this->region_mask & (1 << i))
			pci_release_region(dev, i);

	if (this->mwi)
		pci_clear_mwi(dev);

	if (this->restore_intx)
		pci_intx(dev, this->orig_intx);

	if (this->enabled && !this->pinned)
		pci_disable_device(dev);
}

static struct pci_devres *get_pci_dr(struct pci_dev *pdev)
{
	struct pci_devres *dr, *new_dr;

	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
	if (dr)
		return dr;

	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
	if (!new_dr)
		return NULL;
	return devres_get(&pdev->dev, new_dr, NULL, NULL);
}

static struct pci_devres *find_pci_dr(struct pci_dev *pdev)
{
	if (pci_is_managed(pdev))
		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
	return NULL;
}

/**
 * pcim_enable_device - Managed pci_enable_device()
 * @pdev: PCI device to be initialized
 *
 * Managed pci_enable_device().
 */
int pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int rc;

	dr = get_pci_dr(pdev);
	if (unlikely(!dr))
		return -ENOMEM;
	if (dr->enabled)
		return 0;

	rc = pci_enable_device(pdev);
	if (!rc) {
		pdev->is_managed = 1;
		dr->enabled = 1;
	}
	return rc;
}
EXPORT_SYMBOL(pcim_enable_device);

/**
 * pcim_pin_device - Pin managed PCI device
 * @pdev: PCI device to pin
 *
 * Pin managed PCI device @pdev.  Pinned device won't be disabled on
 * driver detach.  @pdev must have been enabled with
 * pcim_enable_device().
 */
void pcim_pin_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(pdev);
	WARN_ON(!dr || !dr->enabled);
	if (dr)
		dr->pinned = 1;
}
EXPORT_SYMBOL(pcim_pin_device);

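/*
 * Usage sketch (illustrative only): the managed (devres) variant of the
 * probe sequence shown earlier; resources are released automatically on
 * driver detach, so no explicit unwind is needed.
 *
 *	err = pcim_enable_device(pdev);
 *	if (err)
 *		return err;
 *
 *	err = pcim_iomap_regions(pdev, BIT(0), "foo");
 *	if (err)
 *		return err;
 *
 *	regs = pcim_iomap_table(pdev)[0];
 */
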
/*
 * pcibios_add_device - provide arch specific hooks when adding device dev
 * @dev: the PCI device being added
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are added.  This is the default implementation.  Architecture
 * implementations can override this.
 */
int __weak pcibios_add_device(struct pci_dev *dev)
{
	return 0;
}

/**
 * pcibios_release_device - provide arch specific hooks when releasing device dev
 * @dev: the PCI device being released
 *
 * Permits the platform to provide architecture specific functionality when
 * devices are released.  This is the default implementation.  Architecture
 * implementations can override this.
 */
void __weak pcibios_release_device(struct pci_dev *dev) {}

/**
 * pcibios_disable_device - disable arch specific PCI resources for device dev
 * @dev: the PCI device to disable
 *
 * Disables architecture specific PCI resources for the device.  This
 * is the default implementation.  Architecture implementations can
 * override this.
 */
void __weak pcibios_disable_device(struct pci_dev *dev) {}

/**
 * pcibios_penalize_isa_irq - penalize an ISA IRQ
 * @irq: ISA IRQ to penalize
 * @active: IRQ active or not
 *
 * Permits the platform to provide architecture-specific functionality when
 * penalizing ISA IRQs.  This is the default implementation.  Architecture
 * implementations can override this.
 */
void __weak pcibios_penalize_isa_irq(int irq, int active) {}

static void do_pci_disable_device(struct pci_dev *dev)
{
	u16 pci_command;

	pci_read_config_word(dev, PCI_COMMAND, &pci_command);
	if (pci_command & PCI_COMMAND_MASTER) {
		pci_command &= ~PCI_COMMAND_MASTER;
		pci_write_config_word(dev, PCI_COMMAND, pci_command);
	}

	pcibios_disable_device(dev);
}

/**
 * pci_disable_enabled_device - Disable device without updating enable_cnt
 * @dev: PCI device to disable
 *
 * NOTE: This function is a backend of PCI power management routines and is
 * not supposed to be called by drivers.
 */
void pci_disable_enabled_device(struct pci_dev *dev)
{
	if (pci_is_enabled(dev))
		do_pci_disable_device(dev);
}

/**
 * pci_disable_device - Disable PCI device after use
 * @dev: PCI device to be disabled
 *
 * Signal to the system that the PCI device is not in use by the system
 * anymore.  This only involves disabling PCI bus-mastering, if active.
 *
 * Note we don't actually disable the device until all callers of
 * pci_enable_device() have called pci_disable_device().
 */
void pci_disable_device(struct pci_dev *dev)
{
	struct pci_devres *dr;

	dr = find_pci_dr(dev);
	if (dr)
		dr->enabled = 0;

	dev_WARN_ONCE(&dev->dev, atomic_read(&dev->enable_cnt) <= 0,
		      "disabling already-disabled device");

	if (atomic_dec_return(&dev->enable_cnt) != 0)
		return;

	do_pci_disable_device(dev);

	dev->is_busmaster = 0;
}
EXPORT_SYMBOL(pci_disable_device);

/**
 * pcibios_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCIe reset state for the device.  This is the default
 * implementation.  Architecture implementations can override this.
 */
int __weak pcibios_set_pcie_reset_state(struct pci_dev *dev,
					enum pcie_reset_state state)
{
	return -EINVAL;
}

/**
 * pci_set_pcie_reset_state - set reset state for device dev
 * @dev: the PCIe device reset
 * @state: Reset state to enter into
 *
 * Sets the PCI reset state for the device.
 */
int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
{
	return pcibios_set_pcie_reset_state(dev, state);
}
EXPORT_SYMBOL_GPL(pci_set_pcie_reset_state);

/**
 * pcie_clear_root_pme_status - Clear root port PME interrupt status.
 * @dev: PCIe root port or event collector.
 */
void pcie_clear_root_pme_status(struct pci_dev *dev)
{
	pcie_capability_set_dword(dev, PCI_EXP_RTSTA, PCI_EXP_RTSTA_PME);
}

/**
 * pci_check_pme_status - Check if given device has generated PME.
 * @dev: Device to check.
 *
 * Check the PME status of the device and if set, clear it and clear PME enable
 * (if set).  Return 'true' if PME status and PME enable were both set or
 * 'false' otherwise.
 */
bool pci_check_pme_status(struct pci_dev *dev)
{
	int pmcsr_pos;
	u16 pmcsr;
	bool ret = false;

	if (!dev->pm_cap)
		return false;

	pmcsr_pos = dev->pm_cap + PCI_PM_CTRL;
	pci_read_config_word(dev, pmcsr_pos, &pmcsr);
	if (!(pmcsr & PCI_PM_CTRL_PME_STATUS))
		return false;

	/* Clear PME status. */
	pmcsr |= PCI_PM_CTRL_PME_STATUS;
	if (pmcsr & PCI_PM_CTRL_PME_ENABLE) {
		/* Disable PME to avoid interrupt flood. */
		pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
		ret = true;
	}

	pci_write_config_word(dev, pmcsr_pos, pmcsr);

	return ret;
}

b67ea761
RW
1742/**
1743 * pci_pme_wakeup - Wake up a PCI device if its PME Status bit is set.
1744 * @dev: Device to handle.
379021d5 1745 * @pme_poll_reset: Whether or not to reset the device's pme_poll flag.
b67ea761
RW
1746 *
1747 * Check if @dev has generated PME and queue a resume request for it in that
1748 * case.
1749 */
379021d5 1750static int pci_pme_wakeup(struct pci_dev *dev, void *pme_poll_reset)
b67ea761 1751{
379021d5
RW
1752 if (pme_poll_reset && dev->pme_poll)
1753 dev->pme_poll = false;
1754
c125e96f 1755 if (pci_check_pme_status(dev)) {
c125e96f 1756 pci_wakeup_event(dev);
0f953bf6 1757 pm_request_resume(&dev->dev);
c125e96f 1758 }
b67ea761
RW
1759 return 0;
1760}
1761
1762/**
1763 * pci_pme_wakeup_bus - Walk given bus and wake up devices on it, if necessary.
1764 * @bus: Top bus of the subtree to walk.
1765 */
1766void pci_pme_wakeup_bus(struct pci_bus *bus)
1767{
1768 if (bus)
379021d5 1769 pci_walk_bus(bus, pci_pme_wakeup, (void *)true);
b67ea761
RW
1770}
1771
448bd857 1772
eb9d0fe4
RW
1773/**
1774 * pci_pme_capable - check the capability of PCI device to generate PME#
1775 * @dev: PCI device to handle.
eb9d0fe4
RW
1776 * @state: PCI state from which device will issue PME#.
1777 */
e5899e1b 1778bool pci_pme_capable(struct pci_dev *dev, pci_power_t state)
eb9d0fe4 1779{
337001b6 1780 if (!dev->pm_cap)
eb9d0fe4
RW
1781 return false;
1782
337001b6 1783 return !!(dev->pme_support & (1 << state));
eb9d0fe4 1784}
b7fe9434 1785EXPORT_SYMBOL(pci_pme_capable);
eb9d0fe4 1786
df17e62e
MG
1787static void pci_pme_list_scan(struct work_struct *work)
1788{
379021d5 1789 struct pci_pme_device *pme_dev, *n;
df17e62e
MG
1790
1791 mutex_lock(&pci_pme_list_mutex);
ce300008
BH
1792 list_for_each_entry_safe(pme_dev, n, &pci_pme_list, list) {
1793 if (pme_dev->dev->pme_poll) {
1794 struct pci_dev *bridge;
1795
1796 bridge = pme_dev->dev->bus->self;
1797 /*
1798 * If the bridge is in a low-power state, the
1799 * configuration space of subordinate devices
1800 * may not be accessible.
1801 */
1802 if (bridge && bridge->current_state != PCI_D0)
1803 continue;
1804 pci_pme_wakeup(pme_dev->dev, NULL);
1805 } else {
1806 list_del(&pme_dev->list);
1807 kfree(pme_dev);
379021d5 1808 }
df17e62e 1809 }
ce300008 1810 if (!list_empty(&pci_pme_list))
ea00353f
LW
1811 queue_delayed_work(system_freezable_wq, &pci_pme_work,
1812 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1813 mutex_unlock(&pci_pme_list_mutex);
1814}
1815
2cef548a 1816static void __pci_pme_active(struct pci_dev *dev, bool enable)
eb9d0fe4
RW
1817{
1818 u16 pmcsr;
1819
ffaddbe8 1820 if (!dev->pme_support)
eb9d0fe4
RW
1821 return;
1822
337001b6 1823 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
eb9d0fe4
RW
1824 /* Clear PME_Status by writing 1 to it and enable PME# */
1825 pmcsr |= PCI_PM_CTRL_PME_STATUS | PCI_PM_CTRL_PME_ENABLE;
1826 if (!enable)
1827 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1828
337001b6 1829 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
2cef548a
RW
1830}
1831
0ce3fcaf
RW
1832/**
1833 * pci_pme_restore - Restore PME configuration after config space restore.
1834 * @dev: PCI device to update.
1835 */
1836void pci_pme_restore(struct pci_dev *dev)
dc15e71e
RW
1837{
1838 u16 pmcsr;
1839
1840 if (!dev->pme_support)
1841 return;
1842
1843 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
1844 if (dev->wakeup_prepared) {
1845 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
0ce3fcaf 1846 pmcsr &= ~PCI_PM_CTRL_PME_STATUS;
dc15e71e
RW
1847 } else {
1848 pmcsr &= ~PCI_PM_CTRL_PME_ENABLE;
1849 pmcsr |= PCI_PM_CTRL_PME_STATUS;
1850 }
1851 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, pmcsr);
1852}
1853
2cef548a
RW
1854/**
1855 * pci_pme_active - enable or disable PCI device's PME# function
1856 * @dev: PCI device to handle.
1857 * @enable: 'true' to enable PME# generation; 'false' to disable it.
1858 *
1859 * The caller must verify that the device is capable of generating PME# before
1860 * calling this function with @enable equal to 'true'.
1861 */
1862void pci_pme_active(struct pci_dev *dev, bool enable)
1863{
1864 __pci_pme_active(dev, enable);
eb9d0fe4 1865
6e965e0d
HY
1866 /*
1867 * PCI (as opposed to PCIe) PME requires that the device have
1868 * its PME# line hooked up correctly. Not all hardware vendors
1869 * do this, so the PME never gets delivered and the device
1870 * remains asleep. The easiest way around this is to
1871 * periodically walk the list of suspended devices and check
1872 * whether any have their PME flag set. The assumption is that
1873 * we'll wake up often enough anyway that this won't be a huge
1874 * hit, and the power savings from the devices will still be a
1875 * win.
1876 *
1877 * Although PCIe uses an in-band PME message instead of the PME#
1878 * line to report PME, PME does not work for some PCIe devices in
1879 * reality. For example, there are devices that set their PME
1880 * status bits, but don't really bother to send a PME message;
1881 * there are PCI Express Root Ports that don't bother to
1882 * trigger interrupts when they receive PME messages from the
1883 * devices below. So PME poll is used for PCIe devices too.
1884 */
df17e62e 1885
379021d5 1886 if (dev->pme_poll) {
df17e62e
MG
1887 struct pci_pme_device *pme_dev;
1888 if (enable) {
1889 pme_dev = kmalloc(sizeof(struct pci_pme_device),
1890 GFP_KERNEL);
0394cb19 1891 if (!pme_dev) {
7506dc79 1892 pci_warn(dev, "can't enable PME#\n");
0394cb19
BH
1893 return;
1894 }
df17e62e
MG
1895 pme_dev->dev = dev;
1896 mutex_lock(&pci_pme_list_mutex);
1897 list_add(&pme_dev->list, &pci_pme_list);
1898 if (list_is_singular(&pci_pme_list))
ea00353f
LW
1899 queue_delayed_work(system_freezable_wq,
1900 &pci_pme_work,
1901 msecs_to_jiffies(PME_TIMEOUT));
df17e62e
MG
1902 mutex_unlock(&pci_pme_list_mutex);
1903 } else {
1904 mutex_lock(&pci_pme_list_mutex);
1905 list_for_each_entry(pme_dev, &pci_pme_list, list) {
1906 if (pme_dev->dev == dev) {
1907 list_del(&pme_dev->list);
1908 kfree(pme_dev);
1909 break;
1910 }
1911 }
1912 mutex_unlock(&pci_pme_list_mutex);
1913 }
1914 }
1915
7506dc79 1916 pci_dbg(dev, "PME# %s\n", enable ? "enabled" : "disabled");
eb9d0fe4 1917}
b7fe9434 1918EXPORT_SYMBOL(pci_pme_active);
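/*
 * Example: a minimal sketch of the check-then-enable pattern required by
 * the comment above; the caller verifies PME# capability for the target
 * state before enabling generation ("pdev" is a hypothetical device):
 *
 *	if (pci_pme_capable(pdev, PCI_D3hot))
 *		pci_pme_active(pdev, true);
 */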
eb9d0fe4 1919
1da177e4 1920/**
cfcadfaa 1921 * __pci_enable_wake - enable PCI device as wakeup event source
075c1771
DB
1922 * @dev: PCI device affected
1923 * @state: PCI state from which device will issue wakeup events
1924 * @enable: True to enable event generation; false to disable
1925 *
1926 * This enables the device as a wakeup event source, or disables it.
1927 * When such events involve platform-specific hooks, those hooks are
1928 * called automatically by this routine.
1929 *
1930 * Devices with legacy power management (no standard PCI PM capabilities)
eb9d0fe4 1931 * always require such platform hooks.
075c1771 1932 *
eb9d0fe4
RW
1933 * RETURN VALUE:
1934 * 0 is returned on success
1935 * -EINVAL is returned if the device is not supposed to wake up the system
1936 * Error code depending on the platform is returned if both the platform and
1937 * the native mechanism fail to enable the generation of wake-up events
1da177e4 1938 */
cfcadfaa 1939static int __pci_enable_wake(struct pci_dev *dev, pci_power_t state, bool enable)
1da177e4 1940{
5bcc2fb4 1941 int ret = 0;
075c1771 1942
baecc470
RW
1943 /*
1944 * Bridges can only signal wakeup on behalf of subordinate devices,
1945 * but that is set up elsewhere, so skip them.
1946 */
1947 if (pci_has_subordinate(dev))
1948 return 0;
1949
0ce3fcaf
RW
1950 /* Don't do the same thing twice in a row for one device. */
1951 if (!!enable == !!dev->wakeup_prepared)
e80bb09d
RW
1952 return 0;
1953
eb9d0fe4
RW
1954 /*
1955 * According to "PCI System Architecture" 4th ed. by Tom Shanley & Don
1956 * Anderson we should be doing PME# wake enable followed by ACPI wake
1957 * enable. To disable wake-up we call the platform first, for symmetry.
075c1771 1958 */
1da177e4 1959
5bcc2fb4
RW
1960 if (enable) {
1961 int error;
1da177e4 1962
5bcc2fb4
RW
1963 if (pci_pme_capable(dev, state))
1964 pci_pme_active(dev, true);
1965 else
1966 ret = 1;
0847684c 1967 error = platform_pci_set_wakeup(dev, true);
5bcc2fb4
RW
1968 if (ret)
1969 ret = error;
e80bb09d
RW
1970 if (!ret)
1971 dev->wakeup_prepared = true;
5bcc2fb4 1972 } else {
0847684c 1973 platform_pci_set_wakeup(dev, false);
5bcc2fb4 1974 pci_pme_active(dev, false);
e80bb09d 1975 dev->wakeup_prepared = false;
5bcc2fb4 1976 }
1da177e4 1977
5bcc2fb4 1978 return ret;
eb9d0fe4 1979}
cfcadfaa
RW
1980
1981/**
1982 * pci_enable_wake - change wakeup settings for a PCI device
1983 * @pci_dev: Target device
1984 * @state: PCI state from which device will issue wakeup events
1985 * @enable: Whether or not to enable event generation
1986 *
1987 * If @enable is set, check device_may_wakeup() for the device before calling
1988 * __pci_enable_wake() for it.
1989 */
1990int pci_enable_wake(struct pci_dev *pci_dev, pci_power_t state, bool enable)
1991{
1992 if (enable && !device_may_wakeup(&pci_dev->dev))
1993 return -EINVAL;
1994
1995 return __pci_enable_wake(pci_dev, state, enable);
1996}
0847684c 1997EXPORT_SYMBOL(pci_enable_wake);
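/*
 * Example: a minimal sketch of a legacy driver suspend path arming
 * PME-based wakeup before entering D3hot. "my_suspend" is hypothetical;
 * most drivers let the PCI core PM callbacks do this for them.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		pci_save_state(pdev);
 *		pci_enable_wake(pdev, PCI_D3hot, device_may_wakeup(&pdev->dev));
 *		return pci_set_power_state(pdev, PCI_D3hot);
 *	}
 */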
1da177e4 1998
0235c4fc
RW
1999/**
2000 * pci_wake_from_d3 - enable/disable device to wake up from D3_hot or D3_cold
2001 * @dev: PCI device to prepare
2002 * @enable: True to enable wake-up event generation; false to disable
2003 *
2004 * Many drivers want the device to wake up the system from D3_hot or D3_cold
2005 * and this function allows them to set that up cleanly - pci_enable_wake()
2006 * should not be called twice in a row to enable wake-up due to PCI PM vs ACPI
2007 * ordering constraints.
2008 *
cfcadfaa
RW
2009 * This function only returns an error code if the device is not allowed to
2010 * wake up the system from sleep, or if it cannot generate PME# from both
2011 * D3_hot and D3_cold and the platform is unable to enable wake-up power for it.
0235c4fc
RW
2012 */
2013int pci_wake_from_d3(struct pci_dev *dev, bool enable)
2014{
2015 return pci_pme_capable(dev, PCI_D3cold) ?
2016 pci_enable_wake(dev, PCI_D3cold, enable) :
2017 pci_enable_wake(dev, PCI_D3hot, enable);
2018}
b7fe9434 2019EXPORT_SYMBOL(pci_wake_from_d3);
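/*
 * Example: a minimal sketch of a network driver arming Wake-on-LAN with
 * the helper above; "wol_enabled" is a hypothetical driver flag.
 *
 *	pci_wake_from_d3(pdev, wol_enabled);
 *	pci_set_power_state(pdev, PCI_D3hot);
 */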
0235c4fc 2020
404cc2d8 2021/**
37139074
JB
2022 * pci_target_state - find an appropriate low power state for a given PCI dev
2023 * @dev: PCI device
666ff6f8 2024 * @wakeup: Whether or not wakeup functionality will be enabled for the device.
37139074
JB
2025 *
2026 * Use underlying platform code to find a supported low power state for @dev.
2027 * If the platform can't manage @dev, return the deepest state from which it
2028 * can generate wake events, based on any available PME info.
404cc2d8 2029 */
666ff6f8 2030static pci_power_t pci_target_state(struct pci_dev *dev, bool wakeup)
404cc2d8
RW
2031{
2032 pci_power_t target_state = PCI_D3hot;
404cc2d8
RW
2033
2034 if (platform_pci_power_manageable(dev)) {
2035 /*
60ee031a 2036 * Call the platform to find the target state for the device.
404cc2d8
RW
2037 */
2038 pci_power_t state = platform_pci_choose_state(dev);
2039
2040 switch (state) {
2041 case PCI_POWER_ERROR:
2042 case PCI_UNKNOWN:
2043 break;
2044 case PCI_D1:
2045 case PCI_D2:
2046 if (pci_no_d1d2(dev))
2047 break;
2048 default:
2049 target_state = state;
404cc2d8 2050 }
4132a577
LW
2051
2052 return target_state;
2053 }
2054
2055 if (!dev->pm_cap)
d2abdf62 2056 target_state = PCI_D0;
4132a577
LW
2057
2058 /*
2059 * If the device is in D3cold even though it's not power-manageable by
2060 * the platform, it may have been powered down by non-standard means.
2061 * Best to let it slumber.
2062 */
2063 if (dev->current_state == PCI_D3cold)
2064 target_state = PCI_D3cold;
2065
666ff6f8 2066 if (wakeup) {
404cc2d8
RW
2067 /*
2068 * Find the deepest state from which the device can generate
60ee031a 2069 * PME#.
404cc2d8 2070 */
337001b6
RW
2071 if (dev->pme_support) {
2072 while (target_state
2073 && !(dev->pme_support & (1 << target_state)))
2074 target_state--;
404cc2d8
RW
2075 }
2076 }
2077
e5899e1b
RW
2078 return target_state;
2079}
2080
2081/**
2082 * pci_prepare_to_sleep - prepare PCI device for system-wide transition into a sleep state
2083 * @dev: Device to handle.
2084 *
2085 * Choose the power state appropriate for the device depending on whether
2086 * it can wake up the system and/or is power manageable by the platform
2087 * (PCI_D3hot is the default) and put the device into that state.
2088 */
2089int pci_prepare_to_sleep(struct pci_dev *dev)
2090{
666ff6f8
RW
2091 bool wakeup = device_may_wakeup(&dev->dev);
2092 pci_power_t target_state = pci_target_state(dev, wakeup);
e5899e1b
RW
2093 int error;
2094
2095 if (target_state == PCI_POWER_ERROR)
2096 return -EIO;
2097
666ff6f8 2098 pci_enable_wake(dev, target_state, wakeup);
c157dfa3 2099
404cc2d8
RW
2100 error = pci_set_power_state(dev, target_state);
2101
2102 if (error)
2103 pci_enable_wake(dev, target_state, false);
2104
2105 return error;
2106}
b7fe9434 2107EXPORT_SYMBOL(pci_prepare_to_sleep);
404cc2d8
RW
2108
2109/**
443bd1c4 2110 * pci_back_from_sleep - turn PCI device on during system-wide transition into working state
404cc2d8
RW
2111 * @dev: Device to handle.
2112 *
88393161 2113 * Disable the device's system wake-up capability and put it into D0.
404cc2d8
RW
2114 */
2115int pci_back_from_sleep(struct pci_dev *dev)
2116{
2117 pci_enable_wake(dev, PCI_D0, false);
2118 return pci_set_power_state(dev, PCI_D0);
2119}
b7fe9434 2120EXPORT_SYMBOL(pci_back_from_sleep);
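/*
 * Example: a minimal sketch of the symmetric use of the two helpers
 * above in a suspend/resume pair; "my_suspend" and "my_resume" are
 * hypothetical callbacks, with error handling elided.
 *
 *	static int my_suspend(struct pci_dev *pdev, pm_message_t state)
 *	{
 *		return pci_prepare_to_sleep(pdev);
 *	}
 *
 *	static int my_resume(struct pci_dev *pdev)
 *	{
 *		return pci_back_from_sleep(pdev);
 *	}
 */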
404cc2d8 2121
6cbf8214
RW
2122/**
2123 * pci_finish_runtime_suspend - Carry out PCI-specific part of runtime suspend.
2124 * @dev: PCI device being suspended.
2125 *
2126 * Prepare @dev to generate wake-up events at run time and put it into a low
2127 * power state.
2128 */
2129int pci_finish_runtime_suspend(struct pci_dev *dev)
2130{
666ff6f8 2131 pci_power_t target_state;
6cbf8214
RW
2132 int error;
2133
666ff6f8 2134 target_state = pci_target_state(dev, device_can_wakeup(&dev->dev));
6cbf8214
RW
2135 if (target_state == PCI_POWER_ERROR)
2136 return -EIO;
2137
448bd857
HY
2138 dev->runtime_d3cold = target_state == PCI_D3cold;
2139
cfcadfaa 2140 __pci_enable_wake(dev, target_state, pci_dev_run_wake(dev));
6cbf8214
RW
2141
2142 error = pci_set_power_state(dev, target_state);
2143
448bd857 2144 if (error) {
0847684c 2145 pci_enable_wake(dev, target_state, false);
448bd857
HY
2146 dev->runtime_d3cold = false;
2147 }
6cbf8214
RW
2148
2149 return error;
2150}
2151
b67ea761
RW
2152/**
2153 * pci_dev_run_wake - Check if device can generate run-time wake-up events.
2154 * @dev: Device to check.
2155 *
f7625980 2156 * Return true if the device itself is capable of generating wake-up events
b67ea761
RW
2157 * (through the platform or using the native PCIe PME) or if the device supports
2158 * PME and one of its upstream bridges can generate wake-up events.
2159 */
2160bool pci_dev_run_wake(struct pci_dev *dev)
2161{
2162 struct pci_bus *bus = dev->bus;
2163
b67ea761
RW
2164 if (!dev->pme_support)
2165 return false;
2166
666ff6f8 2167 /* PME-capable in principle, but not from the target power state */
8feaec33 2168 if (!pci_pme_capable(dev, pci_target_state(dev, true)))
6496ebd7
AS
2169 return false;
2170
8feaec33
KHF
2171 if (device_can_wakeup(&dev->dev))
2172 return true;
2173
b67ea761
RW
2174 while (bus->parent) {
2175 struct pci_dev *bridge = bus->self;
2176
de3ef1eb 2177 if (device_can_wakeup(&bridge->dev))
b67ea761
RW
2178 return true;
2179
2180 bus = bus->parent;
2181 }
2182
2183 /* We have reached the root bus. */
2184 if (bus->bridge)
de3ef1eb 2185 return device_can_wakeup(bus->bridge);
b67ea761
RW
2186
2187 return false;
2188}
2189EXPORT_SYMBOL_GPL(pci_dev_run_wake);
2190
bac2a909
RW
2191/**
2192 * pci_dev_keep_suspended - Check if the device can stay in the suspended state.
2193 * @pci_dev: Device to check.
2194 *
2195 * Return 'true' if the device is runtime-suspended, does not have to be
2196 * reconfigured due to wakeup-setting differences between system and runtime
2197 * suspend, and its current power state is suitable for the upcoming
2198 * (system) transition.
2cef548a
RW
2199 *
2200 * If the device is not configured for system wakeup, disable PME for it before
2201 * returning 'true' to prevent it from waking up the system unnecessarily.
bac2a909
RW
2202 */
2203bool pci_dev_keep_suspended(struct pci_dev *pci_dev)
2204{
2205 struct device *dev = &pci_dev->dev;
666ff6f8 2206 bool wakeup = device_may_wakeup(dev);
bac2a909
RW
2207
2208 if (!pm_runtime_suspended(dev)
666ff6f8 2209 || pci_target_state(pci_dev, wakeup) != pci_dev->current_state
c2eac4d3 2210 || platform_pci_need_resume(pci_dev))
bac2a909
RW
2211 return false;
2212
2cef548a
RW
2213 /*
2214 * At this point the device is good to go unless it's been configured
2215 * to generate PME at the runtime suspend time, but it is not supposed
2216 * to wake up the system. In that case, simply disable PME for it
2217 * (it will have to be re-enabled on exit from system resume).
2218 *
2219 * If the device's power state is D3cold and the platform check above
2220 * hasn't triggered, the device's configuration is suitable and we don't
2221 * need to manipulate it at all.
2222 */
2223 spin_lock_irq(&dev->power.lock);
2224
2225 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold &&
666ff6f8 2226 !wakeup)
2cef548a
RW
2227 __pci_pme_active(pci_dev, false);
2228
2229 spin_unlock_irq(&dev->power.lock);
2230 return true;
2231}
2232
2233/**
2234 * pci_dev_complete_resume - Finalize resume from system sleep for a device.
2235 * @pci_dev: Device to handle.
2236 *
2237 * If the device is runtime suspended and wakeup-capable, enable PME for it as
2238 * it might have been disabled during the prepare phase of system suspend if
2239 * the device was not configured for system wakeup.
2240 */
2241void pci_dev_complete_resume(struct pci_dev *pci_dev)
2242{
2243 struct device *dev = &pci_dev->dev;
2244
2245 if (!pci_dev_run_wake(pci_dev))
2246 return;
2247
2248 spin_lock_irq(&dev->power.lock);
2249
2250 if (pm_runtime_suspended(dev) && pci_dev->current_state < PCI_D3cold)
2251 __pci_pme_active(pci_dev, true);
2252
2253 spin_unlock_irq(&dev->power.lock);
bac2a909
RW
2254}
2255
b3c32c4f
HY
2256void pci_config_pm_runtime_get(struct pci_dev *pdev)
2257{
2258 struct device *dev = &pdev->dev;
2259 struct device *parent = dev->parent;
2260
2261 if (parent)
2262 pm_runtime_get_sync(parent);
2263 pm_runtime_get_noresume(dev);
2264 /*
2265 * pdev->current_state is set to PCI_D3cold during suspending,
2266 * so wait until suspending completes
2267 */
2268 pm_runtime_barrier(dev);
2269 /*
2270 * Only need to resume devices in D3cold, because config
2271 * registers are still accessible for devices suspended but
2272 * not in D3cold.
2273 */
2274 if (pdev->current_state == PCI_D3cold)
2275 pm_runtime_resume(dev);
2276}
2277
2278void pci_config_pm_runtime_put(struct pci_dev *pdev)
2279{
2280 struct device *dev = &pdev->dev;
2281 struct device *parent = dev->parent;
2282
2283 pm_runtime_put(dev);
2284 if (parent)
2285 pm_runtime_put_sync(parent);
2286}
2287
9d26d3a8
MW
2288/**
2289 * pci_bridge_d3_possible - Is it possible to put the bridge into D3
2290 * @bridge: Bridge to check
2291 *
2292 * This function checks if it is possible to move the bridge to D3.
2293 * Currently we only allow D3 for recent enough PCIe ports.
2294 */
c6a63307 2295bool pci_bridge_d3_possible(struct pci_dev *bridge)
9d26d3a8 2296{
9d26d3a8
MW
2297 if (!pci_is_pcie(bridge))
2298 return false;
2299
2300 switch (pci_pcie_type(bridge)) {
2301 case PCI_EXP_TYPE_ROOT_PORT:
2302 case PCI_EXP_TYPE_UPSTREAM:
2303 case PCI_EXP_TYPE_DOWNSTREAM:
2304 if (pci_bridge_d3_disable)
2305 return false;
97a90aee
LW
2306
2307 /*
d98e0929
BH
2308 * Hotplug interrupts cannot be delivered if the link is down,
2309 * so parents of a hotplug port must stay awake. In addition,
2310 * hotplug ports handled by firmware in System Management Mode
97a90aee 2311 * may not be put into D3 by the OS (Thunderbolt on non-Macs).
d98e0929 2312 * For simplicity, disallow in general for now.
97a90aee 2313 */
d98e0929 2314 if (bridge->is_hotplug_bridge)
97a90aee
LW
2315 return false;
2316
9d26d3a8
MW
2317 if (pci_bridge_d3_force)
2318 return true;
2319
2320 /*
2321 * It should be safe to put PCIe ports from 2015 or newer
2322 * to D3.
2323 */
ac95090a 2324 if (dmi_get_bios_year() >= 2015)
9d26d3a8 2325 return true;
9d26d3a8
MW
2326 break;
2327 }
2328
2329 return false;
2330}
2331
2332static int pci_dev_check_d3cold(struct pci_dev *dev, void *data)
2333{
2334 bool *d3cold_ok = data;
9d26d3a8 2335
718a0609
LW
2336 if (/* The device needs to be allowed to go D3cold ... */
2337 dev->no_d3cold || !dev->d3cold_allowed ||
2338
2339 /* ... and if it is wakeup capable to do so from D3cold. */
2340 (device_may_wakeup(&dev->dev) &&
2341 !pci_pme_capable(dev, PCI_D3cold)) ||
2342
2343 /* If it is a bridge it must be allowed to go to D3. */
d98e0929 2344 !pci_power_manageable(dev))
9d26d3a8 2345
718a0609 2346 *d3cold_ok = false;
9d26d3a8 2347
718a0609 2348 return !*d3cold_ok;
9d26d3a8
MW
2349}
2350
2351/*
2352 * pci_bridge_d3_update - Update bridge D3 capabilities
2353 * @dev: PCI device which is changed
9d26d3a8
MW
2354 *
2355 * Update upstream bridge PM capabilities depending on whether the device's
2356 * PM configuration was changed or the device is being removed. The
2357 * change is also propagated upstream.
2358 */
1ed276a7 2359void pci_bridge_d3_update(struct pci_dev *dev)
9d26d3a8 2360{
1ed276a7 2361 bool remove = !device_is_registered(&dev->dev);
9d26d3a8
MW
2362 struct pci_dev *bridge;
2363 bool d3cold_ok = true;
2364
2365 bridge = pci_upstream_bridge(dev);
2366 if (!bridge || !pci_bridge_d3_possible(bridge))
2367 return;
2368
9d26d3a8 2369 /*
e8559b71
LW
2370 * If D3 is currently allowed for the bridge, removing one of its
2371 * children won't change that.
2372 */
2373 if (remove && bridge->bridge_d3)
2374 return;
2375
2376 /*
2377 * If D3 is currently allowed for the bridge and a child is added or
2378 * changed, disallowance of D3 can only be caused by that child, so
2379 * we only need to check that single device, not any of its siblings.
2380 *
2381 * If D3 is currently not allowed for the bridge, checking the device
2382 * first may allow us to skip checking its siblings.
9d26d3a8
MW
2383 */
2384 if (!remove)
2385 pci_dev_check_d3cold(dev, &d3cold_ok);
2386
e8559b71
LW
2387 /*
2388 * If D3 is currently not allowed for the bridge, this may be caused
2389 * either by the device being changed/removed or any of its siblings,
2390 * so we need to go through all children to find out if one of them
2391 * continues to block D3.
2392 */
2393 if (d3cold_ok && !bridge->bridge_d3)
9d26d3a8
MW
2394 pci_walk_bus(bridge->subordinate, pci_dev_check_d3cold,
2395 &d3cold_ok);
9d26d3a8
MW
2396
2397 if (bridge->bridge_d3 != d3cold_ok) {
2398 bridge->bridge_d3 = d3cold_ok;
2399 /* Propagate change to upstream bridges */
1ed276a7 2400 pci_bridge_d3_update(bridge);
9d26d3a8 2401 }
9d26d3a8
MW
2402}
2403
9d26d3a8
MW
2404/**
2405 * pci_d3cold_enable - Enable D3cold for device
2406 * @dev: PCI device to handle
2407 *
2408 * This function can be used by drivers to allow D3cold for the device
2409 * they handle. It also updates upstream PCI bridge PM capabilities
2410 * accordingly.
2411 */
2412void pci_d3cold_enable(struct pci_dev *dev)
2413{
2414 if (dev->no_d3cold) {
2415 dev->no_d3cold = false;
1ed276a7 2416 pci_bridge_d3_update(dev);
9d26d3a8
MW
2417 }
2418}
2419EXPORT_SYMBOL_GPL(pci_d3cold_enable);
2420
2421/**
2422 * pci_d3cold_disable - Disable D3cold for device
2423 * @dev: PCI device to handle
2424 *
2425 * This function can be used by drivers to forbid D3cold for the device
2426 * they handle. It also updates upstream PCI bridge PM capabilities
2427 * accordingly.
2428 */
2429void pci_d3cold_disable(struct pci_dev *dev)
2430{
2431 if (!dev->no_d3cold) {
2432 dev->no_d3cold = true;
1ed276a7 2433 pci_bridge_d3_update(dev);
9d26d3a8
MW
2434 }
2435}
2436EXPORT_SYMBOL_GPL(pci_d3cold_disable);
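/*
 * Example: a minimal sketch of a driver opting its device out of D3cold
 * when that state is known to be unreliable; "broken_d3cold" is a
 * hypothetical quirk flag.
 *
 *	if (broken_d3cold)
 *		pci_d3cold_disable(pdev);
 */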
2437
eb9d0fe4
RW
2438/**
2439 * pci_pm_init - Initialize PM functions of given PCI device
2440 * @dev: PCI device to handle.
2441 */
2442void pci_pm_init(struct pci_dev *dev)
2443{
2444 int pm;
2445 u16 pmc;
1da177e4 2446
bb910a70 2447 pm_runtime_forbid(&dev->dev);
967577b0
HY
2448 pm_runtime_set_active(&dev->dev);
2449 pm_runtime_enable(&dev->dev);
a1e4d72c 2450 device_enable_async_suspend(&dev->dev);
e80bb09d 2451 dev->wakeup_prepared = false;
bb910a70 2452
337001b6 2453 dev->pm_cap = 0;
ffaddbe8 2454 dev->pme_support = 0;
337001b6 2455
eb9d0fe4
RW
2456 /* find PCI PM capability in list */
2457 pm = pci_find_capability(dev, PCI_CAP_ID_PM);
2458 if (!pm)
50246dd4 2459 return;
eb9d0fe4
RW
2460 /* Check device's ability to generate PME# */
2461 pci_read_config_word(dev, pm + PCI_PM_PMC, &pmc);
075c1771 2462
eb9d0fe4 2463 if ((pmc & PCI_PM_CAP_VER_MASK) > 3) {
7506dc79 2464 pci_err(dev, "unsupported PM cap regs version (%u)\n",
eb9d0fe4 2465 pmc & PCI_PM_CAP_VER_MASK);
50246dd4 2466 return;
eb9d0fe4
RW
2467 }
2468
337001b6 2469 dev->pm_cap = pm;
1ae861e6 2470 dev->d3_delay = PCI_PM_D3_WAIT;
448bd857 2471 dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
9d26d3a8 2472 dev->bridge_d3 = pci_bridge_d3_possible(dev);
4f9c1397 2473 dev->d3cold_allowed = true;
337001b6
RW
2474
2475 dev->d1_support = false;
2476 dev->d2_support = false;
2477 if (!pci_no_d1d2(dev)) {
c9ed77ee 2478 if (pmc & PCI_PM_CAP_D1)
337001b6 2479 dev->d1_support = true;
c9ed77ee 2480 if (pmc & PCI_PM_CAP_D2)
337001b6 2481 dev->d2_support = true;
c9ed77ee
BH
2482
2483 if (dev->d1_support || dev->d2_support)
7506dc79 2484 pci_printk(KERN_DEBUG, dev, "supports%s%s\n",
ec84f126
JB
2485 dev->d1_support ? " D1" : "",
2486 dev->d2_support ? " D2" : "");
337001b6
RW
2487 }
2488
2489 pmc &= PCI_PM_CAP_PME_MASK;
2490 if (pmc) {
7506dc79 2491 pci_printk(KERN_DEBUG, dev, "PME# supported from%s%s%s%s%s\n",
c9ed77ee
BH
2492 (pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
2493 (pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
2494 (pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
2495 (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
2496 (pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
337001b6 2497 dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
379021d5 2498 dev->pme_poll = true;
eb9d0fe4
RW
2499 /*
2500 * Make device's PM flags reflect the wake-up capability, but
2501 * let the user space enable it to wake up the system as needed.
2502 */
2503 device_set_wakeup_capable(&dev->dev, true);
eb9d0fe4 2504 /* Disable the PME# generation functionality */
337001b6 2505 pci_pme_active(dev, false);
eb9d0fe4 2506 }
1da177e4
LT
2507}
2508
938174e5
SS
2509static unsigned long pci_ea_flags(struct pci_dev *dev, u8 prop)
2510{
92efb1bd 2511 unsigned long flags = IORESOURCE_PCI_FIXED | IORESOURCE_PCI_EA_BEI;
938174e5
SS
2512
2513 switch (prop) {
2514 case PCI_EA_P_MEM:
2515 case PCI_EA_P_VF_MEM:
2516 flags |= IORESOURCE_MEM;
2517 break;
2518 case PCI_EA_P_MEM_PREFETCH:
2519 case PCI_EA_P_VF_MEM_PREFETCH:
2520 flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
2521 break;
2522 case PCI_EA_P_IO:
2523 flags |= IORESOURCE_IO;
2524 break;
2525 default:
2526 return 0;
2527 }
2528
2529 return flags;
2530}
2531
2532static struct resource *pci_ea_get_resource(struct pci_dev *dev, u8 bei,
2533 u8 prop)
2534{
2535 if (bei <= PCI_EA_BEI_BAR5 && prop <= PCI_EA_P_IO)
2536 return &dev->resource[bei];
11183991
DD
2537#ifdef CONFIG_PCI_IOV
2538 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5 &&
2539 (prop == PCI_EA_P_VF_MEM || prop == PCI_EA_P_VF_MEM_PREFETCH))
2540 return &dev->resource[PCI_IOV_RESOURCES +
2541 bei - PCI_EA_BEI_VF_BAR0];
2542#endif
938174e5
SS
2543 else if (bei == PCI_EA_BEI_ROM)
2544 return &dev->resource[PCI_ROM_RESOURCE];
2545 else
2546 return NULL;
2547}
2548
2549/* Read an Enhanced Allocation (EA) entry */
2550static int pci_ea_read(struct pci_dev *dev, int offset)
2551{
2552 struct resource *res;
2553 int ent_size, ent_offset = offset;
2554 resource_size_t start, end;
2555 unsigned long flags;
26635112 2556 u32 dw0, bei, base, max_offset;
938174e5
SS
2557 u8 prop;
2558 bool support_64 = (sizeof(resource_size_t) >= 8);
2559
2560 pci_read_config_dword(dev, ent_offset, &dw0);
2561 ent_offset += 4;
2562
2563 /* Entry size field indicates DWORDs after 1st */
2564 ent_size = ((dw0 & PCI_EA_ES) + 1) << 2;
2565
2566 if (!(dw0 & PCI_EA_ENABLE)) /* Entry not enabled */
2567 goto out;
2568
26635112
BH
2569 bei = (dw0 & PCI_EA_BEI) >> 4;
2570 prop = (dw0 & PCI_EA_PP) >> 8;
2571
938174e5
SS
2572 /*
2573 * If the Property is in the reserved range, try the Secondary
2574 * Property instead.
2575 */
2576 if (prop > PCI_EA_P_BRIDGE_IO && prop < PCI_EA_P_MEM_RESERVED)
26635112 2577 prop = (dw0 & PCI_EA_SP) >> 16;
938174e5
SS
2578 if (prop > PCI_EA_P_BRIDGE_IO)
2579 goto out;
2580
26635112 2581 res = pci_ea_get_resource(dev, bei, prop);
938174e5 2582 if (!res) {
7506dc79 2583 pci_err(dev, "Unsupported EA entry BEI: %u\n", bei);
938174e5
SS
2584 goto out;
2585 }
2586
2587 flags = pci_ea_flags(dev, prop);
2588 if (!flags) {
7506dc79 2589 pci_err(dev, "Unsupported EA properties: %#x\n", prop);
938174e5
SS
2590 goto out;
2591 }
2592
2593 /* Read Base */
2594 pci_read_config_dword(dev, ent_offset, &base);
2595 start = (base & PCI_EA_FIELD_MASK);
2596 ent_offset += 4;
2597
2598 /* Read MaxOffset */
2599 pci_read_config_dword(dev, ent_offset, &max_offset);
2600 ent_offset += 4;
2601
2602 /* Read Base MSBs (if 64-bit entry) */
2603 if (base & PCI_EA_IS_64) {
2604 u32 base_upper;
2605
2606 pci_read_config_dword(dev, ent_offset, &base_upper);
2607 ent_offset += 4;
2608
2609 flags |= IORESOURCE_MEM_64;
2610
2611 /* entry starts above 32-bit boundary, can't use */
2612 if (!support_64 && base_upper)
2613 goto out;
2614
2615 if (support_64)
2616 start |= ((u64)base_upper << 32);
2617 }
2618
2619 end = start + (max_offset | 0x03);
2620
2621 /* Read MaxOffset MSBs (if 64-bit entry) */
2622 if (max_offset & PCI_EA_IS_64) {
2623 u32 max_offset_upper;
2624
2625 pci_read_config_dword(dev, ent_offset, &max_offset_upper);
2626 ent_offset += 4;
2627
2628 flags |= IORESOURCE_MEM_64;
2629
2630 /* entry too big, can't use */
2631 if (!support_64 && max_offset_upper)
2632 goto out;
2633
2634 if (support_64)
2635 end += ((u64)max_offset_upper << 32);
2636 }
2637
2638 if (end < start) {
7506dc79 2639 pci_err(dev, "EA Entry crosses address boundary\n");
938174e5
SS
2640 goto out;
2641 }
2642
2643 if (ent_size != ent_offset - offset) {
7506dc79 2644 pci_err(dev, "EA Entry Size (%d) does not match length read (%d)\n",
938174e5
SS
2645 ent_size, ent_offset - offset);
2646 goto out;
2647 }
2648
2649 res->name = pci_name(dev);
2650 res->start = start;
2651 res->end = end;
2652 res->flags = flags;
597becb4
BH
2653
2654 if (bei <= PCI_EA_BEI_BAR5)
7506dc79 2655 pci_printk(KERN_DEBUG, dev, "BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
597becb4
BH
2656 bei, res, prop);
2657 else if (bei == PCI_EA_BEI_ROM)
7506dc79 2658 pci_printk(KERN_DEBUG, dev, "ROM: %pR (from Enhanced Allocation, properties %#02x)\n",
597becb4
BH
2659 res, prop);
2660 else if (bei >= PCI_EA_BEI_VF_BAR0 && bei <= PCI_EA_BEI_VF_BAR5)
7506dc79 2661 pci_printk(KERN_DEBUG, dev, "VF BAR %d: %pR (from Enhanced Allocation, properties %#02x)\n",
597becb4
BH
2662 bei - PCI_EA_BEI_VF_BAR0, res, prop);
2663 else
7506dc79 2664 pci_printk(KERN_DEBUG, dev, "BEI %d res: %pR (from Enhanced Allocation, properties %#02x)\n",
597becb4
BH
2665 bei, res, prop);
2666
938174e5
SS
2667out:
2668 return offset + ent_size;
2669}
2670
dcbb408a 2671/* Enhanced Allocation Initialization */
938174e5
SS
2672void pci_ea_init(struct pci_dev *dev)
2673{
2674 int ea;
2675 u8 num_ent;
2676 int offset;
2677 int i;
2678
2679 /* find PCI EA capability in list */
2680 ea = pci_find_capability(dev, PCI_CAP_ID_EA);
2681 if (!ea)
2682 return;
2683
2684 /* determine the number of entries */
2685 pci_bus_read_config_byte(dev->bus, dev->devfn, ea + PCI_EA_NUM_ENT,
2686 &num_ent);
2687 num_ent &= PCI_EA_NUM_ENT_MASK;
2688
2689 offset = ea + PCI_EA_FIRST_ENT;
2690
2691 /* Skip DWORD 2 for type 1 functions */
2692 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
2693 offset += 4;
2694
2695 /* parse each EA entry */
2696 for (i = 0; i < num_ent; ++i)
2697 offset = pci_ea_read(dev, offset);
2698}
2699
34a4876e
YL
2700static void pci_add_saved_cap(struct pci_dev *pci_dev,
2701 struct pci_cap_saved_state *new_cap)
2702{
2703 hlist_add_head(&new_cap->next, &pci_dev->saved_cap_space);
2704}
2705
63f4898a 2706/**
fd0f7f73
AW
2707 * _pci_add_cap_save_buffer - allocate buffer for saving given
2708 * capability registers
63f4898a
RW
2709 * @dev: the PCI device
2710 * @cap: the capability to allocate the buffer for
fd0f7f73 2711 * @extended: Standard or Extended capability ID
63f4898a
RW
2712 * @size: requested size of the buffer
2713 */
fd0f7f73
AW
2714static int _pci_add_cap_save_buffer(struct pci_dev *dev, u16 cap,
2715 bool extended, unsigned int size)
63f4898a
RW
2716{
2717 int pos;
2718 struct pci_cap_saved_state *save_state;
2719
fd0f7f73
AW
2720 if (extended)
2721 pos = pci_find_ext_capability(dev, cap);
2722 else
2723 pos = pci_find_capability(dev, cap);
2724
0a1a9b49 2725 if (!pos)
63f4898a
RW
2726 return 0;
2727
2728 save_state = kzalloc(sizeof(*save_state) + size, GFP_KERNEL);
2729 if (!save_state)
2730 return -ENOMEM;
2731
24a4742f 2732 save_state->cap.cap_nr = cap;
fd0f7f73 2733 save_state->cap.cap_extended = extended;
24a4742f 2734 save_state->cap.size = size;
63f4898a
RW
2735 pci_add_saved_cap(dev, save_state);
2736
2737 return 0;
2738}
2739
fd0f7f73
AW
2740int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size)
2741{
2742 return _pci_add_cap_save_buffer(dev, cap, false, size);
2743}
2744
2745int pci_add_ext_cap_save_buffer(struct pci_dev *dev, u16 cap, unsigned int size)
2746{
2747 return _pci_add_cap_save_buffer(dev, cap, true, size);
2748}
2749
63f4898a
RW
2750/**
2751 * pci_allocate_cap_save_buffers - allocate buffers for saving capabilities
2752 * @dev: the PCI device
2753 */
2754void pci_allocate_cap_save_buffers(struct pci_dev *dev)
2755{
2756 int error;
2757
89858517
YZ
2758 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_EXP,
2759 PCI_EXP_SAVE_REGS * sizeof(u16));
63f4898a 2760 if (error)
7506dc79 2761 pci_err(dev, "unable to preallocate PCI Express save buffer\n");
63f4898a
RW
2762
2763 error = pci_add_cap_save_buffer(dev, PCI_CAP_ID_PCIX, sizeof(u16));
2764 if (error)
7506dc79 2765 pci_err(dev, "unable to preallocate PCI-X save buffer\n");
425c1b22
AW
2766
2767 pci_allocate_vc_save_buffers(dev);
63f4898a
RW
2768}
2769
f796841e
YL
2770void pci_free_cap_save_buffers(struct pci_dev *dev)
2771{
2772 struct pci_cap_saved_state *tmp;
b67bfe0d 2773 struct hlist_node *n;
f796841e 2774
b67bfe0d 2775 hlist_for_each_entry_safe(tmp, n, &dev->saved_cap_space, next)
f796841e
YL
2776 kfree(tmp);
2777}
2778
58c3a727 2779/**
31ab2476 2780 * pci_configure_ari - enable or disable ARI forwarding
58c3a727 2781 * @dev: the PCI device
b0cc6020
YW
2782 *
2783 * If @dev and its upstream bridge both support ARI, enable ARI in the
2784 * bridge. Otherwise, disable ARI in the bridge.
58c3a727 2785 */
31ab2476 2786void pci_configure_ari(struct pci_dev *dev)
58c3a727 2787{
58c3a727 2788 u32 cap;
8113587c 2789 struct pci_dev *bridge;
58c3a727 2790
6748dcc2 2791 if (pcie_ari_disabled || !pci_is_pcie(dev) || dev->devfn)
58c3a727
YZ
2792 return;
2793
8113587c 2794 bridge = dev->bus->self;
cb97ae34 2795 if (!bridge)
8113587c
ZY
2796 return;
2797
59875ae4 2798 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
58c3a727
YZ
2799 if (!(cap & PCI_EXP_DEVCAP2_ARI))
2800 return;
2801
b0cc6020
YW
2802 if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) {
2803 pcie_capability_set_word(bridge, PCI_EXP_DEVCTL2,
2804 PCI_EXP_DEVCTL2_ARI);
2805 bridge->ari_enabled = 1;
2806 } else {
2807 pcie_capability_clear_word(bridge, PCI_EXP_DEVCTL2,
2808 PCI_EXP_DEVCTL2_ARI);
2809 bridge->ari_enabled = 0;
2810 }
58c3a727
YZ
2811}
2812
5d990b62
CW
2813static int pci_acs_enable;
2814
2815/**
2816 * pci_request_acs - ask for ACS to be enabled if supported
2817 */
2818void pci_request_acs(void)
2819{
2820 pci_acs_enable = 1;
2821}
2822
ae21ee65 2823/**
2c744244 2824 * pci_std_enable_acs - enable ACS on devices using standard ACS capabilities
ae21ee65
AK
2825 * @dev: the PCI device
2826 */
c1d61c9b 2827static void pci_std_enable_acs(struct pci_dev *dev)
ae21ee65
AK
2828{
2829 int pos;
2830 u16 cap;
2831 u16 ctrl;
2832
ae21ee65
AK
2833 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ACS);
2834 if (!pos)
c1d61c9b 2835 return;
ae21ee65
AK
2836
2837 pci_read_config_word(dev, pos + PCI_ACS_CAP, &cap);
2838 pci_read_config_word(dev, pos + PCI_ACS_CTRL, &ctrl);
2839
2840 /* Source Validation */
2841 ctrl |= (cap & PCI_ACS_SV);
2842
2843 /* P2P Request Redirect */
2844 ctrl |= (cap & PCI_ACS_RR);
2845
2846 /* P2P Completion Redirect */
2847 ctrl |= (cap & PCI_ACS_CR);
2848
2849 /* Upstream Forwarding */
2850 ctrl |= (cap & PCI_ACS_UF);
2851
2852 pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
2c744244
AW
2853}
2854
2855/**
2856 * pci_enable_acs - enable ACS if the hardware supports it
2857 * @dev: the PCI device
2858 */
2859void pci_enable_acs(struct pci_dev *dev)
2860{
2861 if (!pci_acs_enable)
2862 return;
2863
c1d61c9b 2864 if (!pci_dev_specific_enable_acs(dev))
2c744244
AW
2865 return;
2866
c1d61c9b 2867 pci_std_enable_acs(dev);
ae21ee65
AK
2868}
2869
0a67119f
AW
2870static bool pci_acs_flags_enabled(struct pci_dev *pdev, u16 acs_flags)
2871{
2872 int pos;
83db7e0b 2873 u16 cap, ctrl;
0a67119f
AW
2874
2875 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ACS);
2876 if (!pos)
2877 return false;
2878
83db7e0b
AW
2879 /*
2880 * Except for egress control, capabilities are either required
2881 * or only required if controllable. Features missing from the
2882 * capability field can therefore be assumed to be hard-wired enabled.
2883 */
2884 pci_read_config_word(pdev, pos + PCI_ACS_CAP, &cap);
2885 acs_flags &= (cap | PCI_ACS_EC);
2886
0a67119f
AW
2887 pci_read_config_word(pdev, pos + PCI_ACS_CTRL, &ctrl);
2888 return (ctrl & acs_flags) == acs_flags;
2889}
2890
ad805758
AW
2891/**
2892 * pci_acs_enabled - test ACS against required flags for a given device
2893 * @pdev: device to test
2894 * @acs_flags: required PCI ACS flags
2895 *
2896 * Return true if the device supports the provided flags. Automatically
2897 * filters out flags that are not implemented on multifunction devices.
0a67119f
AW
2898 *
2899 * Note that this interface checks the effective ACS capabilities of the
2900 * device rather than the actual capabilities. For instance, most single
2901 * function endpoints are not required to support ACS because they have no
2902 * opportunity for peer-to-peer access. We therefore return 'true'
2903 * regardless of whether the device exposes an ACS capability. This makes
2904 * it much easier for callers of this function to ignore the actual type
2905 * or topology of the device when testing ACS support.
ad805758
AW
2906 */
2907bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags)
2908{
0a67119f 2909 int ret;
ad805758
AW
2910
2911 ret = pci_dev_specific_acs_enabled(pdev, acs_flags);
2912 if (ret >= 0)
2913 return ret > 0;
2914
0a67119f
AW
2915 /*
2916 * Conventional PCI and PCI-X devices never support ACS, either
2917 * effectively or actually. The shared bus topology implies that
2918 * any device on the bus can receive or snoop DMA.
2919 */
ad805758
AW
2920 if (!pci_is_pcie(pdev))
2921 return false;
2922
0a67119f
AW
2923 switch (pci_pcie_type(pdev)) {
2924 /*
2925 * PCI/X-to-PCIe bridges are not specifically mentioned by the spec,
f7625980 2926 * but since their primary interface is PCI/X, we conservatively
0a67119f
AW
2927 * handle them as we would a non-PCIe device.
2928 */
2929 case PCI_EXP_TYPE_PCIE_BRIDGE:
2930 /*
2931 * PCIe 3.0, 6.12.1 excludes ACS on these devices. "ACS is never
2932 * applicable... must never implement an ACS Extended Capability...".
2933 * This seems arbitrary, but we take a conservative interpretation
2934 * of this statement.
2935 */
2936 case PCI_EXP_TYPE_PCI_BRIDGE:
2937 case PCI_EXP_TYPE_RC_EC:
2938 return false;
2939 /*
2940 * PCIe 3.0, 6.12.1.1 specifies that downstream and root ports should
2941 * implement ACS in order to indicate their peer-to-peer capabilities,
2942 * regardless of whether they are single- or multi-function devices.
2943 */
2944 case PCI_EXP_TYPE_DOWNSTREAM:
2945 case PCI_EXP_TYPE_ROOT_PORT:
2946 return pci_acs_flags_enabled(pdev, acs_flags);
2947 /*
2948 * PCIe 3.0, 6.12.1.2 specifies ACS capabilities that should be
2949 * implemented by the remaining PCIe types to indicate peer-to-peer
f7625980 2950 * capabilities, but only when they are part of a multifunction
0a67119f
AW
2951 * device. The footnote for section 6.12 indicates the specific
2952 * PCIe types included here.
2953 */
2954 case PCI_EXP_TYPE_ENDPOINT:
2955 case PCI_EXP_TYPE_UPSTREAM:
2956 case PCI_EXP_TYPE_LEG_END:
2957 case PCI_EXP_TYPE_RC_END:
2958 if (!pdev->multifunction)
2959 break;
2960
0a67119f 2961 return pci_acs_flags_enabled(pdev, acs_flags);
ad805758
AW
2962 }
2963
0a67119f 2964 /*
f7625980 2965 * PCIe 3.0, 6.12.1.3 specifies no ACS capabilities are applicable
0a67119f
AW
2966 * to single function devices with the exception of downstream ports.
2967 */
ad805758
AW
2968 return true;
2969}
2970
2971/**
2972 * pci_acs_path_enable - test ACS flags from start to end in a hierarchy
2973 * @start: starting downstream device
2974 * @end: ending upstream device or NULL to search to the root bus
2975 * @acs_flags: required flags
2976 *
2977 * Walk up a device tree from start to end testing PCI ACS support. If
2978 * any step along the way does not support the required flags, return false.
2979 */
2980bool pci_acs_path_enabled(struct pci_dev *start,
2981 struct pci_dev *end, u16 acs_flags)
2982{
2983 struct pci_dev *pdev, *parent = start;
2984
2985 do {
2986 pdev = parent;
2987
2988 if (!pci_acs_enabled(pdev, acs_flags))
2989 return false;
2990
2991 if (pci_is_root_bus(pdev->bus))
2992 return (end == NULL);
2993
2994 parent = pdev->bus->self;
2995 } while (pdev != end);
2996
2997 return true;
2998}
2999
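/*
 * Example: a minimal sketch of the kind of path test IOMMU grouping code
 * performs with the helper above; "pdev" and "isolated" are hypothetical.
 *
 *	u16 flags = PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF;
 *	bool isolated = pci_acs_path_enabled(pdev, NULL, flags);
 */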
276b738d
CK
3000/**
3001 * pci_rebar_find_pos - find position of resize ctrl reg for BAR
3002 * @pdev: PCI device
3003 * @bar: BAR to find
3004 *
3005 * Helper to find the position of the ctrl register for a BAR.
3006 * Returns -ENOTSUPP if resizable BARs are not supported at all.
3007 * Returns -ENOENT if no ctrl register for the BAR could be found.
3008 */
3009static int pci_rebar_find_pos(struct pci_dev *pdev, int bar)
3010{
3011 unsigned int pos, nbars, i;
3012 u32 ctrl;
3013
3014 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_REBAR);
3015 if (!pos)
3016 return -ENOTSUPP;
3017
3018 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3019 nbars = (ctrl & PCI_REBAR_CTRL_NBAR_MASK) >>
3020 PCI_REBAR_CTRL_NBAR_SHIFT;
3021
3022 for (i = 0; i < nbars; i++, pos += 8) {
3023 int bar_idx;
3024
3025 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3026 bar_idx = ctrl & PCI_REBAR_CTRL_BAR_IDX;
3027 if (bar_idx == bar)
3028 return pos;
3029 }
3030
3031 return -ENOENT;
3032}
3033
3034/**
3035 * pci_rebar_get_possible_sizes - get possible sizes for BAR
3036 * @pdev: PCI device
3037 * @bar: BAR to query
3038 *
3039 * Get the possible sizes of a resizable BAR as a bitmask defined in the spec
3040 * (bit 0=1MB, bit 19=512GB). Returns 0 if BAR isn't resizable.
3041 */
3042u32 pci_rebar_get_possible_sizes(struct pci_dev *pdev, int bar)
3043{
3044 int pos;
3045 u32 cap;
3046
3047 pos = pci_rebar_find_pos(pdev, bar);
3048 if (pos < 0)
3049 return 0;
3050
3051 pci_read_config_dword(pdev, pos + PCI_REBAR_CAP, &cap);
3052 return (cap & PCI_REBAR_CAP_SIZES) >> 4;
3053}
3054
3055/**
3056 * pci_rebar_get_current_size - get the current size of a BAR
3057 * @pdev: PCI device
3058 * @bar: BAR to query
3059 *
3060 * Read the size of a BAR from the resizable BAR config.
3061 * Returns size if found or negative error code.
3062 */
3063int pci_rebar_get_current_size(struct pci_dev *pdev, int bar)
3064{
3065 int pos;
3066 u32 ctrl;
3067
3068 pos = pci_rebar_find_pos(pdev, bar);
3069 if (pos < 0)
3070 return pos;
3071
3072 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3073 return (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> 8;
3074}
3075
3076/**
3077 * pci_rebar_set_size - set a new size for a BAR
3078 * @pdev: PCI device
3079 * @bar: BAR to set size to
3080 * @size: new size as defined in the spec (0=1MB, 19=512GB)
3081 *
3082 * Set the new size of a BAR as defined in the spec.
3083 * Returns zero if resizing was successful, error code otherwise.
3084 */
3085int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
3086{
3087 int pos;
3088 u32 ctrl;
3089
3090 pos = pci_rebar_find_pos(pdev, bar);
3091 if (pos < 0)
3092 return pos;
3093
3094 pci_read_config_dword(pdev, pos + PCI_REBAR_CTRL, &ctrl);
3095 ctrl &= ~PCI_REBAR_CTRL_BAR_SIZE;
3096 ctrl |= size << 8;
3097 pci_write_config_dword(pdev, pos + PCI_REBAR_CTRL, ctrl);
3098 return 0;
3099}
3100
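/*
 * Example: a minimal sketch of resizing BAR 0 to the largest size the
 * device advertises, using the two helpers above. Real callers must
 * first release the BAR's resource and reassign it afterwards; that
 * bookkeeping is elided here.
 *
 *	u32 sizes = pci_rebar_get_possible_sizes(pdev, 0);
 *
 *	if (sizes)
 *		pci_rebar_set_size(pdev, 0, __fls(sizes));
 */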
430a2368
JC
3101/**
3102 * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
3103 * @dev: the PCI device
3104 * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
3105 * PCI_EXP_DEVCAP2_ATOMIC_COMP32
3106 * PCI_EXP_DEVCAP2_ATOMIC_COMP64
3107 * PCI_EXP_DEVCAP2_ATOMIC_COMP128
3108 *
3109 * Return 0 if all upstream bridges support AtomicOp routing, egress
3110 * blocking is disabled on all upstream ports, and the root port supports
3111 * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
3112 * AtomicOp completion), or negative otherwise.
3113 */
3114int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
3115{
3116 struct pci_bus *bus = dev->bus;
3117 struct pci_dev *bridge;
3118 u32 cap, ctl2;
3119
3120 if (!pci_is_pcie(dev))
3121 return -EINVAL;
3122
3123 /*
3124 * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
3125 * AtomicOp requesters. For now, we only support endpoints as
3126 * requesters and root ports as completers. No endpoints as
3127 * completers, and no peer-to-peer.
3128 */
3129
3130 switch (pci_pcie_type(dev)) {
3131 case PCI_EXP_TYPE_ENDPOINT:
3132 case PCI_EXP_TYPE_LEG_END:
3133 case PCI_EXP_TYPE_RC_END:
3134 break;
3135 default:
3136 return -EINVAL;
3137 }
3138
3139 while (bus->parent) {
3140 bridge = bus->self;
3141
3142 pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
3143
3144 switch (pci_pcie_type(bridge)) {
3145 /* Ensure switch ports support AtomicOp routing */
3146 case PCI_EXP_TYPE_UPSTREAM:
3147 case PCI_EXP_TYPE_DOWNSTREAM:
3148 if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
3149 return -EINVAL;
3150 break;
3151
3152 /* Ensure root port supports all the sizes we care about */
3153 case PCI_EXP_TYPE_ROOT_PORT:
3154 if ((cap & cap_mask) != cap_mask)
3155 return -EINVAL;
3156 break;
3157 }
3158
3159 /* Ensure upstream ports don't block AtomicOps on egress */
3160 if (!bridge->has_secondary_link) {
3161 pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
3162 &ctl2);
3163 if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
3164 return -EINVAL;
3165 }
3166
3167 bus = bus->parent;
3168 }
3169
3170 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
3171 PCI_EXP_DEVCTL2_ATOMIC_REQ);
3172 return 0;
3173}
3174EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
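/*
 * Example: a minimal sketch of an endpoint driver asking for 64-bit
 * AtomicOp completion before issuing AtomicOp requests; the fallback
 * path is hypothetical.
 *
 *	if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64))
 *		dev_info(&pdev->dev, "PCIe AtomicOps unavailable, using fallback\n");
 */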
3175
57c2cf71
BH
3176/**
3177 * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
3178 * @dev: the PCI device
bb5c2de2 3179 * @pin: the INTx pin (1=INTA, 2=INTB, 3=INTC, 4=INTD)
57c2cf71
BH
3180 *
3181 * Perform INTx swizzling for a device behind one level of bridge. This is
3182 * required by section 9.1 of the PCI-to-PCI bridge specification for devices
46b952a3
MW
3183 * behind bridges on add-in cards. For devices with ARI enabled, the slot
3184 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
3185 * the PCI Express Base Specification, Revision 2.1)
57c2cf71 3186 */
3df425f3 3187u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
57c2cf71 3188{
46b952a3
MW
3189 int slot;
3190
3191 if (pci_ari_enabled(dev->bus))
3192 slot = 0;
3193 else
3194 slot = PCI_SLOT(dev->devfn);
3195
3196 return (((pin - 1) + slot) % 4) + 1;
57c2cf71
BH
3197}
3198
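/*
 * Worked example of the swizzle above: a device in slot 2 asserting INTB
 * (pin == 2) yields (((2 - 1) + 2) % 4) + 1 = 4, i.e. INTD at the
 * upstream bridge.
 */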
3c78bc61 3199int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
1da177e4
LT
3200{
3201 u8 pin;
3202
514d207d 3203 pin = dev->pin;
1da177e4
LT
3204 if (!pin)
3205 return -1;
878f2e50 3206
8784fd4d 3207 while (!pci_is_root_bus(dev->bus)) {
57c2cf71 3208 pin = pci_swizzle_interrupt_pin(dev, pin);
1da177e4
LT
3209 dev = dev->bus->self;
3210 }
3211 *bridge = dev;
3212 return pin;
3213}
3214
68feac87
BH
3215/**
3216 * pci_common_swizzle - swizzle INTx all the way to root bridge
3217 * @dev: the PCI device
3218 * @pinp: pointer to the INTx pin value (1=INTA, 2=INTB, 3=INTC, 4=INTD)
3219 *
3220 * Perform INTx swizzling for a device. This traverses through all PCI-to-PCI
3221 * bridges all the way up to a PCI root bus.
3222 */
3223u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp)
3224{
3225 u8 pin = *pinp;
3226
1eb39487 3227 while (!pci_is_root_bus(dev->bus)) {
68feac87
BH
3228 pin = pci_swizzle_interrupt_pin(dev, pin);
3229 dev = dev->bus->self;
3230 }
3231 *pinp = pin;
3232 return PCI_SLOT(dev->devfn);
3233}
e6b29dea 3234EXPORT_SYMBOL_GPL(pci_common_swizzle);
68feac87 3235
1da177e4
LT
3236/**
3237 * pci_release_region - Release a PCI BAR
3238 * @pdev: PCI device whose resources were previously reserved by pci_request_region
3239 * @bar: BAR to release
3240 *
3241 * Releases the PCI I/O and memory resources previously reserved by a
3242 * successful call to pci_request_region. Call this function only
3243 * after all use of the PCI regions has ceased.
3244 */
3245void pci_release_region(struct pci_dev *pdev, int bar)
3246{
9ac7849e
TH
3247 struct pci_devres *dr;
3248
1da177e4
LT
3249 if (pci_resource_len(pdev, bar) == 0)
3250 return;
3251 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
3252 release_region(pci_resource_start(pdev, bar),
3253 pci_resource_len(pdev, bar));
3254 else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
3255 release_mem_region(pci_resource_start(pdev, bar),
3256 pci_resource_len(pdev, bar));
9ac7849e
TH
3257
3258 dr = find_pci_dr(pdev);
3259 if (dr)
3260 dr->region_mask &= ~(1 << bar);
1da177e4 3261}
b7fe9434 3262EXPORT_SYMBOL(pci_release_region);
1da177e4
LT
3263
3264/**
f5ddcac4 3265 * __pci_request_region - Reserve PCI I/O and memory resource
1da177e4
LT
3266 * @pdev: PCI device whose resources are to be reserved
3267 * @bar: BAR to be reserved
3268 * @res_name: Name to be associated with resource.
f5ddcac4 3269 * @exclusive: whether the region access is exclusive or not
1da177e4
LT
3270 *
3271 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3272 * being reserved by owner @res_name. Do not access any
3273 * address inside the PCI regions unless this call returns
3274 * successfully.
3275 *
f5ddcac4
RD
3276 * If @exclusive is set, then the region is marked so that userspace
3277 * is explicitly not allowed to map the resource via /dev/mem or
f7625980 3278 * sysfs MMIO access.
f5ddcac4 3279 *
1da177e4
LT
3280 * Returns 0 on success, or %EBUSY on error. A warning
3281 * message is also printed on failure.
3282 */
3c78bc61
RD
3283static int __pci_request_region(struct pci_dev *pdev, int bar,
3284 const char *res_name, int exclusive)
1da177e4 3285{
9ac7849e
TH
3286 struct pci_devres *dr;
3287
1da177e4
LT
3288 if (pci_resource_len(pdev, bar) == 0)
3289 return 0;
f7625980 3290
1da177e4
LT
3291 if (pci_resource_flags(pdev, bar) & IORESOURCE_IO) {
3292 if (!request_region(pci_resource_start(pdev, bar),
3293 pci_resource_len(pdev, bar), res_name))
3294 goto err_out;
3c78bc61 3295 } else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
e8de1481
AV
3296 if (!__request_mem_region(pci_resource_start(pdev, bar),
3297 pci_resource_len(pdev, bar), res_name,
3298 exclusive))
1da177e4
LT
3299 goto err_out;
3300 }
9ac7849e
TH
3301
3302 dr = find_pci_dr(pdev);
3303 if (dr)
3304 dr->region_mask |= 1 << bar;
3305
1da177e4
LT
3306 return 0;
3307
3308err_out:
7506dc79 3309 pci_warn(pdev, "BAR %d: can't reserve %pR\n", bar,
096e6f67 3310 &pdev->resource[bar]);
1da177e4
LT
3311 return -EBUSY;
3312}
3313
e8de1481 3314/**
f5ddcac4 3315 * pci_request_region - Reserve PCI I/O and memory resource
e8de1481
AV
3316 * @pdev: PCI device whose resources are to be reserved
3317 * @bar: BAR to be reserved
f5ddcac4 3318 * @res_name: Name to be associated with resource
e8de1481 3319 *
f5ddcac4 3320 * Mark the PCI region associated with PCI device @pdev BAR @bar as
e8de1481
AV
3321 * being reserved by owner @res_name. Do not access any
3322 * address inside the PCI regions unless this call returns
3323 * successfully.
3324 *
3325 * Returns 0 on success, or %EBUSY on error. A warning
3326 * message is also printed on failure.
3327 */
3328int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
3329{
3330 return __pci_request_region(pdev, bar, res_name, 0);
3331}
b7fe9434 3332EXPORT_SYMBOL(pci_request_region);
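/*
 * Example: a minimal sketch of the reserve-then-map pattern this helper
 * supports; "my_drv" and "regs" are hypothetical names.
 *
 *	void __iomem *regs;
 *
 *	if (pci_request_region(pdev, 0, "my_drv"))
 *		return -EBUSY;
 *	regs = pci_iomap(pdev, 0, 0);
 *	...
 *	pci_iounmap(pdev, regs);
 *	pci_release_region(pdev, 0);
 */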
e8de1481
AV
3333
3334/**
3335 * pci_request_region_exclusive - Reserve PCI I/O and memory resource
3336 * @pdev: PCI device whose resources are to be reserved
3337 * @bar: BAR to be reserved
3338 * @res_name: Name to be associated with resource.
3339 *
3340 * Mark the PCI region associated with PCI device @pdev BAR @bar as
3341 * being reserved by owner @res_name. Do not access any
3342 * address inside the PCI regions unless this call returns
3343 * successfully.
3344 *
3345 * Returns 0 on success, or %EBUSY on error. A warning
3346 * message is also printed on failure.
3347 *
3348 * The key difference that _exclusive makes is that userspace is
3349 * explicitly not allowed to map the resource via /dev/mem or
f7625980 3350 * sysfs.
e8de1481 3351 */
3c78bc61
RD
3352int pci_request_region_exclusive(struct pci_dev *pdev, int bar,
3353 const char *res_name)
e8de1481
AV
3354{
3355 return __pci_request_region(pdev, bar, res_name, IORESOURCE_EXCLUSIVE);
3356}
b7fe9434
RD
3357EXPORT_SYMBOL(pci_request_region_exclusive);
3358
c87deff7
HS
3359/**
3360 * pci_release_selected_regions - Release selected PCI I/O and memory resources
3361 * @pdev: PCI device whose resources were previously reserved
3362 * @bars: Bitmask of BARs to be released
3363 *
3364 * Release selected PCI I/O and memory resources previously reserved.
3365 * Call this function only after all use of the PCI regions has ceased.
3366 */
3367void pci_release_selected_regions(struct pci_dev *pdev, int bars)
3368{
3369 int i;
3370
3371 for (i = 0; i < 6; i++)
3372 if (bars & (1 << i))
3373 pci_release_region(pdev, i);
3374}
b7fe9434 3375EXPORT_SYMBOL(pci_release_selected_regions);
c87deff7 3376
9738abed 3377static int __pci_request_selected_regions(struct pci_dev *pdev, int bars,
3c78bc61 3378 const char *res_name, int excl)
c87deff7
HS
3379{
3380 int i;
3381
3382 for (i = 0; i < 6; i++)
3383 if (bars & (1 << i))
e8de1481 3384 if (__pci_request_region(pdev, i, res_name, excl))
c87deff7
HS
3385 goto err_out;
3386 return 0;
3387
3388err_out:
3c78bc61 3389 while (--i >= 0)
c87deff7
HS
3390 if (bars & (1 << i))
3391 pci_release_region(pdev, i);
3392
3393 return -EBUSY;
3394}
1da177e4 3395
e8de1481
AV
3396
3397/**
3398 * pci_request_selected_regions - Reserve selected PCI I/O and memory resources
3399 * @pdev: PCI device whose resources are to be reserved
3400 * @bars: Bitmask of BARs to be requested
3401 * @res_name: Name to be associated with resource
3402 */
3403int pci_request_selected_regions(struct pci_dev *pdev, int bars,
3404 const char *res_name)
3405{
3406 return __pci_request_selected_regions(pdev, bars, res_name, 0);
3407}
b7fe9434 3408EXPORT_SYMBOL(pci_request_selected_regions);
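/*
 * Example: a minimal sketch of reserving only BARs 0 and 2 with the
 * selected-regions variant; "my_drv" is a hypothetical owner name.
 *
 *	int bars = BIT(0) | BIT(2);
 *
 *	if (pci_request_selected_regions(pdev, bars, "my_drv"))
 *		return -EBUSY;
 */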
e8de1481 3409
3c78bc61
RD
3410int pci_request_selected_regions_exclusive(struct pci_dev *pdev, int bars,
3411 const char *res_name)
e8de1481
AV
3412{
3413 return __pci_request_selected_regions(pdev, bars, res_name,
3414 IORESOURCE_EXCLUSIVE);
3415}
b7fe9434 3416EXPORT_SYMBOL(pci_request_selected_regions_exclusive);
e8de1481 3417
1da177e4
LT
3418/**
3419 * pci_release_regions - Release reserved PCI I/O and memory resources
3420 * @pdev: PCI device whose resources were previously reserved by pci_request_regions
3421 *
3422 * Releases all PCI I/O and memory resources previously reserved by a
3423 * successful call to pci_request_regions. Call this function only
3424 * after all use of the PCI regions has ceased.
3425 */
3426
3427void pci_release_regions(struct pci_dev *pdev)
3428{
c87deff7 3429 pci_release_selected_regions(pdev, (1 << 6) - 1);
1da177e4 3430}
b7fe9434 3431EXPORT_SYMBOL(pci_release_regions);
1da177e4
LT
3432
3433/**
3434 * pci_request_regions - Reserve PCI I/O and memory resources
3435 * @pdev: PCI device whose resources are to be reserved
3436 * @res_name: Name to be associated with resource.
3437 *
3438 * Mark all PCI regions associated with PCI device @pdev as
3439 * being reserved by owner @res_name. Do not access any
3440 * address inside the PCI regions unless this call returns
3441 * successfully.
3442 *
3443 * Returns 0 on success, or %EBUSY on error. A warning
3444 * message is also printed on failure.
3445 */
3c990e92 3446int pci_request_regions(struct pci_dev *pdev, const char *res_name)
1da177e4 3447{
c87deff7 3448 return pci_request_selected_regions(pdev, ((1 << 6) - 1), res_name);
1da177e4 3449}
b7fe9434 3450EXPORT_SYMBOL(pci_request_regions);
1da177e4 3451
e8de1481
AV
3452/**
3453 * pci_request_regions_exclusive - Reserve PCI I/O and memory resources
3454 * @pdev: PCI device whose resources are to be reserved
3455 * @res_name: Name to be associated with resource.
3456 *
3457 * Mark all PCI regions associated with PCI device @pdev as
3458 * being reserved by owner @res_name. Do not access any
3459 * address inside the PCI regions unless this call returns
3460 * successfully.
3461 *
3462 * pci_request_regions_exclusive() will mark the regions so that
f7625980 3463 * /dev/mem and the sysfs MMIO access will not be allowed.
e8de1481
AV
3464 *
3465 * Returns 0 on success, or %EBUSY on error. A warning
3466 * message is also printed on failure.
3467 */
3468int pci_request_regions_exclusive(struct pci_dev *pdev, const char *res_name)
3469{
3470 return pci_request_selected_regions_exclusive(pdev,
3471 ((1 << 6) - 1), res_name);
3472}
b7fe9434 3473EXPORT_SYMBOL(pci_request_regions_exclusive);
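
/*
 * Usage sketch (assumes a hypothetical "demo_full" driver): the common
 * pairing of pci_request_regions() in probe with pci_release_regions()
 * in remove, covering every BAR at once.
 */
static int demo_full_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int rc;

	rc = pci_enable_device(pdev);
	if (rc)
		return rc;

	rc = pci_request_regions(pdev, "demo_full");
	if (rc) {
		pci_disable_device(pdev);
		return rc;
	}
	/* ... map BARs and initialize the device ... */
	return 0;
}

static void demo_full_remove(struct pci_dev *pdev)
{
	/* ... quiesce the device ... */
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}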
e8de1481 3474
c5076cfe
TN
3475/*
3476 * Record the PCI IO range (expressed as CPU physical address + size).
3477 * Return a negative value if an error has occurred, zero otherwise.
3478 */
fcfaab30
GP
3479int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
3480 resource_size_t size)
c5076cfe 3481{
5745392e 3482 int ret = 0;
c5076cfe 3483#ifdef PCI_IOBASE
5745392e 3484 struct logic_pio_hwaddr *range;
c5076cfe 3485
5745392e
ZY
3486 if (!size || addr + size < addr)
3487 return -EINVAL;
c5076cfe 3488
c5076cfe 3489 range = kzalloc(sizeof(*range), GFP_ATOMIC);
5745392e
ZY
3490 if (!range)
3491 return -ENOMEM;
c5076cfe 3492
5745392e 3493 range->fwnode = fwnode;
c5076cfe 3494 range->size = size;
5745392e
ZY
3495 range->hw_start = addr;
3496 range->flags = LOGIC_PIO_CPU_MMIO;
c5076cfe 3497
5745392e
ZY
3498 ret = logic_pio_register_range(range);
3499 if (ret)
3500 kfree(range);
c5076cfe
TN
3501#endif
3502
5745392e 3503 return ret;
c5076cfe
TN
3504}
3505
3506phys_addr_t pci_pio_to_address(unsigned long pio)
3507{
3508 phys_addr_t address = (phys_addr_t)OF_BAD_ADDR;
3509
3510#ifdef PCI_IOBASE
5745392e 3511 if (pio >= MMIO_UPPER_LIMIT)
c5076cfe
TN
3512 return address;
3513
5745392e 3514 address = logic_pio_to_hwaddr(pio);
c5076cfe
TN
3515#endif
3516
3517 return address;
3518}
3519
3520unsigned long __weak pci_address_to_pio(phys_addr_t address)
3521{
3522#ifdef PCI_IOBASE
5745392e 3523 return logic_pio_trans_cpuaddr(address);
c5076cfe
TN
3524#else
3525 if (address > IO_SPACE_LIMIT)
3526 return (unsigned long)-1;
3527
3528 return (unsigned long) address;
3529#endif
3530}
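
/*
 * Usage sketch (hypothetical helper): where PCI_IOBASE is defined, the two
 * translations above are inverses, so a registered I/O range can be
 * round-tripped between its CPU physical address and its logical PIO token.
 */
static bool demo_pio_roundtrip(phys_addr_t io_phys)
{
	unsigned long pio = pci_address_to_pio(io_phys);

	if (pio == (unsigned long)-1)	/* not a registered I/O range */
		return false;

	return pci_pio_to_address(pio) == io_phys;
}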
3531
8b921acf
LD
3532/**
3533 * pci_remap_iospace - Remap the memory mapped I/O space
3534 * @res: Resource describing the I/O space
3535 * @phys_addr: physical address of range to be mapped
3536 *
3537 * Remap the memory mapped I/O space described by the @res
3538 * and the CPU physical address @phys_addr into virtual address space.
3539 * Only architectures that have memory mapped IO functions defined
3540 * (and the PCI_IOBASE value defined) should call this function.
3541 */
7b309aef 3542int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr)
8b921acf
LD
3543{
3544#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3545 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3546
3547 if (!(res->flags & IORESOURCE_IO))
3548 return -EINVAL;
3549
3550 if (res->end > IO_SPACE_LIMIT)
3551 return -EINVAL;
3552
3553 return ioremap_page_range(vaddr, vaddr + resource_size(res), phys_addr,
3554 pgprot_device(PAGE_KERNEL));
3555#else
3556 /* This architecture does not have memory mapped I/O space,
3557 * so this function should never be called. */
3558 WARN_ONCE(1, "This architecture does not support memory mapped I/O\n");
3559 return -ENODEV;
3560#endif
3561}
f90b0875 3562EXPORT_SYMBOL(pci_remap_iospace);
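
/*
 * Usage sketch (hypothetical host-bridge code; "io_res" and "io_phys" are
 * assumed to come from firmware resource parsing): mapping a bridge's I/O
 * window, with pci_unmap_iospace() below as the teardown counterpart.
 */
static int demo_map_io_window(struct device *dev, struct resource *io_res,
			      phys_addr_t io_phys)
{
	int rc;

	rc = pci_remap_iospace(io_res, io_phys);
	if (rc)
		dev_warn(dev, "error %d mapping I/O %pR\n", rc, io_res);

	return rc;
}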
8b921acf 3563
4d3f1384
SK
3564/**
3565 * pci_unmap_iospace - Unmap the memory mapped I/O space
3566 * @res: resource to be unmapped
3567 *
3568 * Unmap the I/O range described by @res from the CPU virtual address space.
3569 * Only architectures that have memory mapped IO functions defined
3570 * (and the PCI_IOBASE value defined) should call this function.
3571 */
3572void pci_unmap_iospace(struct resource *res)
3573{
3574#if defined(PCI_IOBASE) && defined(CONFIG_MMU)
3575 unsigned long vaddr = (unsigned long)PCI_IOBASE + res->start;
3576
3577 unmap_kernel_range(vaddr, resource_size(res));
3578#endif
3579}
f90b0875 3580EXPORT_SYMBOL(pci_unmap_iospace);
4d3f1384 3581
490cb6dd
LP
3582/**
3583 * devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
3584 * @dev: Generic device to remap IO address for
3585 * @offset: Resource address to map
3586 * @size: Size of map
3587 *
3588 * Managed pci_remap_cfgspace(). Map is automatically unmapped on driver
3589 * detach.
3590 */
3591void __iomem *devm_pci_remap_cfgspace(struct device *dev,
3592 resource_size_t offset,
3593 resource_size_t size)
3594{
3595 void __iomem **ptr, *addr;
3596
3597 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
3598 if (!ptr)
3599 return NULL;
3600
3601 addr = pci_remap_cfgspace(offset, size);
3602 if (addr) {
3603 *ptr = addr;
3604 devres_add(dev, ptr);
3605 } else
3606 devres_free(ptr);
3607
3608 return addr;
3609}
3610EXPORT_SYMBOL(devm_pci_remap_cfgspace);
3611
3612/**
3613 * devm_pci_remap_cfg_resource - check, request region and ioremap cfg resource
3614 * @dev: generic device to handle the resource for
3615 * @res: configuration space resource to be handled
3616 *
3617 * Checks that a resource is a valid memory region, requests the memory
3618 * region, and ioremaps it with the pci_remap_cfgspace() API, which
3619 * guarantees the proper PCI configuration space memory attributes.
3620 *
3621 * All operations are managed and will be undone on driver detach.
3622 *
3623 * Returns a pointer to the remapped memory or an ERR_PTR() encoded error code
505fb746 3624 * on failure. Usage example::
490cb6dd
LP
3625 *
3626 * res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
3627 * base = devm_pci_remap_cfg_resource(&pdev->dev, res);
3628 * if (IS_ERR(base))
3629 * return PTR_ERR(base);
3630 */
3631void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
3632 struct resource *res)
3633{
3634 resource_size_t size;
3635 const char *name;
3636 void __iomem *dest_ptr;
3637
3638 BUG_ON(!dev);
3639
3640 if (!res || resource_type(res) != IORESOURCE_MEM) {
3641 dev_err(dev, "invalid resource\n");
3642 return IOMEM_ERR_PTR(-EINVAL);
3643 }
3644
3645 size = resource_size(res);
3646 name = res->name ?: dev_name(dev);
3647
3648 if (!devm_request_mem_region(dev, res->start, size, name)) {
3649 dev_err(dev, "can't request region for resource %pR\n", res);
3650 return IOMEM_ERR_PTR(-EBUSY);
3651 }
3652
3653 dest_ptr = devm_pci_remap_cfgspace(dev, res->start, size);
3654 if (!dest_ptr) {
3655 dev_err(dev, "ioremap failed for resource %pR\n", res);
3656 devm_release_mem_region(dev, res->start, size);
3657 dest_ptr = IOMEM_ERR_PTR(-ENOMEM);
3658 }
3659
3660 return dest_ptr;
3661}
3662EXPORT_SYMBOL(devm_pci_remap_cfg_resource);
3663
6a479079
BH
3664static void __pci_set_master(struct pci_dev *dev, bool enable)
3665{
3666 u16 old_cmd, cmd;
3667
3668 pci_read_config_word(dev, PCI_COMMAND, &old_cmd);
3669 if (enable)
3670 cmd = old_cmd | PCI_COMMAND_MASTER;
3671 else
3672 cmd = old_cmd & ~PCI_COMMAND_MASTER;
3673 if (cmd != old_cmd) {
7506dc79 3674 pci_dbg(dev, "%s bus mastering\n",
6a479079
BH
3675 enable ? "enabling" : "disabling");
3676 pci_write_config_word(dev, PCI_COMMAND, cmd);
3677 }
3678 dev->is_busmaster = enable;
3679}
e8de1481 3680
2b6f2c35
MS
3681/**
3682 * pcibios_setup - process "pci=" kernel boot arguments
3683 * @str: string used to pass in "pci=" kernel boot arguments
3684 *
3685 * Process kernel boot arguments. This is the default implementation.
3686 * Architecture specific implementations can override this as necessary.
3687 */
3688char * __weak __init pcibios_setup(char *str)
3689{
3690 return str;
3691}
3692
96c55900
MS
3693/**
3694 * pcibios_set_master - enable PCI bus-mastering for device dev
3695 * @dev: the PCI device to enable
3696 *
3697 * Enables PCI bus-mastering for the device. This is the default
3698 * implementation. Architecture specific implementations can override
3699 * this if necessary.
3700 */
3701void __weak pcibios_set_master(struct pci_dev *dev)
3702{
3703 u8 lat;
3704
f676678f
MS
3705 /* The latency timer doesn't apply to PCIe (either Type 0 or Type 1) */
3706 if (pci_is_pcie(dev))
3707 return;
3708
96c55900
MS
3709 pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
3710 if (lat < 16)
3711 lat = (64 <= pcibios_max_latency) ? 64 : pcibios_max_latency;
3712 else if (lat > pcibios_max_latency)
3713 lat = pcibios_max_latency;
3714 else
3715 return;
a006482b 3716
96c55900
MS
3717 pci_write_config_byte(dev, PCI_LATENCY_TIMER, lat);
3718}
3719
1da177e4
LT
3720/**
3721 * pci_set_master - enables bus-mastering for device dev
3722 * @dev: the PCI device to enable
3723 *
3724 * Enables bus-mastering on the device and calls pcibios_set_master()
3725 * to do the needed arch specific settings.
3726 */
6a479079 3727void pci_set_master(struct pci_dev *dev)
1da177e4 3728{
6a479079 3729 __pci_set_master(dev, true);
1da177e4
LT
3730 pcibios_set_master(dev);
3731}
b7fe9434 3732EXPORT_SYMBOL(pci_set_master);
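
/*
 * Usage sketch (illustrative; needs <linux/dma-mapping.h>): bus mastering
 * must be enabled before a device may DMA, typically right after the DMA
 * mask is configured.
 */
static int demo_enable_dma(struct pci_dev *pdev)
{
	int rc;

	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (rc)
		return rc;

	pci_set_master(pdev);	/* sets PCI_COMMAND_MASTER */
	return 0;
}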
1da177e4 3733
6a479079
BH
3734/**
3735 * pci_clear_master - disables bus-mastering for device dev
3736 * @dev: the PCI device to disable
3737 */
3738void pci_clear_master(struct pci_dev *dev)
3739{
3740 __pci_set_master(dev, false);
3741}
b7fe9434 3742EXPORT_SYMBOL(pci_clear_master);
6a479079 3743
1da177e4 3744/**
edb2d97e
MW
3745 * pci_set_cacheline_size - ensure the CACHE_LINE_SIZE register is programmed
3746 * @dev: the PCI device for which MWI is to be enabled
1da177e4 3747 *
edb2d97e
MW
3748 * Helper function for pci_set_mwi.
3749 * Originally copied from drivers/net/acenic.c.
1da177e4
LT
3750 * Copyright 1998-2001 by Jes Sorensen, <jes@trained-monkey.org>.
3751 *
3752 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3753 */
15ea76d4 3754int pci_set_cacheline_size(struct pci_dev *dev)
1da177e4
LT
3755{
3756 u8 cacheline_size;
3757
3758 if (!pci_cache_line_size)
15ea76d4 3759 return -EINVAL;
1da177e4
LT
3760
3761 /* Validate current setting: the PCI_CACHE_LINE_SIZE must be
3762 * equal to or a multiple of the right value. */
3763 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3764 if (cacheline_size >= pci_cache_line_size &&
3765 (cacheline_size % pci_cache_line_size) == 0)
3766 return 0;
3767
3768 /* Write the correct value. */
3769 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, pci_cache_line_size);
3770 /* Read it back. */
3771 pci_read_config_byte(dev, PCI_CACHE_LINE_SIZE, &cacheline_size);
3772 if (cacheline_size == pci_cache_line_size)
3773 return 0;
3774
7506dc79 3775 pci_printk(KERN_DEBUG, dev, "cache line size of %d is not supported\n",
227f0647 3776 pci_cache_line_size << 2);
1da177e4
LT
3777
3778 return -EINVAL;
3779}
15ea76d4
TH
3780EXPORT_SYMBOL_GPL(pci_set_cacheline_size);
3781
1da177e4
LT
3782/**
3783 * pci_set_mwi - enables memory-write-invalidate PCI transaction
3784 * @dev: the PCI device for which MWI is enabled
3785 *
694625c0 3786 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
1da177e4
LT
3787 *
3788 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3789 */
3c78bc61 3790int pci_set_mwi(struct pci_dev *dev)
1da177e4 3791{
b7fe9434
RD
3792#ifdef PCI_DISABLE_MWI
3793 return 0;
3794#else
1da177e4
LT
3795 int rc;
3796 u16 cmd;
3797
edb2d97e 3798 rc = pci_set_cacheline_size(dev);
1da177e4
LT
3799 if (rc)
3800 return rc;
3801
3802 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3c78bc61 3803 if (!(cmd & PCI_COMMAND_INVALIDATE)) {
7506dc79 3804 pci_dbg(dev, "enabling Mem-Wr-Inval\n");
1da177e4
LT
3805 cmd |= PCI_COMMAND_INVALIDATE;
3806 pci_write_config_word(dev, PCI_COMMAND, cmd);
3807 }
1da177e4 3808 return 0;
b7fe9434 3809#endif
1da177e4 3810}
b7fe9434 3811EXPORT_SYMBOL(pci_set_mwi);
1da177e4 3812
fc0f9f4d
HK
3813/**
3814 * pcim_set_mwi - a device-managed pci_set_mwi()
3815 * @dev: the PCI device for which MWI is enabled
3816 *
3817 * Managed pci_set_mwi().
3818 *
3819 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3820 */
3821int pcim_set_mwi(struct pci_dev *dev)
3822{
3823 struct pci_devres *dr;
3824
3825 dr = find_pci_dr(dev);
3826 if (!dr)
3827 return -ENOMEM;
3828
3829 dr->mwi = 1;
3830 return pci_set_mwi(dev);
3831}
3832EXPORT_SYMBOL(pcim_set_mwi);
3833
694625c0
RD
3834/**
3835 * pci_try_set_mwi - enables memory-write-invalidate PCI transaction
3836 * @dev: the PCI device for which MWI is enabled
3837 *
3838 * Enables the Memory-Write-Invalidate transaction in %PCI_COMMAND.
3839 * Callers are not required to check the return value.
3840 *
3841 * RETURNS: An appropriate -ERRNO error value on error, or zero for success.
3842 */
3843int pci_try_set_mwi(struct pci_dev *dev)
3844{
b7fe9434
RD
3845#ifdef PCI_DISABLE_MWI
3846 return 0;
3847#else
3848 return pci_set_mwi(dev);
3849#endif
694625c0 3850}
b7fe9434 3851EXPORT_SYMBOL(pci_try_set_mwi);
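
/*
 * Usage sketch: since MWI is purely a performance optimization, drivers
 * normally use the _try_ variant and ignore the result.
 */
static void demo_enable_mwi(struct pci_dev *pdev)
{
	pci_try_set_mwi(pdev);	/* best effort; failure is harmless */
}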
694625c0 3852
1da177e4
LT
3853/**
3854 * pci_clear_mwi - disables Memory-Write-Invalidate for device dev
3855 * @dev: the PCI device to disable
3856 *
3857 * Disables PCI Memory-Write-Invalidate transaction on the device
3858 */
3c78bc61 3859void pci_clear_mwi(struct pci_dev *dev)
1da177e4 3860{
b7fe9434 3861#ifndef PCI_DISABLE_MWI
1da177e4
LT
3862 u16 cmd;
3863
3864 pci_read_config_word(dev, PCI_COMMAND, &cmd);
3865 if (cmd & PCI_COMMAND_INVALIDATE) {
3866 cmd &= ~PCI_COMMAND_INVALIDATE;
3867 pci_write_config_word(dev, PCI_COMMAND, cmd);
3868 }
b7fe9434 3869#endif
1da177e4 3870}
b7fe9434 3871EXPORT_SYMBOL(pci_clear_mwi);
1da177e4 3872
a04ce0ff
BR
3873/**
3874 * pci_intx - enables/disables PCI INTx for device dev
8f7020d3
RD
3875 * @pdev: the PCI device to operate on
3876 * @enable: boolean: whether to enable or disable PCI INTx
a04ce0ff
BR
3877 *
3878 * Enables/disables PCI INTx for device dev
3879 */
3c78bc61 3880void pci_intx(struct pci_dev *pdev, int enable)
a04ce0ff
BR
3881{
3882 u16 pci_command, new;
3883
3884 pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
3885
3c78bc61 3886 if (enable)
a04ce0ff 3887 new = pci_command & ~PCI_COMMAND_INTX_DISABLE;
3c78bc61 3888 else
a04ce0ff 3889 new = pci_command | PCI_COMMAND_INTX_DISABLE;
a04ce0ff
BR
3890
3891 if (new != pci_command) {
9ac7849e
TH
3892 struct pci_devres *dr;
3893
2fd9d74b 3894 pci_write_config_word(pdev, PCI_COMMAND, new);
9ac7849e
TH
3895
3896 dr = find_pci_dr(pdev);
3897 if (dr && !dr->restore_intx) {
3898 dr->restore_intx = 1;
3899 dr->orig_intx = !enable;
3900 }
a04ce0ff
BR
3901 }
3902}
b7fe9434 3903EXPORT_SYMBOL_GPL(pci_intx);
a04ce0ff 3904
a2e27787
JK
3905static bool pci_check_and_set_intx_mask(struct pci_dev *dev, bool mask)
3906{
3907 struct pci_bus *bus = dev->bus;
3908 bool mask_updated = true;
3909 u32 cmd_status_dword;
3910 u16 origcmd, newcmd;
3911 unsigned long flags;
3912 bool irq_pending;
3913
3914 /*
3915 * We do a single dword read to retrieve both command and status.
3916 * Document assumptions that make this possible.
3917 */
3918 BUILD_BUG_ON(PCI_COMMAND % 4);
3919 BUILD_BUG_ON(PCI_COMMAND + 2 != PCI_STATUS);
3920
3921 raw_spin_lock_irqsave(&pci_lock, flags);
3922
3923 bus->ops->read(bus, dev->devfn, PCI_COMMAND, 4, &cmd_status_dword);
3924
3925 irq_pending = (cmd_status_dword >> 16) & PCI_STATUS_INTERRUPT;
3926
3927 /*
3928 * Check interrupt status register to see whether our device
3929 * triggered the interrupt (when masking) or the next IRQ is
3930 * already pending (when unmasking).
3931 */
3932 if (mask != irq_pending) {
3933 mask_updated = false;
3934 goto done;
3935 }
3936
3937 origcmd = cmd_status_dword;
3938 newcmd = origcmd & ~PCI_COMMAND_INTX_DISABLE;
3939 if (mask)
3940 newcmd |= PCI_COMMAND_INTX_DISABLE;
3941 if (newcmd != origcmd)
3942 bus->ops->write(bus, dev->devfn, PCI_COMMAND, 2, newcmd);
3943
3944done:
3945 raw_spin_unlock_irqrestore(&pci_lock, flags);
3946
3947 return mask_updated;
3948}
3949
3950/**
3951 * pci_check_and_mask_intx - mask INTx on pending interrupt
6e9292c5 3952 * @dev: the PCI device to operate on
a2e27787
JK
3953 *
3954 * Check if the device dev has its INTx line asserted, mask it and
99b3c58f 3955 * return true in that case. False is returned if no interrupt was
a2e27787
JK
3956 * pending.
3957 */
3958bool pci_check_and_mask_intx(struct pci_dev *dev)
3959{
3960 return pci_check_and_set_intx_mask(dev, true);
3961}
3962EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
3963
3964/**
ebd50b93 3965 * pci_check_and_unmask_intx - unmask INTx if no interrupt is pending
6e9292c5 3966 * @dev: the PCI device to operate on
a2e27787
JK
3967 *
3968 * Check if the device dev has its INTx line asserted, unmask it if not
3969 * and return true. False is returned and the mask remains active if
3970 * there was still an interrupt pending.
3971 */
3972bool pci_check_and_unmask_intx(struct pci_dev *dev)
3973{
3974 return pci_check_and_set_intx_mask(dev, false);
3975}
3976EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
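
/*
 * Usage sketch (hypothetical "demo" device registered with
 * request_threaded_irq() on a shared INTx line): mask in the hard IRQ
 * handler, service the device, then unmask from the thread.
 */
static irqreturn_t demo_hardirq(int irq, void *data)
{
	struct pci_dev *pdev = data;

	if (!pci_check_and_mask_intx(pdev))
		return IRQ_NONE;	/* interrupt was not ours */

	return IRQ_WAKE_THREAD;
}

static irqreturn_t demo_irq_thread(int irq, void *data)
{
	struct pci_dev *pdev = data;

	/* ... service the device, clear the interrupt condition ... */

	pci_check_and_unmask_intx(pdev);
	return IRQ_HANDLED;
}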
3977
3775a209
CL
3978/**
3979 * pci_wait_for_pending_transaction - waits for pending transaction
3980 * @dev: the PCI device to operate on
3981 *
3982 * Return 0 if transaction is pending, 1 otherwise.
3983 */
3984int pci_wait_for_pending_transaction(struct pci_dev *dev)
8dd7f803 3985{
157e876f
AW
3986 if (!pci_is_pcie(dev))
3987 return 1;
8c1c699f 3988
d0b4cc4e
GS
3989 return pci_wait_for_pending(dev, pci_pcie_cap(dev) + PCI_EXP_DEVSTA,
3990 PCI_EXP_DEVSTA_TRPND);
3775a209
CL
3991}
3992EXPORT_SYMBOL(pci_wait_for_pending_transaction);
3993
a2758b6b 3994static int pci_dev_wait(struct pci_dev *dev, char *reset_type, int timeout)
5adecf81 3995{
a2758b6b 3996 int delay = 1;
5adecf81
AW
3997 u32 id;
3998
821cdad5 3999 /*
a2758b6b 4000 * After reset, the device should not silently discard config
821cdad5
SK
4001 * requests, but it may still indicate that it needs more time by
4002 * responding to them with CRS completions. The Root Port will
4003 * generally synthesize ~0 data to complete the read (except when
4004 * CRS SV is enabled and the read was for the Vendor ID; in that
4005 * case it synthesizes 0x0001 data).
4006 *
4007 * Wait for the device to return a non-CRS completion. Read the
4008 * Command register instead of Vendor ID so we don't have to
4009 * contend with the CRS SV value.
4010 */
4011 pci_read_config_dword(dev, PCI_COMMAND, &id);
4012 while (id == ~0) {
4013 if (delay > timeout) {
a2758b6b
SK
4014 pci_warn(dev, "not ready %dms after %s; giving up\n",
4015 delay - 1, reset_type);
91295d79 4016 return -ENOTTY;
821cdad5
SK
4017 }
4018
4019 if (delay > 1000)
a2758b6b
SK
4020 pci_info(dev, "not ready %dms after %s; waiting\n",
4021 delay - 1, reset_type);
821cdad5
SK
4022
4023 msleep(delay);
4024 delay *= 2;
5adecf81 4025 pci_read_config_dword(dev, PCI_COMMAND, &id);
821cdad5 4026 }
5adecf81 4027
821cdad5 4028 if (delay > 1000)
a2758b6b
SK
4029 pci_info(dev, "ready %dms after %s\n", delay - 1,
4030 reset_type);
91295d79
SK
4031
4032 return 0;
5adecf81
AW
4033}
4034
a60a2b73
CH
4035/**
4036 * pcie_has_flr - check if a device supports function level resets
4037 * @dev: device to check
4038 *
4039 * Returns true if the device advertises support for PCIe function level
4040 * resets.
4041 */
4042static bool pcie_has_flr(struct pci_dev *dev)
3775a209
CL
4043{
4044 u32 cap;
4045
f65fd1aa 4046 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
a60a2b73 4047 return false;
3775a209 4048
a60a2b73
CH
4049 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
4050 return cap & PCI_EXP_DEVCAP_FLR;
4051}
3775a209 4052
a60a2b73
CH
4053/**
4054 * pcie_flr - initiate a PCIe function level reset
4055 * @dev: device to reset
4056 *
4057 * Initiate a function level reset on @dev. The caller should ensure the
4058 * device supports FLR before calling this function, e.g. by using the
4059 * pcie_has_flr() helper.
4060 */
91295d79 4061int pcie_flr(struct pci_dev *dev)
a60a2b73 4062{
3775a209 4063 if (!pci_wait_for_pending_transaction(dev))
7506dc79 4064 pci_err(dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
8c1c699f 4065
59875ae4 4066 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
a2758b6b
SK
4067
4068 /*
4069 * Per PCIe r4.0, sec 6.6.2, a device must complete an FLR within
4070 * 100ms, but may silently discard requests while the FLR is in
4071 * progress. Wait 100ms before trying to access the device.
4072 */
4073 msleep(100);
4074
4075 return pci_dev_wait(dev, "FLR", PCIE_RESET_READY_POLL_MS);
8dd7f803 4076}
a60a2b73 4077EXPORT_SYMBOL_GPL(pcie_flr);
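
/*
 * Usage sketch: probe for FLR support before issuing the reset, as
 * __pci_reset_function_locked() below does. Note that pcie_has_flr() is
 * static to this file, so external callers go through the
 * pci_reset_function() family rather than calling pcie_flr() blindly.
 */
static int demo_try_flr(struct pci_dev *dev)
{
	if (!pcie_has_flr(dev))
		return -ENOTTY;

	return pcie_flr(dev);
}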
d91cdc74 4078
8c1c699f 4079static int pci_af_flr(struct pci_dev *dev, int probe)
1ca88797 4080{
8c1c699f 4081 int pos;
1ca88797
SY
4082 u8 cap;
4083
8c1c699f
YZ
4084 pos = pci_find_capability(dev, PCI_CAP_ID_AF);
4085 if (!pos)
1ca88797 4086 return -ENOTTY;
8c1c699f 4087
f65fd1aa
SN
4088 if (dev->dev_flags & PCI_DEV_FLAGS_NO_FLR_RESET)
4089 return -ENOTTY;
4090
8c1c699f 4091 pci_read_config_byte(dev, pos + PCI_AF_CAP, &cap);
1ca88797
SY
4092 if (!(cap & PCI_AF_CAP_TP) || !(cap & PCI_AF_CAP_FLR))
4093 return -ENOTTY;
4094
4095 if (probe)
4096 return 0;
4097
d066c946
AW
4098 /*
4099 * Wait for Transaction Pending bit to clear. A word-aligned test
4100 * is used, so we use the control offset rather than status and shift
4101 * the test bit to match.
4102 */
bb383e28 4103 if (!pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
d066c946 4104 PCI_AF_STATUS_TP << 8))
7506dc79 4105 pci_err(dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
5fe5db05 4106
8c1c699f 4107 pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
a2758b6b
SK
4108
4109 /*
4110 * Per Advanced Capabilities for Conventional PCI ECN, 13 April 2006,
4111 * updated 27 July 2006; a device must complete an FLR within
4112 * 100ms, but may silently discard requests while the FLR is in
4113 * progress. Wait 100ms before trying to access the device.
4114 */
4115 msleep(100);
4116
4117 return pci_dev_wait(dev, "AF_FLR", PCIE_RESET_READY_POLL_MS);
1ca88797
SY
4118}
4119
83d74e03
RW
4120/**
4121 * pci_pm_reset - Put device into PCI_D3 and back into PCI_D0.
4122 * @dev: Device to reset.
4123 * @probe: If set, only check if the device can be reset this way.
4124 *
4125 * If @dev supports native PCI PM and its PCI_PM_CTRL_NO_SOFT_RESET flag is
4126 * unset, it will be reinitialized internally when going from PCI_D3hot to
4127 * PCI_D0. If that's the case and the device is not in a low-power state
4128 * already, force it into PCI_D3hot and back to PCI_D0, causing it to be reset.
4129 *
4130 * NOTE: This causes the caller to sleep for twice the device power transition
4131 * cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
f7625980 4132 * by default (i.e. unless the @dev's d3_delay field has a different value).
83d74e03
RW
4133 * Moreover, only devices in D0 can be reset by this function.
4134 */
f85876ba 4135static int pci_pm_reset(struct pci_dev *dev, int probe)
d91cdc74 4136{
f85876ba
YZ
4137 u16 csr;
4138
51e53738 4139 if (!dev->pm_cap || dev->dev_flags & PCI_DEV_FLAGS_NO_PM_RESET)
f85876ba 4140 return -ENOTTY;
d91cdc74 4141
f85876ba
YZ
4142 pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &csr);
4143 if (csr & PCI_PM_CTRL_NO_SOFT_RESET)
4144 return -ENOTTY;
d91cdc74 4145
f85876ba
YZ
4146 if (probe)
4147 return 0;
1ca88797 4148
f85876ba
YZ
4149 if (dev->current_state != PCI_D0)
4150 return -EINVAL;
4151
4152 csr &= ~PCI_PM_CTRL_STATE_MASK;
4153 csr |= PCI_D3hot;
4154 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 4155 pci_dev_d3_sleep(dev);
f85876ba
YZ
4156
4157 csr &= ~PCI_PM_CTRL_STATE_MASK;
4158 csr |= PCI_D0;
4159 pci_write_config_word(dev, dev->pm_cap + PCI_PM_CTRL, csr);
1ae861e6 4160 pci_dev_d3_sleep(dev);
f85876ba 4161
abbcf0e2 4162 return pci_dev_wait(dev, "PM D3->D0", PCIE_RESET_READY_POLL_MS);
f85876ba 4163}
9f5a70f1
OP
4164/**
4165 * pcie_wait_for_link - Wait until link is active or inactive
4166 * @pdev: Bridge device
4167 * @active: waiting for active or inactive?
4168 *
4169 * Use this to wait until the link becomes active or inactive.
4170 */
4171bool pcie_wait_for_link(struct pci_dev *pdev, bool active)
4172{
4173 int timeout = 1000;
4174 bool ret;
4175 u16 lnk_status;
4176
4177 for (;;) {
4178 pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lnk_status);
4179 ret = !!(lnk_status & PCI_EXP_LNKSTA_DLLLA);
4180 if (ret == active)
4181 return true;
4182 if (timeout <= 0)
4183 break;
4184 msleep(10);
4185 timeout -= 10;
4186 }
4187
4188 pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
4189 active ? "set" : "cleared");
4190
4191 return false;
4192}
f85876ba 4193
9e33002f 4194void pci_reset_secondary_bus(struct pci_dev *dev)
c12ff1df
YZ
4195{
4196 u16 ctrl;
64e8674f
AW
4197
4198 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
4199 ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
4200 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
df62ab5e 4201
de0c548c
AW
4202 /*
4203 * PCI spec v3.0 7.6.4.2 requires minimum Trst of 1ms. Double
f7625980 4204 * this to 2ms to ensure that we meet the minimum requirement.
de0c548c
AW
4205 */
4206 msleep(2);
64e8674f
AW
4207
4208 ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
4209 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
de0c548c
AW
4210
4211 /*
4212 * Trhfa for conventional PCI is 2^25 clock cycles.
4213 * Assuming a minimum 33MHz clock this results in a 1s
4214 * delay before we can consider subordinate devices to
4215 * be re-initialized. PCIe has some ways to shorten this,
4216 * but we don't make use of them yet.
4217 */
4218 ssleep(1);
64e8674f 4219}
d92a208d 4220
9e33002f
GS
4221void __weak pcibios_reset_secondary_bus(struct pci_dev *dev)
4222{
4223 pci_reset_secondary_bus(dev);
4224}
4225
d92a208d
GS
4226/**
4227 * pci_reset_bridge_secondary_bus - Reset the secondary bus on a PCI bridge.
4228 * @dev: Bridge device
4229 *
4230 * Use the bridge control register to assert reset on the secondary bus.
4231 * Devices on the secondary bus are left in power-on state.
4232 */
01fd61c0 4233int pci_reset_bridge_secondary_bus(struct pci_dev *dev)
d92a208d
GS
4234{
4235 pcibios_reset_secondary_bus(dev);
01fd61c0 4236
6b2f1351 4237 return pci_dev_wait(dev, "bus reset", PCIE_RESET_READY_POLL_MS);
d92a208d 4238}
64e8674f
AW
4239EXPORT_SYMBOL_GPL(pci_reset_bridge_secondary_bus);
4240
4241static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
4242{
c12ff1df
YZ
4243 struct pci_dev *pdev;
4244
f331a859
AW
4245 if (pci_is_root_bus(dev->bus) || dev->subordinate ||
4246 !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
c12ff1df
YZ
4247 return -ENOTTY;
4248
4249 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4250 if (pdev != dev)
4251 return -ENOTTY;
4252
4253 if (probe)
4254 return 0;
4255
64e8674f 4256 pci_reset_bridge_secondary_bus(dev->bus->self);
c12ff1df
YZ
4257
4258 return 0;
4259}
4260
608c3881
AW
4261static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
4262{
4263 int rc = -ENOTTY;
4264
4265 if (!hotplug || !try_module_get(hotplug->ops->owner))
4266 return rc;
4267
4268 if (hotplug->ops->reset_slot)
4269 rc = hotplug->ops->reset_slot(hotplug, probe);
4270
4271 module_put(hotplug->ops->owner);
4272
4273 return rc;
4274}
4275
4276static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
4277{
4278 struct pci_dev *pdev;
4279
f331a859
AW
4280 if (dev->subordinate || !dev->slot ||
4281 dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
608c3881
AW
4282 return -ENOTTY;
4283
4284 list_for_each_entry(pdev, &dev->bus->devices, bus_list)
4285 if (pdev != dev && pdev->slot == dev->slot)
4286 return -ENOTTY;
4287
4288 return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
4289}
4290
77cb985a
AW
4291static void pci_dev_lock(struct pci_dev *dev)
4292{
4293 pci_cfg_access_lock(dev);
4294 /* block PM suspend, driver probe, etc. */
4295 device_lock(&dev->dev);
4296}
4297
61cf16d8
AW
4298/* Return 1 on successful lock, 0 on contention */
4299static int pci_dev_trylock(struct pci_dev *dev)
4300{
4301 if (pci_cfg_access_trylock(dev)) {
4302 if (device_trylock(&dev->dev))
4303 return 1;
4304 pci_cfg_access_unlock(dev);
4305 }
4306
4307 return 0;
4308}
4309
77cb985a
AW
4310static void pci_dev_unlock(struct pci_dev *dev)
4311{
4312 device_unlock(&dev->dev);
4313 pci_cfg_access_unlock(dev);
4314}
4315
775755ed 4316static void pci_dev_save_and_disable(struct pci_dev *dev)
3ebe7f9f
KB
4317{
4318 const struct pci_error_handlers *err_handler =
4319 dev->driver ? dev->driver->err_handler : NULL;
3ebe7f9f 4320
b014e96d 4321 /*
775755ed 4322 * dev->driver->err_handler->reset_prepare() is protected against
b014e96d
CH
4323 * races with ->remove() by the device lock, which must be held by
4324 * the caller.
4325 */
775755ed
CH
4326 if (err_handler && err_handler->reset_prepare)
4327 err_handler->reset_prepare(dev);
3ebe7f9f 4328
a6cbaade
AW
4329 /*
4330 * Wake-up device prior to save. PM registers default to D0 after
4331 * reset and a simple register restore doesn't reliably return
4332 * to a non-D0 state anyway.
4333 */
4334 pci_set_power_state(dev, PCI_D0);
4335
77cb985a
AW
4336 pci_save_state(dev);
4337 /*
4338 * Disable the device by clearing the Command register, except for
4339 * INTx-disable which is set. This not only disables MMIO and I/O port
4340 * BARs, but also prevents the device from being Bus Master, preventing
4341 * DMA from the device including MSI/MSI-X interrupts. For PCI 2.3
4342 * compliant devices, INTx-disable prevents legacy interrupts.
4343 */
4344 pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
4345}
4346
4347static void pci_dev_restore(struct pci_dev *dev)
4348{
775755ed
CH
4349 const struct pci_error_handlers *err_handler =
4350 dev->driver ? dev->driver->err_handler : NULL;
977f857c 4351
77cb985a 4352 pci_restore_state(dev);
77cb985a 4353
775755ed
CH
4354 /*
4355 * dev->driver->err_handler->reset_done() is protected against
4356 * races with ->remove() by the device lock, which must be held by
4357 * the caller.
4358 */
4359 if (err_handler && err_handler->reset_done)
4360 err_handler->reset_done(dev);
d91cdc74 4361}
3ebe7f9f 4362
6fbf9e7a
KRW
4363/**
4364 * __pci_reset_function_locked - reset a PCI device function while holding
4365 * the @dev mutex lock.
4366 * @dev: PCI device to reset
4367 *
4368 * Some devices allow an individual function to be reset without affecting
4369 * other functions in the same device. The PCI device must be responsive
4370 * to PCI config space in order to use this function.
4371 *
4372 * The device function is presumed to be unused and the caller is holding
4373 * the device mutex lock when this function is called.
4374 * Resetting the device will make the contents of PCI configuration space
4375 * random, so any caller of this must be prepared to reinitialise the
4376 * device including MSI, bus mastering, BARs, decoding IO and memory spaces,
4377 * etc.
4378 *
4379 * Returns 0 if the device function was successfully reset or negative if the
4380 * device doesn't support resetting a single function.
4381 */
4382int __pci_reset_function_locked(struct pci_dev *dev)
4383{
52354b9d
CH
4384 int rc;
4385
4386 might_sleep();
4387
832c418a
BH
4388 /*
4389 * A reset method returns -ENOTTY if it doesn't support this device
4390 * and we should try the next method.
4391 *
4392 * If it returns 0 (success), we're finished. If it returns any
4393 * other error, we're also finished: this indicates that further
4394 * reset mechanisms might be broken on the device.
4395 */
52354b9d
CH
4396 rc = pci_dev_specific_reset(dev, 0);
4397 if (rc != -ENOTTY)
4398 return rc;
4399 if (pcie_has_flr(dev)) {
91295d79
SK
4400 rc = pcie_flr(dev);
4401 if (rc != -ENOTTY)
4402 return rc;
52354b9d
CH
4403 }
4404 rc = pci_af_flr(dev, 0);
4405 if (rc != -ENOTTY)
4406 return rc;
4407 rc = pci_pm_reset(dev, 0);
4408 if (rc != -ENOTTY)
4409 return rc;
4410 rc = pci_dev_reset_slot_function(dev, 0);
4411 if (rc != -ENOTTY)
4412 return rc;
4413 return pci_parent_bus_reset(dev, 0);
6fbf9e7a
KRW
4414}
4415EXPORT_SYMBOL_GPL(__pci_reset_function_locked);
4416
711d5779
MT
4417/**
4418 * pci_probe_reset_function - check whether the device can be safely reset
4419 * @dev: PCI device to reset
4420 *
4421 * Some devices allow an individual function to be reset without affecting
4422 * other functions in the same device. The PCI device must be responsive
4423 * to PCI config space in order to use this function.
4424 *
4425 * Returns 0 if the device function can be reset or negative if the
4426 * device doesn't support resetting a single function.
4427 */
4428int pci_probe_reset_function(struct pci_dev *dev)
4429{
52354b9d
CH
4430 int rc;
4431
4432 might_sleep();
4433
4434 rc = pci_dev_specific_reset(dev, 1);
4435 if (rc != -ENOTTY)
4436 return rc;
4437 if (pcie_has_flr(dev))
4438 return 0;
4439 rc = pci_af_flr(dev, 1);
4440 if (rc != -ENOTTY)
4441 return rc;
4442 rc = pci_pm_reset(dev, 1);
4443 if (rc != -ENOTTY)
4444 return rc;
4445 rc = pci_dev_reset_slot_function(dev, 1);
4446 if (rc != -ENOTTY)
4447 return rc;
4448
4449 return pci_parent_bus_reset(dev, 1);
711d5779
MT
4450}
4451
8dd7f803 4452/**
8c1c699f
YZ
4453 * pci_reset_function - quiesce and reset a PCI device function
4454 * @dev: PCI device to reset
8dd7f803
SY
4455 *
4456 * Some devices allow an individual function to be reset without affecting
4457 * other functions in the same device. The PCI device must be responsive
4458 * to PCI config space in order to use this function.
4459 *
4460 * This function does not just reset the PCI portion of a device, but
4461 * clears all the state associated with the device. This function differs
79e699b6
JS
4462 * from __pci_reset_function_locked() in that it saves and restores device state
4463 * over the reset and takes the PCI device lock.
8dd7f803 4464 *
8c1c699f 4465 * Returns 0 if the device function was successfully reset or negative if the
8dd7f803
SY
4466 * device doesn't support resetting a single function.
4467 */
4468int pci_reset_function(struct pci_dev *dev)
4469{
8c1c699f 4470 int rc;
8dd7f803 4471
204f4afa
BH
4472 if (!dev->reset_fn)
4473 return -ENOTTY;
8dd7f803 4474
b014e96d 4475 pci_dev_lock(dev);
77cb985a 4476 pci_dev_save_and_disable(dev);
8dd7f803 4477
52354b9d 4478 rc = __pci_reset_function_locked(dev);
8dd7f803 4479
77cb985a 4480 pci_dev_restore(dev);
b014e96d 4481 pci_dev_unlock(dev);
8dd7f803 4482
8c1c699f 4483 return rc;
8dd7f803
SY
4484}
4485EXPORT_SYMBOL_GPL(pci_reset_function);
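
/*
 * Usage sketch (illustrative): a driver recovering a wedged device. The
 * call saves config space, applies the first reset method the device
 * supports (device-specific, FLR, AF FLR, PM, slot, or parent bus reset),
 * and restores state afterwards.
 */
static int demo_recover_device(struct pci_dev *pdev)
{
	int rc;

	rc = pci_reset_function(pdev);
	if (rc)
		pci_err(pdev, "recovery reset failed: %d\n", rc);

	return rc;
}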
4486
a477b9cd
MZ
4487/**
4488 * pci_reset_function_locked - quiesce and reset a PCI device function
4489 * @dev: PCI device to reset
4490 *
4491 * Some devices allow an individual function to be reset without affecting
4492 * other functions in the same device. The PCI device must be responsive
4493 * to PCI config space in order to use this function.
4494 *
4495 * This function does not just reset the PCI portion of a device, but
4496 * clears all the state associated with the device. This function differs
79e699b6 4497 * from __pci_reset_function_locked() in that it saves and restores device state
a477b9cd
MZ
4498 * over the reset. It also differs from pci_reset_function() in that it
4499 * requires the PCI device lock to be held.
4500 *
4501 * Returns 0 if the device function was successfully reset or negative if the
4502 * device doesn't support resetting a single function.
4503 */
4504int pci_reset_function_locked(struct pci_dev *dev)
4505{
4506 int rc;
4507
204f4afa
BH
4508 if (!dev->reset_fn)
4509 return -ENOTTY;
a477b9cd
MZ
4510
4511 pci_dev_save_and_disable(dev);
4512
4513 rc = __pci_reset_function_locked(dev);
4514
4515 pci_dev_restore(dev);
4516
4517 return rc;
4518}
4519EXPORT_SYMBOL_GPL(pci_reset_function_locked);
4520
61cf16d8
AW
4521/**
4522 * pci_try_reset_function - quiesce and reset a PCI device function
4523 * @dev: PCI device to reset
4524 *
4525 * Same as above, except return -EAGAIN if unable to lock device.
4526 */
4527int pci_try_reset_function(struct pci_dev *dev)
4528{
4529 int rc;
4530
204f4afa
BH
4531 if (!dev->reset_fn)
4532 return -ENOTTY;
61cf16d8 4533
b014e96d
CH
4534 if (!pci_dev_trylock(dev))
4535 return -EAGAIN;
61cf16d8 4536
b014e96d 4537 pci_dev_save_and_disable(dev);
52354b9d 4538 rc = __pci_reset_function_locked(dev);
cb5e0d06 4539 pci_dev_restore(dev);
b014e96d 4540 pci_dev_unlock(dev);
61cf16d8 4541
61cf16d8
AW
4542 return rc;
4543}
4544EXPORT_SYMBOL_GPL(pci_try_reset_function);
4545
f331a859
AW
4546/* Do any devices on or below this bus prevent a bus reset? */
4547static bool pci_bus_resetable(struct pci_bus *bus)
4548{
4549 struct pci_dev *dev;
4550
35702778
DD
4551
4552 if (bus->self && (bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4553 return false;
4554
f331a859
AW
4555 list_for_each_entry(dev, &bus->devices, bus_list) {
4556 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4557 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4558 return false;
4559 }
4560
4561 return true;
4562}
4563
090a3c53
AW
4564/* Lock devices from the top of the tree down */
4565static void pci_bus_lock(struct pci_bus *bus)
4566{
4567 struct pci_dev *dev;
4568
4569 list_for_each_entry(dev, &bus->devices, bus_list) {
4570 pci_dev_lock(dev);
4571 if (dev->subordinate)
4572 pci_bus_lock(dev->subordinate);
4573 }
4574}
4575
4576/* Unlock devices from the bottom of the tree up */
4577static void pci_bus_unlock(struct pci_bus *bus)
4578{
4579 struct pci_dev *dev;
4580
4581 list_for_each_entry(dev, &bus->devices, bus_list) {
4582 if (dev->subordinate)
4583 pci_bus_unlock(dev->subordinate);
4584 pci_dev_unlock(dev);
4585 }
4586}
4587
61cf16d8
AW
4588/* Return 1 on successful lock, 0 on contention */
4589static int pci_bus_trylock(struct pci_bus *bus)
4590{
4591 struct pci_dev *dev;
4592
4593 list_for_each_entry(dev, &bus->devices, bus_list) {
4594 if (!pci_dev_trylock(dev))
4595 goto unlock;
4596 if (dev->subordinate) {
4597 if (!pci_bus_trylock(dev->subordinate)) {
4598 pci_dev_unlock(dev);
4599 goto unlock;
4600 }
4601 }
4602 }
4603 return 1;
4604
4605unlock:
4606 list_for_each_entry_continue_reverse(dev, &bus->devices, bus_list) {
4607 if (dev->subordinate)
4608 pci_bus_unlock(dev->subordinate);
4609 pci_dev_unlock(dev);
4610 }
4611 return 0;
4612}
4613
f331a859
AW
4614/* Do any devices on or below this slot prevent a bus reset? */
4615static bool pci_slot_resetable(struct pci_slot *slot)
4616{
4617 struct pci_dev *dev;
4618
33ba90aa
JG
4619 if (slot->bus->self &&
4620 (slot->bus->self->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET))
4621 return false;
4622
f331a859
AW
4623 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4624 if (!dev->slot || dev->slot != slot)
4625 continue;
4626 if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
4627 (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
4628 return false;
4629 }
4630
4631 return true;
4632}
4633
090a3c53
AW
4634/* Lock devices from the top of the tree down */
4635static void pci_slot_lock(struct pci_slot *slot)
4636{
4637 struct pci_dev *dev;
4638
4639 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4640 if (!dev->slot || dev->slot != slot)
4641 continue;
4642 pci_dev_lock(dev);
4643 if (dev->subordinate)
4644 pci_bus_lock(dev->subordinate);
4645 }
4646}
4647
4648/* Unlock devices from the bottom of the tree up */
4649static void pci_slot_unlock(struct pci_slot *slot)
4650{
4651 struct pci_dev *dev;
4652
4653 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4654 if (!dev->slot || dev->slot != slot)
4655 continue;
4656 if (dev->subordinate)
4657 pci_bus_unlock(dev->subordinate);
4658 pci_dev_unlock(dev);
4659 }
4660}
4661
61cf16d8
AW
4662/* Return 1 on successful lock, 0 on contention */
4663static int pci_slot_trylock(struct pci_slot *slot)
4664{
4665 struct pci_dev *dev;
4666
4667 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4668 if (!dev->slot || dev->slot != slot)
4669 continue;
4670 if (!pci_dev_trylock(dev))
4671 goto unlock;
4672 if (dev->subordinate) {
4673 if (!pci_bus_trylock(dev->subordinate)) {
4674 pci_dev_unlock(dev);
4675 goto unlock;
4676 }
4677 }
4678 }
4679 return 1;
4680
4681unlock:
4682 list_for_each_entry_continue_reverse(dev,
4683 &slot->bus->devices, bus_list) {
4684 if (!dev->slot || dev->slot != slot)
4685 continue;
4686 if (dev->subordinate)
4687 pci_bus_unlock(dev->subordinate);
4688 pci_dev_unlock(dev);
4689 }
4690 return 0;
4691}
4692
090a3c53
AW
4693/* Save and disable devices from the top of the tree down */
4694static void pci_bus_save_and_disable(struct pci_bus *bus)
4695{
4696 struct pci_dev *dev;
4697
4698 list_for_each_entry(dev, &bus->devices, bus_list) {
b014e96d 4699 pci_dev_lock(dev);
090a3c53 4700 pci_dev_save_and_disable(dev);
b014e96d 4701 pci_dev_unlock(dev);
090a3c53
AW
4702 if (dev->subordinate)
4703 pci_bus_save_and_disable(dev->subordinate);
4704 }
4705}
4706
4707/*
4708 * Restore devices from top of the tree down - parent bridges need to be
4709 * restored before we can get to subordinate devices.
4710 */
4711static void pci_bus_restore(struct pci_bus *bus)
4712{
4713 struct pci_dev *dev;
4714
4715 list_for_each_entry(dev, &bus->devices, bus_list) {
b014e96d 4716 pci_dev_lock(dev);
090a3c53 4717 pci_dev_restore(dev);
b014e96d 4718 pci_dev_unlock(dev);
090a3c53
AW
4719 if (dev->subordinate)
4720 pci_bus_restore(dev->subordinate);
4721 }
4722}
4723
4724/* Save and disable devices from the top of the tree down */
4725static void pci_slot_save_and_disable(struct pci_slot *slot)
4726{
4727 struct pci_dev *dev;
4728
4729 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4730 if (!dev->slot || dev->slot != slot)
4731 continue;
4732 pci_dev_save_and_disable(dev);
4733 if (dev->subordinate)
4734 pci_bus_save_and_disable(dev->subordinate);
4735 }
4736}
4737
4738/*
4739 * Restore devices from top of the tree down - parent bridges need to be
4740 * restored before we can get to subordinate devices.
4741 */
4742static void pci_slot_restore(struct pci_slot *slot)
4743{
4744 struct pci_dev *dev;
4745
4746 list_for_each_entry(dev, &slot->bus->devices, bus_list) {
4747 if (!dev->slot || dev->slot != slot)
4748 continue;
cb5e0d06 4749 pci_dev_lock(dev);
090a3c53 4750 pci_dev_restore(dev);
cb5e0d06 4751 pci_dev_unlock(dev);
090a3c53
AW
4752 if (dev->subordinate)
4753 pci_bus_restore(dev->subordinate);
4754 }
4755}
4756
4757static int pci_slot_reset(struct pci_slot *slot, int probe)
4758{
4759 int rc;
4760
f331a859 4761 if (!slot || !pci_slot_resetable(slot))
090a3c53
AW
4762 return -ENOTTY;
4763
4764 if (!probe)
4765 pci_slot_lock(slot);
4766
4767 might_sleep();
4768
4769 rc = pci_reset_hotplug_slot(slot->hotplug, probe);
4770
4771 if (!probe)
4772 pci_slot_unlock(slot);
4773
4774 return rc;
4775}
4776
9a3d2b9b
AW
4777/**
4778 * pci_probe_reset_slot - probe whether a PCI slot can be reset
4779 * @slot: PCI slot to probe
4780 *
4781 * Return 0 if slot can be reset, negative if a slot reset is not supported.
4782 */
4783int pci_probe_reset_slot(struct pci_slot *slot)
4784{
4785 return pci_slot_reset(slot, 1);
4786}
4787EXPORT_SYMBOL_GPL(pci_probe_reset_slot);
4788
090a3c53
AW
4789/**
4790 * pci_reset_slot - reset a PCI slot
4791 * @slot: PCI slot to reset
4792 *
4793 * A PCI bus may host multiple slots; each slot may support a reset mechanism
4794 * independent of other slots. For instance, some slots may support slot power
4795 * control. In the case of a 1:1 bus to slot architecture, this function may
4796 * wrap the bus reset to avoid spurious slot related events such as hotplug.
4797 * Generally a slot reset should be attempted before a bus reset. All of the
4798 * functions of the slot and any subordinate buses behind the slot are reset
4799 * through this function. PCI config space of all devices in the slot and
4800 * behind the slot is saved before and restored after reset.
4801 *
4802 * Return 0 on success, non-zero on error.
4803 */
4804int pci_reset_slot(struct pci_slot *slot)
4805{
4806 int rc;
4807
4808 rc = pci_slot_reset(slot, 1);
4809 if (rc)
4810 return rc;
4811
4812 pci_slot_save_and_disable(slot);
4813
4814 rc = pci_slot_reset(slot, 0);
4815
4816 pci_slot_restore(slot);
4817
4818 return rc;
4819}
4820EXPORT_SYMBOL_GPL(pci_reset_slot);
4821
61cf16d8
AW
4822/**
4823 * pci_try_reset_slot - Try to reset a PCI slot
4824 * @slot: PCI slot to reset
4825 *
4826 * Same as above except return -EAGAIN if the slot cannot be locked
4827 */
4828int pci_try_reset_slot(struct pci_slot *slot)
4829{
4830 int rc;
4831
4832 rc = pci_slot_reset(slot, 1);
4833 if (rc)
4834 return rc;
4835
4836 pci_slot_save_and_disable(slot);
4837
4838 if (pci_slot_trylock(slot)) {
4839 might_sleep();
4840 rc = pci_reset_hotplug_slot(slot->hotplug, 0);
4841 pci_slot_unlock(slot);
4842 } else
4843 rc = -EAGAIN;
4844
4845 pci_slot_restore(slot);
4846
4847 return rc;
4848}
4849EXPORT_SYMBOL_GPL(pci_try_reset_slot);
4850
090a3c53
AW
4851static int pci_bus_reset(struct pci_bus *bus, int probe)
4852{
f331a859 4853 if (!bus->self || !pci_bus_resetable(bus))
090a3c53
AW
4854 return -ENOTTY;
4855
4856 if (probe)
4857 return 0;
4858
4859 pci_bus_lock(bus);
4860
4861 might_sleep();
4862
4863 pci_reset_bridge_secondary_bus(bus->self);
4864
4865 pci_bus_unlock(bus);
4866
4867 return 0;
4868}
4869
9a3d2b9b
AW
4870/**
4871 * pci_probe_reset_bus - probe whether a PCI bus can be reset
4872 * @bus: PCI bus to probe
4873 *
4874 * Return 0 if bus can be reset, negative if a bus reset is not supported.
4875 */
4876int pci_probe_reset_bus(struct pci_bus *bus)
4877{
4878 return pci_bus_reset(bus, 1);
4879}
4880EXPORT_SYMBOL_GPL(pci_probe_reset_bus);
4881
090a3c53
AW
4882/**
4883 * pci_reset_bus - reset a PCI bus
4884 * @bus: top level PCI bus to reset
4885 *
4886 * Do a bus reset on the given bus and any subordinate buses, saving
4887 * and restoring state of all devices.
4888 *
4889 * Return 0 on success, non-zero on error.
4890 */
4891int pci_reset_bus(struct pci_bus *bus)
4892{
4893 int rc;
4894
4895 rc = pci_bus_reset(bus, 1);
4896 if (rc)
4897 return rc;
4898
4899 pci_bus_save_and_disable(bus);
4900
4901 rc = pci_bus_reset(bus, 0);
4902
4903 pci_bus_restore(bus);
4904
4905 return rc;
4906}
4907EXPORT_SYMBOL_GPL(pci_reset_bus);
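
/*
 * Usage sketch (illustrative): falling back to a full bus reset when a
 * per-function reset is unavailable, probing first to avoid disturbing
 * buses that cannot be reset.
 */
static int demo_reset_parent_bus(struct pci_dev *pdev)
{
	struct pci_bus *bus = pdev->bus;

	if (pci_probe_reset_bus(bus))
		return -ENOTTY;

	return pci_reset_bus(bus);
}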
4908
61cf16d8
AW
4909/**
4910 * pci_try_reset_bus - Try to reset a PCI bus
4911 * @bus: top level PCI bus to reset
4912 *
4913 * Same as above except return -EAGAIN if the bus cannot be locked
4914 */
4915int pci_try_reset_bus(struct pci_bus *bus)
4916{
4917 int rc;
4918
4919 rc = pci_bus_reset(bus, 1);
4920 if (rc)
4921 return rc;
4922
4923 pci_bus_save_and_disable(bus);
4924
4925 if (pci_bus_trylock(bus)) {
4926 might_sleep();
4927 pci_reset_bridge_secondary_bus(bus->self);
4928 pci_bus_unlock(bus);
4929 } else
4930 rc = -EAGAIN;
4931
4932 pci_bus_restore(bus);
4933
4934 return rc;
4935}
4936EXPORT_SYMBOL_GPL(pci_try_reset_bus);
4937
d556ad4b
PO
4938/**
4939 * pcix_get_max_mmrbc - get PCI-X maximum designed memory read byte count
4940 * @dev: PCI device to query
4941 *
4942 * Returns mmrbc: maximum designed memory read count in bytes
4943 * or appropriate error value.
4944 */
4945int pcix_get_max_mmrbc(struct pci_dev *dev)
4946{
7c9e2b1c 4947 int cap;
d556ad4b
PO
4948 u32 stat;
4949
4950 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4951 if (!cap)
4952 return -EINVAL;
4953
7c9e2b1c 4954 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
d556ad4b
PO
4955 return -EINVAL;
4956
25daeb55 4957 return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
d556ad4b
PO
4958}
4959EXPORT_SYMBOL(pcix_get_max_mmrbc);
4960
4961/**
4962 * pcix_get_mmrbc - get PCI-X maximum memory read byte count
4963 * @dev: PCI device to query
4964 *
4965 * Returns mmrbc: maximum memory read count in bytes
4966 * or appropriate error value.
4967 */
4968int pcix_get_mmrbc(struct pci_dev *dev)
4969{
7c9e2b1c 4970 int cap;
bdc2bda7 4971 u16 cmd;
d556ad4b
PO
4972
4973 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
4974 if (!cap)
4975 return -EINVAL;
4976
7c9e2b1c
DN
4977 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
4978 return -EINVAL;
d556ad4b 4979
7c9e2b1c 4980 return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
d556ad4b
PO
4981}
4982EXPORT_SYMBOL(pcix_get_mmrbc);
4983
4984/**
4985 * pcix_set_mmrbc - set PCI-X maximum memory read byte count
4986 * @dev: PCI device to query
4987 * @mmrbc: maximum memory read count in bytes
4988 * valid values are 512, 1024, 2048, 4096
4989 *
4990 * If possible, sets the maximum memory read byte count; some bridges have errata
4991 * that prevent this.
4992 */
4993int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
4994{
7c9e2b1c 4995 int cap;
bdc2bda7
DN
4996 u32 stat, v, o;
4997 u16 cmd;
d556ad4b 4998
229f5afd 4999 if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
7c9e2b1c 5000 return -EINVAL;
d556ad4b
PO
5001
5002 v = ffs(mmrbc) - 10;
5003
5004 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
5005 if (!cap)
7c9e2b1c 5006 return -EINVAL;
d556ad4b 5007
7c9e2b1c
DN
5008 if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
5009 return -EINVAL;
d556ad4b
PO
5010
5011 if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
5012 return -E2BIG;
5013
7c9e2b1c
DN
5014 if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
5015 return -EINVAL;
d556ad4b
PO
5016
5017 o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
5018 if (o != v) {
809a3bf9 5019 if (v > o && (dev->bus->bus_flags & PCI_BUS_FLAGS_NO_MMRBC))
d556ad4b
PO
5020 return -EIO;
5021
5022 cmd &= ~PCI_X_CMD_MAX_READ;
5023 cmd |= v << 2;
7c9e2b1c
DN
5024 if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
5025 return -EIO;
d556ad4b 5026 }
7c9e2b1c 5027 return 0;
d556ad4b
PO
5028}
5029EXPORT_SYMBOL(pcix_set_mmrbc);
5030
5031/**
5032 * pcie_get_readrq - get PCI Express read request size
5033 * @dev: PCI device to query
5034 *
5035 * Returns maximum memory read request in bytes
5036 * or appropriate error value.
5037 */
5038int pcie_get_readrq(struct pci_dev *dev)
5039{
d556ad4b
PO
5040 u16 ctl;
5041
59875ae4 5042 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
d556ad4b 5043
59875ae4 5044 return 128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12);
d556ad4b
PO
5045}
5046EXPORT_SYMBOL(pcie_get_readrq);
5047
5048/**
5049 * pcie_set_readrq - set PCI Express maximum memory read request
5050 * @dev: PCI device to set
42e61f4a 5051 * @rq: maximum memory read count in bytes
d556ad4b
PO
5052 * valid values are 128, 256, 512, 1024, 2048, 4096
5053 *
c9b378c7 5054 * If possible, sets the maximum memory read request in bytes.
d556ad4b
PO
5055 */
5056int pcie_set_readrq(struct pci_dev *dev, int rq)
5057{
59875ae4 5058 u16 v;
d556ad4b 5059
229f5afd 5060 if (rq < 128 || rq > 4096 || !is_power_of_2(rq))
59875ae4 5061 return -EINVAL;
d556ad4b 5062
a1c473aa
BH
5063 /*
5064 * If using the "performance" PCIe config, we clamp the
5065 * read rq size to the max packet size to prevent the
5066 * host bridge generating requests larger than we can
5067 * cope with
5068 */
5069 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
5070 int mps = pcie_get_mps(dev);
5071
a1c473aa
BH
5072 if (mps < rq)
5073 rq = mps;
5074 }
5075
5076 v = (ffs(rq) - 8) << 12;
d556ad4b 5077
59875ae4
JL
5078 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5079 PCI_EXP_DEVCTL_READRQ, v);
d556ad4b
PO
5080}
5081EXPORT_SYMBOL(pcie_set_readrq);
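
/*
 * Usage sketch: a throughput-sensitive driver raising MRRS. The 1024-byte
 * value is an illustrative assumption; it must be a power of two between
 * 128 and 4096.
 */
static void demo_tune_readrq(struct pci_dev *pdev)
{
	if (pcie_get_readrq(pdev) < 1024)
		pcie_set_readrq(pdev, 1024);
}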
5082
b03e7495
JM
5083/**
5084 * pcie_get_mps - get PCI Express maximum payload size
5085 * @dev: PCI device to query
5086 *
5087 * Returns maximum payload size in bytes
b03e7495
JM
5088 */
5089int pcie_get_mps(struct pci_dev *dev)
5090{
b03e7495
JM
5091 u16 ctl;
5092
59875ae4 5093 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
b03e7495 5094
59875ae4 5095 return 128 << ((ctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
b03e7495 5096}
f1c66c46 5097EXPORT_SYMBOL(pcie_get_mps);
b03e7495
JM
5098
5099/**
5100 * pcie_set_mps - set PCI Express maximum payload size
5101 * @dev: PCI device to set
47c08f31 5102 * @mps: maximum payload size in bytes
b03e7495
JM
5103 * valid values are 128, 256, 512, 1024, 2048, 4096
5104 *
5105 * If possible, sets the maximum payload size.
5106 */
5107int pcie_set_mps(struct pci_dev *dev, int mps)
5108{
59875ae4 5109 u16 v;
b03e7495
JM
5110
5111 if (mps < 128 || mps > 4096 || !is_power_of_2(mps))
59875ae4 5112 return -EINVAL;
b03e7495
JM
5113
5114 v = ffs(mps) - 8;
f7625980 5115 if (v > dev->pcie_mpss)
59875ae4 5116 return -EINVAL;
b03e7495
JM
5117 v <<= 5;
5118
59875ae4
JL
5119 return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
5120 PCI_EXP_DEVCTL_PAYLOAD, v);
b03e7495 5121}
f1c66c46 5122EXPORT_SYMBOL(pcie_set_mps);
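
/*
 * Usage sketch: MPS is normally programmed by the PCI core during
 * enumeration, so drivers usually only query it, e.g. to size DMA bursts;
 * "want" is a hypothetical driver requirement.
 */
static bool demo_mps_at_least(struct pci_dev *pdev, int want)
{
	return pcie_get_mps(pdev) >= want;
}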
b03e7495 5123
6db79a88
TG
5124/**
5125 * pcie_bandwidth_available - determine minimum link settings of a PCIe
5126 * device and its bandwidth limitation
5127 * @dev: PCI device to query
5128 * @limiting_dev: storage for device causing the bandwidth limitation
5129 * @speed: storage for speed of limiting device
5130 * @width: storage for width of limiting device
5131 *
5132 * Walk up the PCI device chain and find the point where the minimum
5133 * bandwidth is available. Return the bandwidth available there and (if
5134 * limiting_dev, speed, and width pointers are supplied) information about
5135 * that point. The bandwidth returned is in Mb/s, i.e., megabits/second of
5136 * raw bandwidth.
5137 */
5138u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
5139 enum pci_bus_speed *speed,
5140 enum pcie_link_width *width)
5141{
5142 u16 lnksta;
5143 enum pci_bus_speed next_speed;
5144 enum pcie_link_width next_width;
5145 u32 bw, next_bw;
5146
5147 if (speed)
5148 *speed = PCI_SPEED_UNKNOWN;
5149 if (width)
5150 *width = PCIE_LNK_WIDTH_UNKNOWN;
5151
5152 bw = 0;
5153
5154 while (dev) {
5155 pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta);
5156
5157 next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS];
5158 next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >>
5159 PCI_EXP_LNKSTA_NLW_SHIFT;
5160
5161 next_bw = next_width * PCIE_SPEED2MBS_ENC(next_speed);
5162
5163 /* Check if current device limits the total bandwidth */
5164 if (!bw || next_bw <= bw) {
5165 bw = next_bw;
5166
5167 if (limiting_dev)
5168 *limiting_dev = dev;
5169 if (speed)
5170 *speed = next_speed;
5171 if (width)
5172 *width = next_width;
5173 }
5174
5175 dev = pci_upstream_bridge(dev);
5176 }
5177
5178 return bw;
5179}
5180EXPORT_SYMBOL(pcie_bandwidth_available);
5181
6cf57be0
TG
5182/**
5183 * pcie_get_speed_cap - query for the PCI device's link speed capability
5184 * @dev: PCI device to query
5185 *
5186 * Query the PCI device speed capability. Return the maximum link speed
5187 * supported by the device.
5188 */
5189enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev)
5190{
5191 u32 lnkcap2, lnkcap;
5192
5193 /*
5194 * PCIe r4.0 sec 7.5.3.18 recommends using the Supported Link
5195 * Speeds Vector in Link Capabilities 2 when supported, falling
5196 * back to Max Link Speed in Link Capabilities otherwise.
5197 */
5198 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP2, &lnkcap2);
5199 if (lnkcap2) { /* PCIe r3.0-compliant */
5200 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
5201 return PCIE_SPEED_16_0GT;
5202 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
5203 return PCIE_SPEED_8_0GT;
5204 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
5205 return PCIE_SPEED_5_0GT;
5206 else if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
5207 return PCIE_SPEED_2_5GT;
5208 return PCI_SPEED_UNKNOWN;
5209 }
5210
5211 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5212 if (lnkcap) {
5213 if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
5214 return PCIE_SPEED_16_0GT;
5215 else if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
5216 return PCIE_SPEED_8_0GT;
5217 else if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
5218 return PCIE_SPEED_5_0GT;
5219 else if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
5220 return PCIE_SPEED_2_5GT;
5221 }
5222
5223 return PCI_SPEED_UNKNOWN;
5224}
5225
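/*
 * Illustrative sketch, not part of the original file: gate an optional
 * fast path on a Gen3-or-better link capability.  The helper name is
 * hypothetical.
 */
static bool __maybe_unused pcie_is_gen3_capable(struct pci_dev *pdev)
{
	enum pci_bus_speed cap = pcie_get_speed_cap(pdev);

	return cap == PCIE_SPEED_8_0GT || cap == PCIE_SPEED_16_0GT;
}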
c70b65fb
TG
5226/**
5227 * pcie_get_width_cap - query for the PCI device's link width capability
5228 * @dev: PCI device to query
5229 *
5230 * Query the PCI device width capability. Return the maximum link width
5231 * supported by the device.
5232 */
5233enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev)
5234{
5235 u32 lnkcap;
5236
5237 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
5238 if (lnkcap)
5239 return (lnkcap & PCI_EXP_LNKCAP_MLW) >> 4;
5240
5241 return PCIE_LNK_WIDTH_UNKNOWN;
5242}
5243
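/*
 * Illustrative sketch, not part of the original file: check whether the
 * device can negotiate at least an x8 link.  The helper name is
 * hypothetical; enum pcie_link_width values compare numerically.
 */
static bool __maybe_unused pcie_supports_x8_example(struct pci_dev *pdev)
{
	return pcie_get_width_cap(pdev) >= PCIE_LNK_X8;
}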
b852f63a
TG
5244/**
5245 * pcie_bandwidth_capable - calculate a PCI device's link bandwidth capability
5246 * @dev: PCI device
5247 * @speed: storage for link speed
5248 * @width: storage for link width
5249 *
5250 * Calculate a PCI device's link bandwidth by querying for its link speed
5251 * and width, multiplying them, and applying encoding overhead. The result
5252 * is in Mb/s, i.e., megabits/second of raw bandwidth.
5253 */
5254u32 pcie_bandwidth_capable(struct pci_dev *dev, enum pci_bus_speed *speed,
5255 enum pcie_link_width *width)
5256{
5257 *speed = pcie_get_speed_cap(dev);
5258 *width = pcie_get_width_cap(dev);
5259
5260 if (*speed == PCI_SPEED_UNKNOWN || *width == PCIE_LNK_WIDTH_UNKNOWN)
5261 return 0;
5262
5263 return *width * PCIE_SPEED2MBS_ENC(*speed);
5264}
5265
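/*
 * Illustrative sketch, not part of the original file: detect whether an
 * upstream link throttles the device below its own capability.  Passing
 * NULL for the out-parameters of pcie_bandwidth_available() is safe;
 * only the helper name here is hypothetical.
 */
static bool __maybe_unused pcie_link_is_throttled(struct pci_dev *pdev)
{
	enum pci_bus_speed speed;
	enum pcie_link_width width;
	u32 cap = pcie_bandwidth_capable(pdev, &speed, &width);

	return cap && pcie_bandwidth_available(pdev, NULL, NULL, NULL) < cap;
}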
9e506a7b
TG
5266/**
5267 * pcie_print_link_status - Report the PCI device's link speed and width
5268 * @dev: PCI device to query
5269 *
5270 * Report the available bandwidth at the device. If this is less than the
5271 * device is capable of, report the device's maximum possible bandwidth and
5272 * the upstream link that limits its performance to less than that.
5273 */
5274void pcie_print_link_status(struct pci_dev *dev)
5275{
5276 enum pcie_link_width width, width_cap;
5277 enum pci_bus_speed speed, speed_cap;
5278 struct pci_dev *limiting_dev = NULL;
5279 u32 bw_avail, bw_cap;
5280
5281 bw_cap = pcie_bandwidth_capable(dev, &speed_cap, &width_cap);
5282 bw_avail = pcie_bandwidth_available(dev, &limiting_dev, &speed, &width);
5283
5284 if (bw_avail >= bw_cap)
0cf22d6b 5285 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth (%s x%d link)\n",
9e506a7b
TG
5286 bw_cap / 1000, bw_cap % 1000,
5287 PCIE_SPEED2STR(speed_cap), width_cap);
5288 else
0cf22d6b 5289 pci_info(dev, "%u.%03u Gb/s available PCIe bandwidth, limited by %s x%d link at %s (capable of %u.%03u Gb/s with %s x%d link)\n",
9e506a7b
TG
5290 bw_avail / 1000, bw_avail % 1000,
5291 PCIE_SPEED2STR(speed), width,
5292 limiting_dev ? pci_name(limiting_dev) : "<unknown>",
5293 bw_cap / 1000, bw_cap % 1000,
5294 PCIE_SPEED2STR(speed_cap), width_cap);
5295}
5296EXPORT_SYMBOL(pcie_print_link_status);
5297
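/*
 * Illustrative sketch, not part of the original file: high-bandwidth
 * drivers typically call pcie_print_link_status() once from probe so
 * users can spot a slow slot.  The probe function name below is
 * hypothetical.
 */
static int __maybe_unused example_driver_probe(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	pcie_print_link_status(pdev);
	return 0;
}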
c87deff7
HS
5298/**
5299 * pci_select_bars - Make BAR mask from the type of resource
f95d882d 5300 * @dev: the PCI device for which the BAR mask is made
c87deff7
HS
5301 * @flags: resource type mask to be selected
5302 *
 5303 * This helper routine makes a BAR mask from the type of resource.
5304 */
5305int pci_select_bars(struct pci_dev *dev, unsigned long flags)
5306{
5307 int i, bars = 0;
5308 for (i = 0; i < PCI_NUM_RESOURCES; i++)
5309 if (pci_resource_flags(dev, i) & flags)
5310 bars |= (1 << i);
5311 return bars;
5312}
b7fe9434 5313EXPORT_SYMBOL(pci_select_bars);
c87deff7 5314
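/*
 * Illustrative sketch, not part of the original file: request only the
 * memory BARs of a device.  "example-drv" and the helper name are
 * hypothetical; pci_request_selected_regions() is a real API.
 */
static int __maybe_unused example_request_mem_bars(struct pci_dev *pdev)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);

	return pci_request_selected_regions(pdev, bars, "example-drv");
}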
95a8b6ef
MT
5315/* Some architectures require additional programming to enable VGA */
5316static arch_set_vga_state_t arch_set_vga_state;
5317
5318void __init pci_register_set_vga_state(arch_set_vga_state_t func)
5319{
5320 arch_set_vga_state = func; /* NULL disables */
5321}
5322
5323static int pci_set_vga_state_arch(struct pci_dev *dev, bool decode,
3c78bc61 5324 unsigned int command_bits, u32 flags)
95a8b6ef
MT
5325{
5326 if (arch_set_vga_state)
5327 return arch_set_vga_state(dev, decode, command_bits,
7ad35cf2 5328 flags);
95a8b6ef
MT
5329 return 0;
5330}
5331
deb2d2ec
BH
5332/**
5333 * pci_set_vga_state - set VGA decode state on device and parents if requested
19eea630
RD
5334 * @dev: the PCI device
5335 * @decode: true = enable decoding, false = disable decoding
5336 * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
3f37d622 5337 * @flags: PCI_VGA_STATE_CHANGE_DECODES and/or
3448a19d 5338 * PCI_VGA_STATE_CHANGE_BRIDGE (traverse ancestors and change bridges)
deb2d2ec
BH
5339 */
5340int pci_set_vga_state(struct pci_dev *dev, bool decode,
3448a19d 5341 unsigned int command_bits, u32 flags)
deb2d2ec
BH
5342{
5343 struct pci_bus *bus;
5344 struct pci_dev *bridge;
5345 u16 cmd;
95a8b6ef 5346 int rc;
deb2d2ec 5347
67ebd814 5348 WARN_ON((flags & PCI_VGA_STATE_CHANGE_DECODES) && (command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY)));
deb2d2ec 5349
95a8b6ef 5350 /* ARCH specific VGA enables */
3448a19d 5351 rc = pci_set_vga_state_arch(dev, decode, command_bits, flags);
95a8b6ef
MT
5352 if (rc)
5353 return rc;
5354
3448a19d
DA
5355 if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
5356 pci_read_config_word(dev, PCI_COMMAND, &cmd);
 5357 if (decode)
5358 cmd |= command_bits;
5359 else
5360 cmd &= ~command_bits;
5361 pci_write_config_word(dev, PCI_COMMAND, cmd);
5362 }
deb2d2ec 5363
3448a19d 5364 if (!(flags & PCI_VGA_STATE_CHANGE_BRIDGE))
deb2d2ec
BH
5365 return 0;
5366
5367 bus = dev->bus;
5368 while (bus) {
5369 bridge = bus->self;
5370 if (bridge) {
5371 pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
5372 &cmd);
 5373 if (decode)
5374 cmd |= PCI_BRIDGE_CTL_VGA;
5375 else
5376 cmd &= ~PCI_BRIDGE_CTL_VGA;
5377 pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
5378 cmd);
5379 }
5380 bus = bus->parent;
5381 }
5382 return 0;
5383}
5384
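/*
 * Illustrative sketch, not part of the original file: route legacy VGA
 * I/O and memory cycles to one adapter, updating its upstream bridges
 * as well, the way a VGA arbiter would.  The helper name is
 * hypothetical.
 */
static int __maybe_unused example_claim_vga(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 PCI_VGA_STATE_CHANGE_DECODES |
				 PCI_VGA_STATE_CHANGE_BRIDGE);
}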
f0af9593
BH
5385/**
5386 * pci_add_dma_alias - Add a DMA devfn alias for a device
5387 * @dev: the PCI device for which alias is added
5388 * @devfn: alias slot and function
5389 *
 5390 * This helper encodes an 8-bit devfn as a bit number in dma_alias_mask.
 5391 * It should be called early, preferably as a PCI fixup header quirk.
5392 */
5393void pci_add_dma_alias(struct pci_dev *dev, u8 devfn)
5394{
338c3149
JL
5395 if (!dev->dma_alias_mask)
5396 dev->dma_alias_mask = kcalloc(BITS_TO_LONGS(U8_MAX),
5397 sizeof(long), GFP_KERNEL);
5398 if (!dev->dma_alias_mask) {
7506dc79 5399 pci_warn(dev, "Unable to allocate DMA alias mask\n");
338c3149
JL
5400 return;
5401 }
5402
5403 set_bit(devfn, dev->dma_alias_mask);
7506dc79 5404 pci_info(dev, "Enabling fixed DMA alias to %02x.%d\n",
48c83080 5405 PCI_SLOT(devfn), PCI_FUNC(devfn));
f0af9593
BH
5406}
5407
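/*
 * Illustrative sketch, not part of the original file: a header fixup
 * quirk aliasing requests to function 0 of a device that issues DMA
 * with the wrong devfn.  The vendor/device IDs are hypothetical, so
 * the registration line is left commented out.
 */
static void __maybe_unused example_dma_alias_quirk(struct pci_dev *pdev)
{
	pci_add_dma_alias(pdev, PCI_DEVFN(PCI_SLOT(pdev->devfn), 0));
}
/* DECLARE_PCI_FIXUP_HEADER(0x1234, 0x5678, example_dma_alias_quirk); */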
338c3149
JL
5408bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2)
5409{
5410 return (dev1->dma_alias_mask &&
5411 test_bit(dev2->devfn, dev1->dma_alias_mask)) ||
5412 (dev2->dma_alias_mask &&
5413 test_bit(dev1->devfn, dev2->dma_alias_mask));
5414}
5415
8496e85c
RW
5416bool pci_device_is_present(struct pci_dev *pdev)
5417{
5418 u32 v;
5419
fe2bd75b
KB
5420 if (pci_dev_is_disconnected(pdev))
5421 return false;
8496e85c
RW
5422 return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
5423}
5424EXPORT_SYMBOL_GPL(pci_device_is_present);
5425
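/*
 * Illustrative sketch, not part of the original file: skip device
 * teardown I/O on surprise removal.  The function name and message are
 * hypothetical.
 */
static void __maybe_unused example_remove(struct pci_dev *pdev)
{
	if (!pci_device_is_present(pdev)) {
		pci_info(pdev, "device gone, skipping teardown I/O\n");
		return;
	}
	/* ... quiesce hardware via config/MMIO writes here ... */
}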
08249651
RW
5426void pci_ignore_hotplug(struct pci_dev *dev)
5427{
5428 struct pci_dev *bridge = dev->bus->self;
5429
5430 dev->ignore_hotplug = 1;
5431 /* Propagate the "ignore hotplug" setting to the parent bridge. */
5432 if (bridge)
5433 bridge->ignore_hotplug = 1;
5434}
5435EXPORT_SYMBOL_GPL(pci_ignore_hotplug);
5436
0a701aa6
YX
5437resource_size_t __weak pcibios_default_alignment(void)
5438{
5439 return 0;
5440}
5441
32a9a682
YS
5442#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
5443static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
e9d1e492 5444static DEFINE_SPINLOCK(resource_alignment_lock);
32a9a682
YS
5445
5446/**
5447 * pci_specified_resource_alignment - get resource alignment specified by user.
5448 * @dev: the PCI device to get
e3adec72 5449 * @resize: whether or not to change resources' size when reassigning alignment
32a9a682
YS
5450 *
5451 * RETURNS: Resource alignment if it is specified.
5452 * Zero if it is not specified.
5453 */
e3adec72
YX
5454static resource_size_t pci_specified_resource_alignment(struct pci_dev *dev,
5455 bool *resize)
32a9a682
YS
5456{
5457 int seg, bus, slot, func, align_order, count;
644a544f 5458 unsigned short vendor, device, subsystem_vendor, subsystem_device;
0a701aa6 5459 resource_size_t align = pcibios_default_alignment();
32a9a682
YS
5460 char *p;
5461
5462 spin_lock(&resource_alignment_lock);
5463 p = resource_alignment_param;
0a701aa6 5464 if (!*p && !align)
f0b99f70
YX
5465 goto out;
5466 if (pci_has_flag(PCI_PROBE_ONLY)) {
0a701aa6 5467 align = 0;
f0b99f70
YX
5468 pr_info_once("PCI: Ignoring requested alignments (PCI_PROBE_ONLY)\n");
5469 goto out;
5470 }
5471
32a9a682
YS
5472 while (*p) {
5473 count = 0;
5474 if (sscanf(p, "%d%n", &align_order, &count) == 1 &&
5475 p[count] == '@') {
5476 p += count + 1;
5477 } else {
5478 align_order = -1;
5479 }
644a544f
KMEE
5480 if (strncmp(p, "pci:", 4) == 0) {
 5481 /* PCI vendor/device (subvendor/subdevice) IDs are specified */
5482 p += 4;
5483 if (sscanf(p, "%hx:%hx:%hx:%hx%n",
5484 &vendor, &device, &subsystem_vendor, &subsystem_device, &count) != 4) {
5485 if (sscanf(p, "%hx:%hx%n", &vendor, &device, &count) != 2) {
5486 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: pci:%s\n",
5487 p);
5488 break;
5489 }
5490 subsystem_vendor = subsystem_device = 0;
5491 }
5492 p += count;
5493 if ((!vendor || (vendor == dev->vendor)) &&
5494 (!device || (device == dev->device)) &&
5495 (!subsystem_vendor || (subsystem_vendor == dev->subsystem_vendor)) &&
5496 (!subsystem_device || (subsystem_device == dev->subsystem_device))) {
e3adec72 5497 *resize = true;
644a544f
KMEE
5498 if (align_order == -1)
5499 align = PAGE_SIZE;
5500 else
5501 align = 1 << align_order;
5502 /* Found */
32a9a682
YS
5503 break;
5504 }
5505 }
644a544f
KMEE
5506 else {
5507 if (sscanf(p, "%x:%x:%x.%x%n",
5508 &seg, &bus, &slot, &func, &count) != 4) {
5509 seg = 0;
5510 if (sscanf(p, "%x:%x.%x%n",
5511 &bus, &slot, &func, &count) != 3) {
5512 /* Invalid format */
5513 printk(KERN_ERR "PCI: Can't parse resource_alignment parameter: %s\n",
5514 p);
5515 break;
5516 }
5517 }
5518 p += count;
5519 if (seg == pci_domain_nr(dev->bus) &&
5520 bus == dev->bus->number &&
5521 slot == PCI_SLOT(dev->devfn) &&
5522 func == PCI_FUNC(dev->devfn)) {
e3adec72 5523 *resize = true;
644a544f
KMEE
5524 if (align_order == -1)
5525 align = PAGE_SIZE;
5526 else
5527 align = 1 << align_order;
5528 /* Found */
5529 break;
5530 }
32a9a682
YS
5531 }
5532 if (*p != ';' && *p != ',') {
5533 /* End of param or invalid format */
5534 break;
5535 }
5536 p++;
5537 }
f0b99f70 5538out:
32a9a682
YS
5539 spin_unlock(&resource_alignment_lock);
5540 return align;
5541}
5542
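/*
 * Illustrative examples of the boot parameter parsed above (assumed
 * values, shown for clarity only):
 *
 *   pci=resource_alignment=12@pci:8086:9c35
 *	align BARs of all 8086:9c35 devices to 2^12 = 4096 bytes
 *
 *   pci=resource_alignment=0000:01:00.0
 *	align BARs of the device at domain 0000, bus 01, slot 00,
 *	function 0 to the default PAGE_SIZE
 */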
81a5e70e 5543static void pci_request_resource_alignment(struct pci_dev *dev, int bar,
e3adec72 5544 resource_size_t align, bool resize)
81a5e70e
BH
5545{
5546 struct resource *r = &dev->resource[bar];
5547 resource_size_t size;
5548
5549 if (!(r->flags & IORESOURCE_MEM))
5550 return;
5551
5552 if (r->flags & IORESOURCE_PCI_FIXED) {
7506dc79 5553 pci_info(dev, "BAR%d %pR: ignoring requested alignment %#llx\n",
81a5e70e
BH
5554 bar, r, (unsigned long long)align);
5555 return;
5556 }
5557
5558 size = resource_size(r);
0dde1c08
BH
5559 if (size >= align)
5560 return;
81a5e70e 5561
0dde1c08 5562 /*
e3adec72
YX
5563 * Increase the alignment of the resource. There are two ways we
5564 * can do this:
0dde1c08 5565 *
e3adec72
YX
5566 * 1) Increase the size of the resource. BARs are aligned on their
5567 * size, so when we reallocate space for this resource, we'll
5568 * allocate it with the larger alignment. This also prevents
5569 * assignment of any other BARs inside the alignment region, so
5570 * if we're requesting page alignment, this means no other BARs
5571 * will share the page.
5572 *
5573 * The disadvantage is that this makes the resource larger than
5574 * the hardware BAR, which may break drivers that compute things
5575 * based on the resource size, e.g., to find registers at a
5576 * fixed offset before the end of the BAR.
5577 *
5578 * 2) Retain the resource size, but use IORESOURCE_STARTALIGN and
5579 * set r->start to the desired alignment. By itself this
5580 * doesn't prevent other BARs being put inside the alignment
5581 * region, but if we realign *every* resource of every device in
5582 * the system, none of them will share an alignment region.
5583 *
5584 * When the user has requested alignment for only some devices via
5585 * the "pci=resource_alignment" argument, "resize" is true and we
5586 * use the first method. Otherwise we assume we're aligning all
5587 * devices and we use the second.
0dde1c08 5588 */
e3adec72 5589
7506dc79 5590 pci_info(dev, "BAR%d %pR: requesting alignment to %#llx\n",
0dde1c08 5591 bar, r, (unsigned long long)align);
81a5e70e 5592
e3adec72
YX
5593 if (resize) {
5594 r->start = 0;
5595 r->end = align - 1;
5596 } else {
5597 r->flags &= ~IORESOURCE_SIZEALIGN;
5598 r->flags |= IORESOURCE_STARTALIGN;
5599 r->start = align;
5600 r->end = r->start + size - 1;
5601 }
0dde1c08 5602 r->flags |= IORESOURCE_UNSET;
81a5e70e
BH
5603}
5604
2069ecfb
YL
5605/*
5606 * This function disables memory decoding and releases memory resources
 5607 * of the device specified by the kernel boot parameter
 5608 * 'pci=resource_alignment='. It also rounds up the size to the
 5609 * specified alignment. Later on, the kernel will assign a
 5610 * page-aligned memory resource back to the device.
5611 */
5612void pci_reassigndev_resource_alignment(struct pci_dev *dev)
5613{
5614 int i;
5615 struct resource *r;
81a5e70e 5616 resource_size_t align;
2069ecfb 5617 u16 command;
e3adec72 5618 bool resize = false;
2069ecfb 5619
62d9a78f
YX
5620 /*
5621 * VF BARs are read-only zero according to SR-IOV spec r1.1, sec
5622 * 3.4.1.11. Their resources are allocated from the space
5623 * described by the VF BARx register in the PF's SR-IOV capability.
5624 * We can't influence their alignment here.
5625 */
5626 if (dev->is_virtfn)
5627 return;
5628
10c463a7 5629 /* Check if the specified PCI device is a target to reassign */
e3adec72 5630 align = pci_specified_resource_alignment(dev, &resize);
10c463a7 5631 if (!align)
2069ecfb
YL
5632 return;
5633
5634 if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL &&
5635 (dev->class >> 8) == PCI_CLASS_BRIDGE_HOST) {
7506dc79 5636 pci_warn(dev, "Can't reassign resources to host bridge\n");
2069ecfb
YL
5637 return;
5638 }
5639
2069ecfb
YL
5640 pci_read_config_word(dev, PCI_COMMAND, &command);
5641 command &= ~PCI_COMMAND_MEMORY;
5642 pci_write_config_word(dev, PCI_COMMAND, command);
5643
81a5e70e 5644 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
e3adec72 5645 pci_request_resource_alignment(dev, i, align, resize);
f0b99f70 5646
81a5e70e
BH
5647 /*
 5648 * We need to disable the bridge's resource window
2069ecfb
YL
 5649 * so that the kernel can reassign a new resource
 5650 * window later on.
5651 */
5652 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
5653 (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
5654 for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
5655 r = &dev->resource[i];
5656 if (!(r->flags & IORESOURCE_MEM))
5657 continue;
bd064f0a 5658 r->flags |= IORESOURCE_UNSET;
2069ecfb
YL
5659 r->end = resource_size(r) - 1;
5660 r->start = 0;
5661 }
5662 pci_disable_bridge_window(dev);
5663 }
5664}
5665
9738abed 5666static ssize_t pci_set_resource_alignment_param(const char *buf, size_t count)
32a9a682
YS
5667{
5668 if (count > RESOURCE_ALIGNMENT_PARAM_SIZE - 1)
5669 count = RESOURCE_ALIGNMENT_PARAM_SIZE - 1;
5670 spin_lock(&resource_alignment_lock);
5671 strncpy(resource_alignment_param, buf, count);
5672 resource_alignment_param[count] = '\0';
5673 spin_unlock(&resource_alignment_lock);
5674 return count;
5675}
5676
9738abed 5677static ssize_t pci_get_resource_alignment_param(char *buf, size_t size)
32a9a682
YS
5678{
5679 size_t count;
5680 spin_lock(&resource_alignment_lock);
5681 count = snprintf(buf, size, "%s", resource_alignment_param);
5682 spin_unlock(&resource_alignment_lock);
5683 return count;
5684}
5685
5686static ssize_t pci_resource_alignment_show(struct bus_type *bus, char *buf)
5687{
5688 return pci_get_resource_alignment_param(buf, PAGE_SIZE);
5689}
5690
5691static ssize_t pci_resource_alignment_store(struct bus_type *bus,
5692 const char *buf, size_t count)
5693{
5694 return pci_set_resource_alignment_param(buf, count);
5695}
5696
21751a9a 5697static BUS_ATTR(resource_alignment, 0644, pci_resource_alignment_show,
32a9a682
YS
5698 pci_resource_alignment_store);
5699
5700static int __init pci_resource_alignment_sysfs_init(void)
5701{
5702 return bus_create_file(&pci_bus_type,
5703 &bus_attr_resource_alignment);
5704}
32a9a682
YS
5705late_initcall(pci_resource_alignment_sysfs_init);
5706
15856ad5 5707static void pci_no_domains(void)
32a2eea7
JG
5708{
5709#ifdef CONFIG_PCI_DOMAINS
5710 pci_domains_supported = 0;
5711#endif
5712}
5713
ae07b786 5714#ifdef CONFIG_PCI_DOMAINS_GENERIC
41e5c0f8
LD
5715static atomic_t __domain_nr = ATOMIC_INIT(-1);
5716
ae07b786 5717static int pci_get_new_domain_nr(void)
41e5c0f8
LD
5718{
5719 return atomic_inc_return(&__domain_nr);
5720}
7c674700 5721
1a4f93f7 5722static int of_pci_bus_find_domain_nr(struct device *parent)
7c674700
LP
5723{
5724 static int use_dt_domains = -1;
54c6e2dd 5725 int domain = -1;
7c674700 5726
54c6e2dd
KHC
5727 if (parent)
5728 domain = of_get_pci_domain_nr(parent->of_node);
7c674700
LP
5729 /*
5730 * Check DT domain and use_dt_domains values.
5731 *
 5732 * If the DT domain property is valid (domain >= 0) and
5733 * use_dt_domains != 0, the DT assignment is valid since this means
5734 * we have not previously allocated a domain number by using
5735 * pci_get_new_domain_nr(); we should also update use_dt_domains to
5736 * 1, to indicate that we have just assigned a domain number from
5737 * DT.
5738 *
 5739 * If the DT domain property value is not valid (i.e., domain < 0) and we
5740 * have not previously assigned a domain number from DT
5741 * (use_dt_domains != 1) we should assign a domain number by
5742 * using the:
5743 *
5744 * pci_get_new_domain_nr()
5745 *
 5746 * API and update the use_dt_domains value to keep track of the method we
5747 * are using to assign domain numbers (use_dt_domains = 0).
5748 *
5749 * All other combinations imply we have a platform that is trying
5750 * to mix domain numbers obtained from DT and pci_get_new_domain_nr(),
5751 * which is a recipe for domain mishandling and it is prevented by
5752 * invalidating the domain value (domain = -1) and printing a
5753 * corresponding error.
5754 */
5755 if (domain >= 0 && use_dt_domains) {
5756 use_dt_domains = 1;
5757 } else if (domain < 0 && use_dt_domains != 1) {
5758 use_dt_domains = 0;
5759 domain = pci_get_new_domain_nr();
5760 } else {
9df1c6ec
SL
5761 if (parent)
5762 pr_err("Node %pOF has ", parent->of_node);
5763 pr_err("Inconsistent \"linux,pci-domain\" property in DT\n");
7c674700
LP
5764 domain = -1;
5765 }
5766
9c7cb891 5767 return domain;
7c674700 5768}
1a4f93f7
TN
5769
5770int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent)
5771{
2ab51dde
TN
5772 return acpi_disabled ? of_pci_bus_find_domain_nr(parent) :
5773 acpi_pci_bus_find_domain_nr(bus);
7c674700
LP
5774}
5775#endif
41e5c0f8 5776
0ef5f8f6 5777/**
642c92da 5778 * pci_ext_cfg_avail - can we access extended PCI config space?
0ef5f8f6
AP
5779 *
5780 * Returns 1 if we can access PCI extended config space (offsets
5781 * greater than 0xff). This is the default implementation. Architecture
5782 * implementations can override this.
5783 */
642c92da 5784int __weak pci_ext_cfg_avail(void)
0ef5f8f6
AP
5785{
5786 return 1;
5787}
5788
2d1c8618
BH
5789void __weak pci_fixup_cardbus(struct pci_bus *bus)
5790{
5791}
5792EXPORT_SYMBOL(pci_fixup_cardbus);
5793
ad04d31e 5794static int __init pci_setup(char *str)
1da177e4
LT
5795{
5796 while (str) {
5797 char *k = strchr(str, ',');
5798 if (k)
5799 *k++ = 0;
5800 if (*str && (str = pcibios_setup(str)) && *str) {
309e57df
MW
5801 if (!strcmp(str, "nomsi")) {
5802 pci_no_msi();
cef74409
GK
5803 } else if (!strncmp(str, "noats", 5)) {
5804 pr_info("PCIe: ATS is disabled\n");
5805 pcie_ats_disabled = true;
7f785763
RD
5806 } else if (!strcmp(str, "noaer")) {
5807 pci_no_aer();
b55438fd
YL
5808 } else if (!strncmp(str, "realloc=", 8)) {
5809 pci_realloc_get_opt(str + 8);
f483d392 5810 } else if (!strncmp(str, "realloc", 7)) {
b55438fd 5811 pci_realloc_get_opt("on");
32a2eea7
JG
5812 } else if (!strcmp(str, "nodomains")) {
5813 pci_no_domains();
6748dcc2
RW
5814 } else if (!strncmp(str, "noari", 5)) {
5815 pcie_ari_disabled = true;
4516a618
AN
5816 } else if (!strncmp(str, "cbiosize=", 9)) {
5817 pci_cardbus_io_size = memparse(str + 9, &str);
5818 } else if (!strncmp(str, "cbmemsize=", 10)) {
5819 pci_cardbus_mem_size = memparse(str + 10, &str);
32a9a682
YS
5820 } else if (!strncmp(str, "resource_alignment=", 19)) {
5821 pci_set_resource_alignment_param(str + 19,
5822 strlen(str + 19));
43c16408
AP
5823 } else if (!strncmp(str, "ecrc=", 5)) {
5824 pcie_ecrc_get_policy(str + 5);
28760489
EB
5825 } else if (!strncmp(str, "hpiosize=", 9)) {
5826 pci_hotplug_io_size = memparse(str + 9, &str);
5827 } else if (!strncmp(str, "hpmemsize=", 10)) {
5828 pci_hotplug_mem_size = memparse(str + 10, &str);
e16b4660
KB
5829 } else if (!strncmp(str, "hpbussize=", 10)) {
5830 pci_hotplug_bus_size =
5831 simple_strtoul(str + 10, &str, 0);
5832 if (pci_hotplug_bus_size > 0xff)
5833 pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
5f39e670
JM
5834 } else if (!strncmp(str, "pcie_bus_tune_off", 17)) {
5835 pcie_bus_config = PCIE_BUS_TUNE_OFF;
b03e7495
JM
5836 } else if (!strncmp(str, "pcie_bus_safe", 13)) {
5837 pcie_bus_config = PCIE_BUS_SAFE;
5838 } else if (!strncmp(str, "pcie_bus_perf", 13)) {
5839 pcie_bus_config = PCIE_BUS_PERFORMANCE;
5f39e670
JM
5840 } else if (!strncmp(str, "pcie_bus_peer2peer", 18)) {
5841 pcie_bus_config = PCIE_BUS_PEER2PEER;
284f5f9d
BH
5842 } else if (!strncmp(str, "pcie_scan_all", 13)) {
5843 pci_add_flags(PCI_SCAN_ALL_PCIE_DEVS);
309e57df
MW
5844 } else {
5845 printk(KERN_ERR "PCI: Unknown option `%s'\n",
5846 str);
5847 }
1da177e4
LT
5848 }
5849 str = k;
5850 }
0637a70a 5851 return 0;
1da177e4 5852}
0637a70a 5853early_param("pci", pci_setup);
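/*
 * Illustrative kernel command lines handled by pci_setup() above
 * (assumed values, shown for clarity only):
 *
 *   pci=nomsi,noaer
 *	disable MSI and native AER handling
 *
 *   pci=hpmemsize=8M,hpiosize=4K
 *	reserve larger hotplug bridge memory/I/O windows
 *
 *   pci=pcie_bus_safe
 *	set every device's MPS to the largest value supported by
 *	all devices below its root complex
 */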