nvme: fix Kconfig description for BLK_DEV_NVME_SCSI
[linux-2.6-block.git] / drivers / pci / probe.c
CommitLineData
1da177e4
LT
1/*
2 * probe.c - PCI detection and setup code
3 */
4
5#include <linux/kernel.h>
6#include <linux/delay.h>
7#include <linux/init.h>
8#include <linux/pci.h>
50230713 9#include <linux/of_device.h>
de335bb4 10#include <linux/of_pci.h>
589fcc23 11#include <linux/pci_hotplug.h>
1da177e4
LT
12#include <linux/slab.h>
13#include <linux/module.h>
14#include <linux/cpumask.h>
7d715a6c 15#include <linux/pci-aspm.h>
b07461a8 16#include <linux/aer.h>
29dbe1f0 17#include <linux/acpi.h>
284f5f9d 18#include <asm-generic/pci-bridge.h>
bc56b9e0 19#include "pci.h"
1da177e4
LT
20
21#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
22#define CARDBUS_RESERVE_BUSNR 3
1da177e4 23
0b950f0f 24static struct resource busn_resource = {
67cdc827
YL
25 .name = "PCI busn",
26 .start = 0,
27 .end = 255,
28 .flags = IORESOURCE_BUS,
29};
30
1da177e4
LT
31/* Ugh. Need to stop exporting this to modules. */
32LIST_HEAD(pci_root_buses);
33EXPORT_SYMBOL(pci_root_buses);
34
5cc62c20
YL
35static LIST_HEAD(pci_domain_busn_res_list);
36
37struct pci_domain_busn_res {
38 struct list_head list;
39 struct resource res;
40 int domain_nr;
41};
42
43static struct resource *get_pci_domain_busn_res(int domain_nr)
44{
45 struct pci_domain_busn_res *r;
46
47 list_for_each_entry(r, &pci_domain_busn_res_list, list)
48 if (r->domain_nr == domain_nr)
49 return &r->res;
50
51 r = kzalloc(sizeof(*r), GFP_KERNEL);
52 if (!r)
53 return NULL;
54
55 r->domain_nr = domain_nr;
56 r->res.start = 0;
57 r->res.end = 0xff;
58 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
59
60 list_add_tail(&r->list, &pci_domain_busn_res_list);
61
62 return &r->res;
63}
64
70308923
GKH
65static int find_anything(struct device *dev, void *data)
66{
67 return 1;
68}
1da177e4 69
ed4aaadb
ZY
70/*
71 * Some device drivers need know if pci is initiated.
72 * Basically, we think pci is not initiated when there
70308923 73 * is no device to be found on the pci_bus_type.
ed4aaadb
ZY
74 */
75int no_pci_devices(void)
76{
70308923
GKH
77 struct device *dev;
78 int no_devices;
ed4aaadb 79
70308923
GKH
80 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
81 no_devices = (dev == NULL);
82 put_device(dev);
83 return no_devices;
84}
ed4aaadb
ZY
85EXPORT_SYMBOL(no_pci_devices);
86
1da177e4
LT
87/*
88 * PCI Bus Class
89 */
fd7d1ced 90static void release_pcibus_dev(struct device *dev)
1da177e4 91{
fd7d1ced 92 struct pci_bus *pci_bus = to_pci_bus(dev);
1da177e4 93
ff0387c3 94 put_device(pci_bus->bridge);
2fe2abf8 95 pci_bus_remove_resources(pci_bus);
98d9f30c 96 pci_release_bus_of_node(pci_bus);
1da177e4
LT
97 kfree(pci_bus);
98}
99
100static struct class pcibus_class = {
101 .name = "pci_bus",
fd7d1ced 102 .dev_release = &release_pcibus_dev,
56039e65 103 .dev_groups = pcibus_groups,
1da177e4
LT
104};
105
106static int __init pcibus_class_init(void)
107{
108 return class_register(&pcibus_class);
109}
110postcore_initcall(pcibus_class_init);
111
6ac665c6 112static u64 pci_size(u64 base, u64 maxbase, u64 mask)
1da177e4 113{
6ac665c6 114 u64 size = mask & maxbase; /* Find the significant bits */
1da177e4
LT
115 if (!size)
116 return 0;
117
118 /* Get the lowest of them to find the decode size, and
119 from that the extent. */
120 size = (size & ~(size-1)) - 1;
121
122 /* base == maxbase can be valid only if the BAR has
123 already been programmed with all 1s. */
124 if (base == maxbase && ((base | size) & mask) != mask)
125 return 0;
126
127 return size;
128}
129
28c6821a 130static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
6ac665c6 131{
8d6a6a47 132 u32 mem_type;
28c6821a 133 unsigned long flags;
8d6a6a47 134
6ac665c6 135 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
28c6821a
BH
136 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
137 flags |= IORESOURCE_IO;
138 return flags;
6ac665c6 139 }
07eddf3d 140
28c6821a
BH
141 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
142 flags |= IORESOURCE_MEM;
143 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
144 flags |= IORESOURCE_PREFETCH;
07eddf3d 145
8d6a6a47
BH
146 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
147 switch (mem_type) {
148 case PCI_BASE_ADDRESS_MEM_TYPE_32:
149 break;
150 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
0ff9514b 151 /* 1M mem BAR treated as 32-bit BAR */
8d6a6a47
BH
152 break;
153 case PCI_BASE_ADDRESS_MEM_TYPE_64:
28c6821a
BH
154 flags |= IORESOURCE_MEM_64;
155 break;
8d6a6a47 156 default:
0ff9514b 157 /* mem unknown type treated as 32-bit BAR */
8d6a6a47
BH
158 break;
159 }
28c6821a 160 return flags;
07eddf3d
YL
161}
162
808e34e2
ZK
163#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
164
0b400c7e
YZ
165/**
166 * pci_read_base - read a PCI BAR
167 * @dev: the PCI device
168 * @type: type of the BAR
169 * @res: resource buffer to be filled in
170 * @pos: BAR position in the config space
171 *
172 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
6ac665c6 173 */
0b400c7e 174int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
3c78bc61 175 struct resource *res, unsigned int pos)
07eddf3d 176{
6ac665c6 177 u32 l, sz, mask;
23b13bc7 178 u64 l64, sz64, mask64;
253d2e54 179 u16 orig_cmd;
cf4d1cf5 180 struct pci_bus_region region, inverted_region;
6ac665c6 181
1ed67439 182 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
6ac665c6 183
0ff9514b 184 /* No printks while decoding is disabled! */
253d2e54
JP
185 if (!dev->mmio_always_on) {
186 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
808e34e2
ZK
187 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
188 pci_write_config_word(dev, PCI_COMMAND,
189 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
190 }
253d2e54
JP
191 }
192
6ac665c6
MW
193 res->name = pci_name(dev);
194
195 pci_read_config_dword(dev, pos, &l);
1ed67439 196 pci_write_config_dword(dev, pos, l | mask);
6ac665c6
MW
197 pci_read_config_dword(dev, pos, &sz);
198 pci_write_config_dword(dev, pos, l);
199
200 /*
201 * All bits set in sz means the device isn't working properly.
45aa23b4
BH
202 * If the BAR isn't implemented, all bits must be 0. If it's a
203 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
204 * 1 must be clear.
6ac665c6 205 */
f795d86a
MS
206 if (sz == 0xffffffff)
207 sz = 0;
6ac665c6
MW
208
209 /*
210 * I don't know how l can have all bits set. Copied from old code.
211 * Maybe it fixes a bug on some ancient platform.
212 */
213 if (l == 0xffffffff)
214 l = 0;
215
216 if (type == pci_bar_unknown) {
28c6821a
BH
217 res->flags = decode_bar(dev, l);
218 res->flags |= IORESOURCE_SIZEALIGN;
219 if (res->flags & IORESOURCE_IO) {
f795d86a
MS
220 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
221 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
222 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
6ac665c6 223 } else {
f795d86a
MS
224 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
225 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
226 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
6ac665c6
MW
227 }
228 } else {
229 res->flags |= (l & IORESOURCE_ROM_ENABLE);
f795d86a
MS
230 l64 = l & PCI_ROM_ADDRESS_MASK;
231 sz64 = sz & PCI_ROM_ADDRESS_MASK;
232 mask64 = (u32)PCI_ROM_ADDRESS_MASK;
6ac665c6
MW
233 }
234
28c6821a 235 if (res->flags & IORESOURCE_MEM_64) {
6ac665c6
MW
236 pci_read_config_dword(dev, pos + 4, &l);
237 pci_write_config_dword(dev, pos + 4, ~0);
238 pci_read_config_dword(dev, pos + 4, &sz);
239 pci_write_config_dword(dev, pos + 4, l);
240
241 l64 |= ((u64)l << 32);
242 sz64 |= ((u64)sz << 32);
f795d86a
MS
243 mask64 |= ((u64)~0 << 32);
244 }
6ac665c6 245
f795d86a
MS
246 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
247 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
6ac665c6 248
f795d86a
MS
249 if (!sz64)
250 goto fail;
6ac665c6 251
f795d86a 252 sz64 = pci_size(l64, sz64, mask64);
7e79c5f8
MS
253 if (!sz64) {
254 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
255 pos);
f795d86a 256 goto fail;
7e79c5f8 257 }
f795d86a
MS
258
259 if (res->flags & IORESOURCE_MEM_64) {
3a9ad0b4
YL
260 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
261 && sz64 > 0x100000000ULL) {
23b13bc7
BH
262 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
263 res->start = 0;
264 res->end = 0;
f795d86a
MS
265 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
266 pos, (unsigned long long)sz64);
23b13bc7 267 goto out;
c7dabef8
BH
268 }
269
3a9ad0b4 270 if ((sizeof(pci_bus_addr_t) < 8) && l) {
31e9dd25 271 /* Above 32-bit boundary; try to reallocate */
c83bd900 272 res->flags |= IORESOURCE_UNSET;
72dc5601
BH
273 res->start = 0;
274 res->end = sz64;
f795d86a
MS
275 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
276 pos, (unsigned long long)l64);
72dc5601 277 goto out;
6ac665c6 278 }
6ac665c6
MW
279 }
280
f795d86a
MS
281 region.start = l64;
282 region.end = l64 + sz64;
283
fc279850
YL
284 pcibios_bus_to_resource(dev->bus, res, &region);
285 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
cf4d1cf5
KH
286
287 /*
288 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
289 * the corresponding resource address (the physical address used by
290 * the CPU. Converting that resource address back to a bus address
291 * should yield the original BAR value:
292 *
293 * resource_to_bus(bus_to_resource(A)) == A
294 *
295 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
296 * be claimed by the device.
297 */
298 if (inverted_region.start != region.start) {
cf4d1cf5 299 res->flags |= IORESOURCE_UNSET;
cf4d1cf5 300 res->start = 0;
26370fc6 301 res->end = region.end - region.start;
f795d86a
MS
302 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
303 pos, (unsigned long long)region.start);
cf4d1cf5 304 }
96ddef25 305
0ff9514b
BH
306 goto out;
307
308
309fail:
310 res->flags = 0;
311out:
31e9dd25 312 if (res->flags)
33963e30 313 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
0ff9514b 314
28c6821a 315 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
07eddf3d
YL
316}
317
1da177e4
LT
318static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
319{
6ac665c6 320 unsigned int pos, reg;
07eddf3d 321
6ac665c6
MW
322 for (pos = 0; pos < howmany; pos++) {
323 struct resource *res = &dev->resource[pos];
1da177e4 324 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
6ac665c6 325 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
1da177e4 326 }
6ac665c6 327
1da177e4 328 if (rom) {
6ac665c6 329 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
1da177e4 330 dev->rom_base_reg = rom;
6ac665c6 331 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
92b19ff5 332 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
6ac665c6 333 __pci_read_base(dev, pci_bar_mem32, res, rom);
1da177e4
LT
334 }
335}
336
15856ad5 337static void pci_read_bridge_io(struct pci_bus *child)
1da177e4
LT
338{
339 struct pci_dev *dev = child->self;
340 u8 io_base_lo, io_limit_lo;
2b28ae19 341 unsigned long io_mask, io_granularity, base, limit;
5bfa14ed 342 struct pci_bus_region region;
2b28ae19
BH
343 struct resource *res;
344
345 io_mask = PCI_IO_RANGE_MASK;
346 io_granularity = 0x1000;
347 if (dev->io_window_1k) {
348 /* Support 1K I/O space granularity */
349 io_mask = PCI_IO_1K_RANGE_MASK;
350 io_granularity = 0x400;
351 }
1da177e4 352
1da177e4
LT
353 res = child->resource[0];
354 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
355 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
2b28ae19
BH
356 base = (io_base_lo & io_mask) << 8;
357 limit = (io_limit_lo & io_mask) << 8;
1da177e4
LT
358
359 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
360 u16 io_base_hi, io_limit_hi;
8f38eaca 361
1da177e4
LT
362 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
363 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
8f38eaca
BH
364 base |= ((unsigned long) io_base_hi << 16);
365 limit |= ((unsigned long) io_limit_hi << 16);
1da177e4
LT
366 }
367
5dde383e 368 if (base <= limit) {
1da177e4 369 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
5bfa14ed 370 region.start = base;
2b28ae19 371 region.end = limit + io_granularity - 1;
fc279850 372 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 373 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 374 }
fa27b2d1
BH
375}
376
15856ad5 377static void pci_read_bridge_mmio(struct pci_bus *child)
fa27b2d1
BH
378{
379 struct pci_dev *dev = child->self;
380 u16 mem_base_lo, mem_limit_lo;
381 unsigned long base, limit;
5bfa14ed 382 struct pci_bus_region region;
fa27b2d1 383 struct resource *res;
1da177e4
LT
384
385 res = child->resource[1];
386 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
387 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
8f38eaca
BH
388 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
389 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
5dde383e 390 if (base <= limit) {
1da177e4 391 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
5bfa14ed
BH
392 region.start = base;
393 region.end = limit + 0xfffff;
fc279850 394 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 395 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 396 }
fa27b2d1
BH
397}
398
15856ad5 399static void pci_read_bridge_mmio_pref(struct pci_bus *child)
fa27b2d1
BH
400{
401 struct pci_dev *dev = child->self;
402 u16 mem_base_lo, mem_limit_lo;
7fc986d8 403 u64 base64, limit64;
3a9ad0b4 404 pci_bus_addr_t base, limit;
5bfa14ed 405 struct pci_bus_region region;
fa27b2d1 406 struct resource *res;
1da177e4
LT
407
408 res = child->resource[2];
409 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
410 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
7fc986d8
YL
411 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
412 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
1da177e4
LT
413
414 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
415 u32 mem_base_hi, mem_limit_hi;
8f38eaca 416
1da177e4
LT
417 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
418 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
419
420 /*
421 * Some bridges set the base > limit by default, and some
422 * (broken) BIOSes do not initialize them. If we find
423 * this, just assume they are not being used.
424 */
425 if (mem_base_hi <= mem_limit_hi) {
7fc986d8
YL
426 base64 |= (u64) mem_base_hi << 32;
427 limit64 |= (u64) mem_limit_hi << 32;
1da177e4
LT
428 }
429 }
7fc986d8 430
3a9ad0b4
YL
431 base = (pci_bus_addr_t) base64;
432 limit = (pci_bus_addr_t) limit64;
7fc986d8
YL
433
434 if (base != base64) {
435 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
436 (unsigned long long) base64);
437 return;
438 }
439
5dde383e 440 if (base <= limit) {
1f82de10
YL
441 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
442 IORESOURCE_MEM | IORESOURCE_PREFETCH;
443 if (res->flags & PCI_PREF_RANGE_TYPE_64)
444 res->flags |= IORESOURCE_MEM_64;
5bfa14ed
BH
445 region.start = base;
446 region.end = limit + 0xfffff;
fc279850 447 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 448 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4
LT
449 }
450}
451
15856ad5 452void pci_read_bridge_bases(struct pci_bus *child)
fa27b2d1
BH
453{
454 struct pci_dev *dev = child->self;
2fe2abf8 455 struct resource *res;
fa27b2d1
BH
456 int i;
457
458 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
459 return;
460
b918c62e
YL
461 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
462 &child->busn_res,
fa27b2d1
BH
463 dev->transparent ? " (subtractive decode)" : "");
464
2fe2abf8
BH
465 pci_bus_remove_resources(child);
466 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
467 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
468
fa27b2d1
BH
469 pci_read_bridge_io(child);
470 pci_read_bridge_mmio(child);
471 pci_read_bridge_mmio_pref(child);
2adf7516
BH
472
473 if (dev->transparent) {
2fe2abf8 474 pci_bus_for_each_resource(child->parent, res, i) {
d739a099 475 if (res && res->flags) {
2fe2abf8
BH
476 pci_bus_add_resource(child, res,
477 PCI_SUBTRACTIVE_DECODE);
2adf7516
BH
478 dev_printk(KERN_DEBUG, &dev->dev,
479 " bridge window %pR (subtractive decode)\n",
2fe2abf8
BH
480 res);
481 }
2adf7516
BH
482 }
483 }
fa27b2d1
BH
484}
485
670ba0c8 486static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
1da177e4
LT
487{
488 struct pci_bus *b;
489
f5afe806 490 b = kzalloc(sizeof(*b), GFP_KERNEL);
05013486
BH
491 if (!b)
492 return NULL;
493
494 INIT_LIST_HEAD(&b->node);
495 INIT_LIST_HEAD(&b->children);
496 INIT_LIST_HEAD(&b->devices);
497 INIT_LIST_HEAD(&b->slots);
498 INIT_LIST_HEAD(&b->resources);
499 b->max_bus_speed = PCI_SPEED_UNKNOWN;
500 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
670ba0c8
CM
501#ifdef CONFIG_PCI_DOMAINS_GENERIC
502 if (parent)
503 b->domain_nr = parent->domain_nr;
504#endif
1da177e4
LT
505 return b;
506}
507
70efde2a
JL
508static void pci_release_host_bridge_dev(struct device *dev)
509{
510 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
511
512 if (bridge->release_fn)
513 bridge->release_fn(bridge);
514
515 pci_free_resource_list(&bridge->windows);
516
517 kfree(bridge);
518}
519
7b543663
YL
520static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
521{
522 struct pci_host_bridge *bridge;
523
524 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
05013486
BH
525 if (!bridge)
526 return NULL;
7b543663 527
05013486
BH
528 INIT_LIST_HEAD(&bridge->windows);
529 bridge->bus = b;
7b543663
YL
530 return bridge;
531}
532
0b950f0f 533static const unsigned char pcix_bus_speed[] = {
9be60ca0
MW
534 PCI_SPEED_UNKNOWN, /* 0 */
535 PCI_SPEED_66MHz_PCIX, /* 1 */
536 PCI_SPEED_100MHz_PCIX, /* 2 */
537 PCI_SPEED_133MHz_PCIX, /* 3 */
538 PCI_SPEED_UNKNOWN, /* 4 */
539 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
540 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
541 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
542 PCI_SPEED_UNKNOWN, /* 8 */
543 PCI_SPEED_66MHz_PCIX_266, /* 9 */
544 PCI_SPEED_100MHz_PCIX_266, /* A */
545 PCI_SPEED_133MHz_PCIX_266, /* B */
546 PCI_SPEED_UNKNOWN, /* C */
547 PCI_SPEED_66MHz_PCIX_533, /* D */
548 PCI_SPEED_100MHz_PCIX_533, /* E */
549 PCI_SPEED_133MHz_PCIX_533 /* F */
550};
551
343e51ae 552const unsigned char pcie_link_speed[] = {
3749c51a
MW
553 PCI_SPEED_UNKNOWN, /* 0 */
554 PCIE_SPEED_2_5GT, /* 1 */
555 PCIE_SPEED_5_0GT, /* 2 */
9dfd97fe 556 PCIE_SPEED_8_0GT, /* 3 */
3749c51a
MW
557 PCI_SPEED_UNKNOWN, /* 4 */
558 PCI_SPEED_UNKNOWN, /* 5 */
559 PCI_SPEED_UNKNOWN, /* 6 */
560 PCI_SPEED_UNKNOWN, /* 7 */
561 PCI_SPEED_UNKNOWN, /* 8 */
562 PCI_SPEED_UNKNOWN, /* 9 */
563 PCI_SPEED_UNKNOWN, /* A */
564 PCI_SPEED_UNKNOWN, /* B */
565 PCI_SPEED_UNKNOWN, /* C */
566 PCI_SPEED_UNKNOWN, /* D */
567 PCI_SPEED_UNKNOWN, /* E */
568 PCI_SPEED_UNKNOWN /* F */
569};
570
571void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
572{
231afea1 573 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
3749c51a
MW
574}
575EXPORT_SYMBOL_GPL(pcie_update_link_speed);
576
45b4cdd5
MW
577static unsigned char agp_speeds[] = {
578 AGP_UNKNOWN,
579 AGP_1X,
580 AGP_2X,
581 AGP_4X,
582 AGP_8X
583};
584
585static enum pci_bus_speed agp_speed(int agp3, int agpstat)
586{
587 int index = 0;
588
589 if (agpstat & 4)
590 index = 3;
591 else if (agpstat & 2)
592 index = 2;
593 else if (agpstat & 1)
594 index = 1;
595 else
596 goto out;
f7625980 597
45b4cdd5
MW
598 if (agp3) {
599 index += 2;
600 if (index == 5)
601 index = 0;
602 }
603
604 out:
605 return agp_speeds[index];
606}
607
9be60ca0
MW
608static void pci_set_bus_speed(struct pci_bus *bus)
609{
610 struct pci_dev *bridge = bus->self;
611 int pos;
612
45b4cdd5
MW
613 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
614 if (!pos)
615 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
616 if (pos) {
617 u32 agpstat, agpcmd;
618
619 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
620 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
621
622 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
623 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
624 }
625
9be60ca0
MW
626 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
627 if (pos) {
628 u16 status;
629 enum pci_bus_speed max;
9be60ca0 630
7793eeab
BH
631 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
632 &status);
633
634 if (status & PCI_X_SSTATUS_533MHZ) {
9be60ca0 635 max = PCI_SPEED_133MHz_PCIX_533;
7793eeab 636 } else if (status & PCI_X_SSTATUS_266MHZ) {
9be60ca0 637 max = PCI_SPEED_133MHz_PCIX_266;
7793eeab 638 } else if (status & PCI_X_SSTATUS_133MHZ) {
3c78bc61 639 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
9be60ca0 640 max = PCI_SPEED_133MHz_PCIX_ECC;
3c78bc61 641 else
9be60ca0 642 max = PCI_SPEED_133MHz_PCIX;
9be60ca0
MW
643 } else {
644 max = PCI_SPEED_66MHz_PCIX;
645 }
646
647 bus->max_bus_speed = max;
7793eeab
BH
648 bus->cur_bus_speed = pcix_bus_speed[
649 (status & PCI_X_SSTATUS_FREQ) >> 6];
9be60ca0
MW
650
651 return;
652 }
653
fdfe1511 654 if (pci_is_pcie(bridge)) {
9be60ca0
MW
655 u32 linkcap;
656 u16 linksta;
657
59875ae4 658 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
231afea1 659 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
9be60ca0 660
59875ae4 661 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
9be60ca0
MW
662 pcie_update_link_speed(bus, linksta);
663 }
664}
665
44aa0c65
MZ
666static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
667{
b165e2b6
MZ
668 struct irq_domain *d;
669
44aa0c65
MZ
670 /*
671 * Any firmware interface that can resolve the msi_domain
672 * should be called from here.
673 */
b165e2b6 674 d = pci_host_bridge_of_msi_domain(bus);
471036b2
SS
675 if (!d)
676 d = pci_host_bridge_acpi_msi_domain(bus);
44aa0c65 677
b165e2b6 678 return d;
44aa0c65
MZ
679}
680
681static void pci_set_bus_msi_domain(struct pci_bus *bus)
682{
683 struct irq_domain *d;
38ea72bd 684 struct pci_bus *b;
44aa0c65
MZ
685
686 /*
38ea72bd
AW
687 * The bus can be a root bus, a subordinate bus, or a virtual bus
688 * created by an SR-IOV device. Walk up to the first bridge device
689 * found or derive the domain from the host bridge.
44aa0c65 690 */
38ea72bd
AW
691 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
692 if (b->self)
693 d = dev_get_msi_domain(&b->self->dev);
694 }
695
696 if (!d)
697 d = pci_host_bridge_msi_domain(b);
44aa0c65
MZ
698
699 dev_set_msi_domain(&bus->dev, d);
700}
701
cbd4e055
AB
702static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
703 struct pci_dev *bridge, int busnr)
1da177e4
LT
704{
705 struct pci_bus *child;
706 int i;
4f535093 707 int ret;
1da177e4
LT
708
709 /*
710 * Allocate a new bus, and inherit stuff from the parent..
711 */
670ba0c8 712 child = pci_alloc_bus(parent);
1da177e4
LT
713 if (!child)
714 return NULL;
715
1da177e4
LT
716 child->parent = parent;
717 child->ops = parent->ops;
0cbdcfcf 718 child->msi = parent->msi;
1da177e4 719 child->sysdata = parent->sysdata;
6e325a62 720 child->bus_flags = parent->bus_flags;
1da177e4 721
fd7d1ced 722 /* initialize some portions of the bus device, but don't register it
4f535093 723 * now as the parent is not properly set up yet.
fd7d1ced
GKH
724 */
725 child->dev.class = &pcibus_class;
1a927133 726 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
1da177e4
LT
727
728 /*
729 * Set up the primary, secondary and subordinate
730 * bus numbers.
731 */
b918c62e
YL
732 child->number = child->busn_res.start = busnr;
733 child->primary = parent->busn_res.start;
734 child->busn_res.end = 0xff;
1da177e4 735
4f535093
YL
736 if (!bridge) {
737 child->dev.parent = parent->bridge;
738 goto add_dev;
739 }
3789fa8a
YZ
740
741 child->self = bridge;
742 child->bridge = get_device(&bridge->dev);
4f535093 743 child->dev.parent = child->bridge;
98d9f30c 744 pci_set_bus_of_node(child);
9be60ca0
MW
745 pci_set_bus_speed(child);
746
1da177e4 747 /* Set up default resource pointers and names.. */
fde09c6d 748 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
1da177e4
LT
749 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
750 child->resource[i]->name = child->name;
751 }
752 bridge->subordinate = child;
753
4f535093 754add_dev:
44aa0c65 755 pci_set_bus_msi_domain(child);
4f535093
YL
756 ret = device_register(&child->dev);
757 WARN_ON(ret < 0);
758
10a95747
JL
759 pcibios_add_bus(child);
760
4f535093
YL
761 /* Create legacy_io and legacy_mem files for this bus */
762 pci_create_legacy_files(child);
763
1da177e4
LT
764 return child;
765}
766
3c78bc61
RD
767struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
768 int busnr)
1da177e4
LT
769{
770 struct pci_bus *child;
771
772 child = pci_alloc_child_bus(parent, dev, busnr);
e4ea9bb7 773 if (child) {
d71374da 774 down_write(&pci_bus_sem);
1da177e4 775 list_add_tail(&child->node, &parent->children);
d71374da 776 up_write(&pci_bus_sem);
e4ea9bb7 777 }
1da177e4
LT
778 return child;
779}
b7fe9434 780EXPORT_SYMBOL(pci_add_new_bus);
1da177e4 781
f3dbd802
RJ
782static void pci_enable_crs(struct pci_dev *pdev)
783{
784 u16 root_cap = 0;
785
786 /* Enable CRS Software Visibility if supported */
787 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
788 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
789 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
790 PCI_EXP_RTCTL_CRSSVE);
791}
792
1da177e4
LT
793/*
794 * If it's a bridge, configure it and scan the bus behind it.
795 * For CardBus bridges, we don't scan behind as the devices will
796 * be handled by the bridge driver itself.
797 *
798 * We need to process bridges in two passes -- first we scan those
799 * already configured by the BIOS and after we are done with all of
800 * them, we proceed to assigning numbers to the remaining buses in
801 * order to avoid overlaps between old and new bus numbers.
802 */
15856ad5 803int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
1da177e4
LT
804{
805 struct pci_bus *child;
806 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
49887941 807 u32 buses, i, j = 0;
1da177e4 808 u16 bctl;
99ddd552 809 u8 primary, secondary, subordinate;
a1c19894 810 int broken = 0;
1da177e4
LT
811
812 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
99ddd552
BH
813 primary = buses & 0xFF;
814 secondary = (buses >> 8) & 0xFF;
815 subordinate = (buses >> 16) & 0xFF;
1da177e4 816
99ddd552
BH
817 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
818 secondary, subordinate, pass);
1da177e4 819
71f6bd4a
YL
820 if (!primary && (primary != bus->number) && secondary && subordinate) {
821 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
822 primary = bus->number;
823 }
824
a1c19894
BH
825 /* Check if setup is sensible at all */
826 if (!pass &&
1965f66e 827 (primary != bus->number || secondary <= bus->number ||
12d87069 828 secondary > subordinate)) {
1965f66e
YL
829 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
830 secondary, subordinate);
a1c19894
BH
831 broken = 1;
832 }
833
1da177e4 834 /* Disable MasterAbortMode during probing to avoid reporting
f7625980 835 of bus errors (in some architectures) */
1da177e4
LT
836 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
837 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
838 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
839
f3dbd802
RJ
840 pci_enable_crs(dev);
841
99ddd552
BH
842 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
843 !is_cardbus && !broken) {
844 unsigned int cmax;
1da177e4
LT
845 /*
846 * Bus already configured by firmware, process it in the first
847 * pass and just note the configuration.
848 */
849 if (pass)
bbe8f9a3 850 goto out;
1da177e4
LT
851
852 /*
2ed85823
AN
853 * The bus might already exist for two reasons: Either we are
854 * rescanning the bus or the bus is reachable through more than
855 * one bridge. The second case can happen with the i450NX
856 * chipset.
1da177e4 857 */
99ddd552 858 child = pci_find_bus(pci_domain_nr(bus), secondary);
74710ded 859 if (!child) {
99ddd552 860 child = pci_add_new_bus(bus, dev, secondary);
74710ded
AC
861 if (!child)
862 goto out;
99ddd552 863 child->primary = primary;
bc76b731 864 pci_bus_insert_busn_res(child, secondary, subordinate);
74710ded 865 child->bridge_ctl = bctl;
1da177e4
LT
866 }
867
1da177e4 868 cmax = pci_scan_child_bus(child);
c95b0bd6
AN
869 if (cmax > subordinate)
870 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
871 subordinate, cmax);
872 /* subordinate should equal child->busn_res.end */
873 if (subordinate > max)
874 max = subordinate;
1da177e4
LT
875 } else {
876 /*
877 * We need to assign a number to this bus which we always
878 * do in the second pass.
879 */
12f44f46 880 if (!pass) {
619c8c31 881 if (pcibios_assign_all_busses() || broken || is_cardbus)
12f44f46
IK
882 /* Temporarily disable forwarding of the
883 configuration cycles on all bridges in
884 this bus segment to avoid possible
885 conflicts in the second pass between two
886 bridges programmed with overlapping
887 bus ranges. */
888 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
889 buses & ~0xffffff);
bbe8f9a3 890 goto out;
12f44f46 891 }
1da177e4
LT
892
893 /* Clear errors */
894 pci_write_config_word(dev, PCI_STATUS, 0xffff);
895
7a0b33d4
BH
896 /* Prevent assigning a bus number that already exists.
897 * This can happen when a bridge is hot-plugged, so in
898 * this case we only re-scan this bus. */
b1a98b69
TC
899 child = pci_find_bus(pci_domain_nr(bus), max+1);
900 if (!child) {
9a4d7d87 901 child = pci_add_new_bus(bus, dev, max+1);
b1a98b69
TC
902 if (!child)
903 goto out;
12d87069 904 pci_bus_insert_busn_res(child, max+1, 0xff);
b1a98b69 905 }
9a4d7d87 906 max++;
1da177e4
LT
907 buses = (buses & 0xff000000)
908 | ((unsigned int)(child->primary) << 0)
b918c62e
YL
909 | ((unsigned int)(child->busn_res.start) << 8)
910 | ((unsigned int)(child->busn_res.end) << 16);
1da177e4
LT
911
912 /*
913 * yenta.c forces a secondary latency timer of 176.
914 * Copy that behaviour here.
915 */
916 if (is_cardbus) {
917 buses &= ~0xff000000;
918 buses |= CARDBUS_LATENCY_TIMER << 24;
919 }
7c867c88 920
1da177e4
LT
921 /*
922 * We need to blast all three values with a single write.
923 */
924 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
925
926 if (!is_cardbus) {
11949255 927 child->bridge_ctl = bctl;
1da177e4
LT
928 max = pci_scan_child_bus(child);
929 } else {
930 /*
931 * For CardBus bridges, we leave 4 bus numbers
932 * as cards with a PCI-to-PCI bridge can be
933 * inserted later.
934 */
3c78bc61 935 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
49887941 936 struct pci_bus *parent = bus;
cc57450f
RS
937 if (pci_find_bus(pci_domain_nr(bus),
938 max+i+1))
939 break;
49887941
DB
940 while (parent->parent) {
941 if ((!pcibios_assign_all_busses()) &&
b918c62e
YL
942 (parent->busn_res.end > max) &&
943 (parent->busn_res.end <= max+i)) {
49887941
DB
944 j = 1;
945 }
946 parent = parent->parent;
947 }
948 if (j) {
949 /*
950 * Often, there are two cardbus bridges
951 * -- try to leave one valid bus number
952 * for each one.
953 */
954 i /= 2;
955 break;
956 }
957 }
cc57450f 958 max += i;
1da177e4
LT
959 }
960 /*
961 * Set the subordinate bus number to its real value.
962 */
bc76b731 963 pci_bus_update_busn_res_end(child, max);
1da177e4
LT
964 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
965 }
966
cb3576fa
GH
967 sprintf(child->name,
968 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
969 pci_domain_nr(bus), child->number);
1da177e4 970
d55bef51 971 /* Has only triggered on CardBus, fixup is in yenta_socket */
49887941 972 while (bus->parent) {
b918c62e
YL
973 if ((child->busn_res.end > bus->busn_res.end) ||
974 (child->number > bus->busn_res.end) ||
49887941 975 (child->number < bus->number) ||
b918c62e 976 (child->busn_res.end < bus->number)) {
227f0647 977 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
b918c62e
YL
978 &child->busn_res,
979 (bus->number > child->busn_res.end &&
980 bus->busn_res.end < child->number) ?
a6f29a98
JP
981 "wholly" : "partially",
982 bus->self->transparent ? " transparent" : "",
865df576 983 dev_name(&bus->dev),
b918c62e 984 &bus->busn_res);
49887941
DB
985 }
986 bus = bus->parent;
987 }
988
bbe8f9a3
RB
989out:
990 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
991
1da177e4
LT
992 return max;
993}
b7fe9434 994EXPORT_SYMBOL(pci_scan_bridge);
1da177e4
LT
995
996/*
997 * Read interrupt line and base address registers.
998 * The architecture-dependent code can tweak these, of course.
999 */
1000static void pci_read_irq(struct pci_dev *dev)
1001{
1002 unsigned char irq;
1003
1004 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
ffeff788 1005 dev->pin = irq;
1da177e4
LT
1006 if (irq)
1007 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1008 dev->irq = irq;
1009}
1010
bb209c82 1011void set_pcie_port_type(struct pci_dev *pdev)
480b93b7
YZ
1012{
1013 int pos;
1014 u16 reg16;
d0751b98
YW
1015 int type;
1016 struct pci_dev *parent;
480b93b7
YZ
1017
1018 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1019 if (!pos)
1020 return;
0efea000 1021 pdev->pcie_cap = pos;
480b93b7 1022 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
786e2288 1023 pdev->pcie_flags_reg = reg16;
b03e7495
JM
1024 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1025 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
d0751b98
YW
1026
1027 /*
1028 * A Root Port is always the upstream end of a Link. No PCIe
1029 * component has two Links. Two Links are connected by a Switch
1030 * that has a Port on each Link and internal logic to connect the
1031 * two Ports.
1032 */
1033 type = pci_pcie_type(pdev);
1034 if (type == PCI_EXP_TYPE_ROOT_PORT)
1035 pdev->has_secondary_link = 1;
1036 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1037 type == PCI_EXP_TYPE_DOWNSTREAM) {
1038 parent = pci_upstream_bridge(pdev);
b35b1df5
YW
1039
1040 /*
1041 * Usually there's an upstream device (Root Port or Switch
1042 * Downstream Port), but we can't assume one exists.
1043 */
1044 if (parent && !parent->has_secondary_link)
d0751b98
YW
1045 pdev->has_secondary_link = 1;
1046 }
480b93b7
YZ
1047}
1048
bb209c82 1049void set_pcie_hotplug_bridge(struct pci_dev *pdev)
28760489 1050{
28760489
EB
1051 u32 reg32;
1052
59875ae4 1053 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
28760489
EB
1054 if (reg32 & PCI_EXP_SLTCAP_HPC)
1055 pdev->is_hotplug_bridge = 1;
1056}
1057
78916b00
AW
1058/**
1059 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1060 * @dev: PCI device
1061 *
1062 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1063 * when forwarding a type1 configuration request the bridge must check that
1064 * the extended register address field is zero. The bridge is not permitted
1065 * to forward the transactions and must handle it as an Unsupported Request.
1066 * Some bridges do not follow this rule and simply drop the extended register
1067 * bits, resulting in the standard config space being aliased, every 256
1068 * bytes across the entire configuration space. Test for this condition by
1069 * comparing the first dword of each potential alias to the vendor/device ID.
1070 * Known offenders:
1071 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1072 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1073 */
1074static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1075{
1076#ifdef CONFIG_PCI_QUIRKS
1077 int pos;
1078 u32 header, tmp;
1079
1080 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1081
1082 for (pos = PCI_CFG_SPACE_SIZE;
1083 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1084 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1085 || header != tmp)
1086 return false;
1087 }
1088
1089 return true;
1090#else
1091 return false;
1092#endif
1093}
1094
0b950f0f
SH
1095/**
1096 * pci_cfg_space_size - get the configuration space size of the PCI device.
1097 * @dev: PCI device
1098 *
1099 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1100 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1101 * access it. Maybe we don't have a way to generate extended config space
1102 * accesses, or the device is behind a reverse Express bridge. So we try
1103 * reading the dword at 0x100 which must either be 0 or a valid extended
1104 * capability header.
1105 */
1106static int pci_cfg_space_size_ext(struct pci_dev *dev)
1107{
1108 u32 status;
1109 int pos = PCI_CFG_SPACE_SIZE;
1110
1111 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
8e5a395a 1112 return PCI_CFG_SPACE_SIZE;
78916b00 1113 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
8e5a395a 1114 return PCI_CFG_SPACE_SIZE;
0b950f0f
SH
1115
1116 return PCI_CFG_SPACE_EXP_SIZE;
0b950f0f
SH
1117}
1118
1119int pci_cfg_space_size(struct pci_dev *dev)
1120{
1121 int pos;
1122 u32 status;
1123 u16 class;
1124
1125 class = dev->class >> 8;
1126 if (class == PCI_CLASS_BRIDGE_HOST)
1127 return pci_cfg_space_size_ext(dev);
1128
8e5a395a
BH
1129 if (pci_is_pcie(dev))
1130 return pci_cfg_space_size_ext(dev);
0b950f0f 1131
8e5a395a
BH
1132 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1133 if (!pos)
1134 return PCI_CFG_SPACE_SIZE;
0b950f0f 1135
8e5a395a
BH
1136 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1137 if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1138 return pci_cfg_space_size_ext(dev);
0b950f0f 1139
0b950f0f
SH
1140 return PCI_CFG_SPACE_SIZE;
1141}
1142
01abc2aa 1143#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
76e6a1d6 1144
e80e7edc 1145static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1851617c
MT
1146{
1147 /*
1148 * Disable the MSI hardware to avoid screaming interrupts
1149 * during boot. This is the power on reset default so
1150 * usually this should be a noop.
1151 */
1152 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1153 if (dev->msi_cap)
1154 pci_msi_set_enable(dev, 0);
1155
1156 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1157 if (dev->msix_cap)
1158 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1159}
1160
1da177e4
LT
1161/**
1162 * pci_setup_device - fill in class and map information of a device
1163 * @dev: the device structure to fill
1164 *
f7625980 1165 * Initialize the device structure with information about the device's
1da177e4
LT
1166 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1167 * Called at initialisation of the PCI subsystem and by CardBus services.
480b93b7
YZ
1168 * Returns 0 on success and negative if unknown type of device (not normal,
1169 * bridge or CardBus).
1da177e4 1170 */
480b93b7 1171int pci_setup_device(struct pci_dev *dev)
1da177e4
LT
1172{
1173 u32 class;
480b93b7 1174 u8 hdr_type;
bc577d2b 1175 int pos = 0;
5bfa14ed
BH
1176 struct pci_bus_region region;
1177 struct resource *res;
480b93b7
YZ
1178
1179 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1180 return -EIO;
1181
1182 dev->sysdata = dev->bus->sysdata;
1183 dev->dev.parent = dev->bus->bridge;
1184 dev->dev.bus = &pci_bus_type;
1185 dev->hdr_type = hdr_type & 0x7f;
1186 dev->multifunction = !!(hdr_type & 0x80);
480b93b7
YZ
1187 dev->error_state = pci_channel_io_normal;
1188 set_pcie_port_type(dev);
1189
017ffe64 1190 pci_dev_assign_slot(dev);
480b93b7
YZ
1191 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1192 set this higher, assuming the system even supports it. */
1193 dev->dma_mask = 0xffffffff;
1da177e4 1194
eebfcfb5
GKH
1195 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1196 dev->bus->number, PCI_SLOT(dev->devfn),
1197 PCI_FUNC(dev->devfn));
1da177e4
LT
1198
1199 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
b8a3a521 1200 dev->revision = class & 0xff;
2dd8ba92 1201 dev->class = class >> 8; /* upper 3 bytes */
1da177e4 1202
2dd8ba92
YL
1203 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1204 dev->vendor, dev->device, dev->hdr_type, dev->class);
1da177e4 1205
853346e4
YZ
1206 /* need to have dev->class ready */
1207 dev->cfg_size = pci_cfg_space_size(dev);
1208
1da177e4 1209 /* "Unknown power state" */
3fe9d19f 1210 dev->current_state = PCI_UNKNOWN;
1da177e4
LT
1211
1212 /* Early fixups, before probing the BARs */
1213 pci_fixup_device(pci_fixup_early, dev);
f79b1b14
YZ
1214 /* device class may be changed after fixup */
1215 class = dev->class >> 8;
1da177e4
LT
1216
1217 switch (dev->hdr_type) { /* header type */
1218 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1219 if (class == PCI_CLASS_BRIDGE_PCI)
1220 goto bad;
1221 pci_read_irq(dev);
1222 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1223 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1224 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
368c73d4
AC
1225
1226 /*
075eb9e3
BH
1227 * Do the ugly legacy mode stuff here rather than broken chip
1228 * quirk code. Legacy mode ATA controllers have fixed
1229 * addresses. These are not always echoed in BAR0-3, and
1230 * BAR0-3 in a few cases contain junk!
368c73d4
AC
1231 */
1232 if (class == PCI_CLASS_STORAGE_IDE) {
1233 u8 progif;
1234 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1235 if ((progif & 1) == 0) {
5bfa14ed
BH
1236 region.start = 0x1F0;
1237 region.end = 0x1F7;
1238 res = &dev->resource[0];
1239 res->flags = LEGACY_IO_RESOURCE;
fc279850 1240 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1241 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1242 res);
5bfa14ed
BH
1243 region.start = 0x3F6;
1244 region.end = 0x3F6;
1245 res = &dev->resource[1];
1246 res->flags = LEGACY_IO_RESOURCE;
fc279850 1247 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1248 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1249 res);
368c73d4
AC
1250 }
1251 if ((progif & 4) == 0) {
5bfa14ed
BH
1252 region.start = 0x170;
1253 region.end = 0x177;
1254 res = &dev->resource[2];
1255 res->flags = LEGACY_IO_RESOURCE;
fc279850 1256 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1257 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1258 res);
5bfa14ed
BH
1259 region.start = 0x376;
1260 region.end = 0x376;
1261 res = &dev->resource[3];
1262 res->flags = LEGACY_IO_RESOURCE;
fc279850 1263 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1264 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1265 res);
368c73d4
AC
1266 }
1267 }
1da177e4
LT
1268 break;
1269
1270 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1271 if (class != PCI_CLASS_BRIDGE_PCI)
1272 goto bad;
1273 /* The PCI-to-PCI bridge spec requires that subtractive
1274 decoding (i.e. transparent) bridge must have programming
f7625980 1275 interface code of 0x01. */
3efd273b 1276 pci_read_irq(dev);
1da177e4
LT
1277 dev->transparent = ((dev->class & 0xff) == 1);
1278 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
28760489 1279 set_pcie_hotplug_bridge(dev);
bc577d2b
GB
1280 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1281 if (pos) {
1282 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1283 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1284 }
1da177e4
LT
1285 break;
1286
1287 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1288 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1289 goto bad;
1290 pci_read_irq(dev);
1291 pci_read_bases(dev, 1, 0);
1292 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1293 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1294 break;
1295
1296 default: /* unknown header */
227f0647
RD
1297 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1298 dev->hdr_type);
480b93b7 1299 return -EIO;
1da177e4
LT
1300
1301 bad:
227f0647
RD
1302 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1303 dev->class, dev->hdr_type);
2b4aed1d 1304 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1da177e4
LT
1305 }
1306
1307 /* We found a fine healthy device, go go go... */
1308 return 0;
1309}
1310
9dae3a97
BH
1311static void pci_configure_mps(struct pci_dev *dev)
1312{
1313 struct pci_dev *bridge = pci_upstream_bridge(dev);
27d868b5 1314 int mps, p_mps, rc;
9dae3a97
BH
1315
1316 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1317 return;
1318
1319 mps = pcie_get_mps(dev);
1320 p_mps = pcie_get_mps(bridge);
1321
1322 if (mps == p_mps)
1323 return;
1324
1325 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1326 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1327 mps, pci_name(bridge), p_mps);
1328 return;
1329 }
27d868b5
KB
1330
1331 /*
1332 * Fancier MPS configuration is done later by
1333 * pcie_bus_configure_settings()
1334 */
1335 if (pcie_bus_config != PCIE_BUS_DEFAULT)
1336 return;
1337
1338 rc = pcie_set_mps(dev, p_mps);
1339 if (rc) {
1340 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1341 p_mps);
1342 return;
1343 }
1344
1345 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1346 p_mps, mps, 128 << dev->pcie_mpss);
9dae3a97
BH
1347}
1348
589fcc23
BH
1349static struct hpp_type0 pci_default_type0 = {
1350 .revision = 1,
1351 .cache_line_size = 8,
1352 .latency_timer = 0x40,
1353 .enable_serr = 0,
1354 .enable_perr = 0,
1355};
1356
1357static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1358{
1359 u16 pci_cmd, pci_bctl;
1360
c6285fc5 1361 if (!hpp)
589fcc23 1362 hpp = &pci_default_type0;
589fcc23
BH
1363
1364 if (hpp->revision > 1) {
1365 dev_warn(&dev->dev,
1366 "PCI settings rev %d not supported; using defaults\n",
1367 hpp->revision);
1368 hpp = &pci_default_type0;
1369 }
1370
1371 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1372 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1373 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1374 if (hpp->enable_serr)
1375 pci_cmd |= PCI_COMMAND_SERR;
589fcc23
BH
1376 if (hpp->enable_perr)
1377 pci_cmd |= PCI_COMMAND_PARITY;
589fcc23
BH
1378 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1379
1380 /* Program bridge control value */
1381 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1382 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1383 hpp->latency_timer);
1384 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1385 if (hpp->enable_serr)
1386 pci_bctl |= PCI_BRIDGE_CTL_SERR;
589fcc23
BH
1387 if (hpp->enable_perr)
1388 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
589fcc23
BH
1389 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1390 }
1391}
1392
1393static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1394{
1395 if (hpp)
1396 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1397}
1398
1399static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1400{
1401 int pos;
1402 u32 reg32;
1403
1404 if (!hpp)
1405 return;
1406
1407 if (hpp->revision > 1) {
1408 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1409 hpp->revision);
1410 return;
1411 }
1412
302328c0
BH
1413 /*
1414 * Don't allow _HPX to change MPS or MRRS settings. We manage
1415 * those to make sure they're consistent with the rest of the
1416 * platform.
1417 */
1418 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1419 PCI_EXP_DEVCTL_READRQ;
1420 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1421 PCI_EXP_DEVCTL_READRQ);
1422
589fcc23
BH
1423 /* Initialize Device Control Register */
1424 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1425 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1426
1427 /* Initialize Link Control Register */
7a1562d4 1428 if (pcie_cap_has_lnkctl(dev))
589fcc23
BH
1429 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1430 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1431
1432 /* Find Advanced Error Reporting Enhanced Capability */
1433 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1434 if (!pos)
1435 return;
1436
1437 /* Initialize Uncorrectable Error Mask Register */
1438 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1439 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1440 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1441
1442 /* Initialize Uncorrectable Error Severity Register */
1443 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1444 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1445 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1446
1447 /* Initialize Correctable Error Mask Register */
1448 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1449 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1450 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1451
1452 /* Initialize Advanced Error Capabilities and Control Register */
1453 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1454 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1455 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1456
1457 /*
1458 * FIXME: The following two registers are not supported yet.
1459 *
1460 * o Secondary Uncorrectable Error Severity Register
1461 * o Secondary Uncorrectable Error Mask Register
1462 */
1463}
1464
6cd33649
BH
1465static void pci_configure_device(struct pci_dev *dev)
1466{
1467 struct hotplug_params hpp;
1468 int ret;
1469
9dae3a97
BH
1470 pci_configure_mps(dev);
1471
6cd33649
BH
1472 memset(&hpp, 0, sizeof(hpp));
1473 ret = pci_get_hp_params(dev, &hpp);
1474 if (ret)
1475 return;
1476
1477 program_hpp_type2(dev, hpp.t2);
1478 program_hpp_type1(dev, hpp.t1);
1479 program_hpp_type0(dev, hpp.t0);
1480}
1481
201de56e
ZY
1482static void pci_release_capabilities(struct pci_dev *dev)
1483{
1484 pci_vpd_release(dev);
d1b054da 1485 pci_iov_release(dev);
f796841e 1486 pci_free_cap_save_buffers(dev);
201de56e
ZY
1487}
1488
1da177e4
LT
1489/**
1490 * pci_release_dev - free a pci device structure when all users of it are finished.
1491 * @dev: device that's been disconnected
1492 *
1493 * Will be called only by the device core when all users of this pci device are
1494 * done.
1495 */
1496static void pci_release_dev(struct device *dev)
1497{
04480094 1498 struct pci_dev *pci_dev;
1da177e4 1499
04480094 1500 pci_dev = to_pci_dev(dev);
201de56e 1501 pci_release_capabilities(pci_dev);
98d9f30c 1502 pci_release_of_node(pci_dev);
6ae32c53 1503 pcibios_release_device(pci_dev);
8b1fce04 1504 pci_bus_put(pci_dev->bus);
782a985d 1505 kfree(pci_dev->driver_override);
1da177e4
LT
1506 kfree(pci_dev);
1507}
1508
3c6e6ae7 1509struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
65891215
ME
1510{
1511 struct pci_dev *dev;
1512
1513 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1514 if (!dev)
1515 return NULL;
1516
65891215 1517 INIT_LIST_HEAD(&dev->bus_list);
88e7b167 1518 dev->dev.type = &pci_dev_type;
3c6e6ae7 1519 dev->bus = pci_bus_get(bus);
65891215
ME
1520
1521 return dev;
1522}
3c6e6ae7
GZ
1523EXPORT_SYMBOL(pci_alloc_dev);
1524
efdc87da 1525bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
3c78bc61 1526 int crs_timeout)
1da177e4 1527{
1da177e4
LT
1528 int delay = 1;
1529
efdc87da
YL
1530 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1531 return false;
1da177e4
LT
1532
1533 /* some broken boards return 0 or ~0 if a slot is empty: */
efdc87da
YL
1534 if (*l == 0xffffffff || *l == 0x00000000 ||
1535 *l == 0x0000ffff || *l == 0xffff0000)
1536 return false;
1da177e4 1537
89665a6a
RJ
1538 /*
1539 * Configuration Request Retry Status. Some root ports return the
1540 * actual device ID instead of the synthetic ID (0xFFFF) required
1541 * by the PCIe spec. Ignore the device ID and only check for
1542 * (vendor id == 1).
1543 */
1544 while ((*l & 0xffff) == 0x0001) {
efdc87da
YL
1545 if (!crs_timeout)
1546 return false;
1547
1da177e4
LT
1548 msleep(delay);
1549 delay *= 2;
efdc87da
YL
1550 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1551 return false;
1da177e4 1552 /* Card hasn't responded in 60 seconds? Must be stuck. */
efdc87da 1553 if (delay > crs_timeout) {
227f0647
RD
1554 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1555 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1556 PCI_FUNC(devfn));
efdc87da 1557 return false;
1da177e4
LT
1558 }
1559 }
1560
efdc87da
YL
1561 return true;
1562}
1563EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1564
1565/*
1566 * Read the config data for a PCI device, sanity-check it
1567 * and fill in the dev structure...
1568 */
1569static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1570{
1571 struct pci_dev *dev;
1572 u32 l;
1573
1574 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1575 return NULL;
1576
8b1fce04 1577 dev = pci_alloc_dev(bus);
1da177e4
LT
1578 if (!dev)
1579 return NULL;
1580
1da177e4 1581 dev->devfn = devfn;
1da177e4
LT
1582 dev->vendor = l & 0xffff;
1583 dev->device = (l >> 16) & 0xffff;
cef354db 1584
98d9f30c
BH
1585 pci_set_of_node(dev);
1586
480b93b7 1587 if (pci_setup_device(dev)) {
8b1fce04 1588 pci_bus_put(dev->bus);
1da177e4
LT
1589 kfree(dev);
1590 return NULL;
1591 }
1da177e4
LT
1592
1593 return dev;
1594}
1595
201de56e
ZY
1596static void pci_init_capabilities(struct pci_dev *dev)
1597{
938174e5
SS
1598 /* Enhanced Allocation */
1599 pci_ea_init(dev);
1600
e80e7edc
GP
1601 /* Setup MSI caps & disable MSI/MSI-X interrupts */
1602 pci_msi_setup_pci_dev(dev);
201de56e 1603
63f4898a
RW
1604 /* Buffers for saving PCIe and PCI-X capabilities */
1605 pci_allocate_cap_save_buffers(dev);
1606
201de56e
ZY
1607 /* Power Management */
1608 pci_pm_init(dev);
1609
1610 /* Vital Product Data */
1611 pci_vpd_pci22_init(dev);
58c3a727
YZ
1612
1613 /* Alternative Routing-ID Forwarding */
31ab2476 1614 pci_configure_ari(dev);
d1b054da
YZ
1615
1616 /* Single Root I/O Virtualization */
1617 pci_iov_init(dev);
ae21ee65 1618
edc90fee
BH
1619 /* Address Translation Services */
1620 pci_ats_init(dev);
1621
ae21ee65 1622 /* Enable ACS P2P upstream forwarding */
5d990b62 1623 pci_enable_acs(dev);
b07461a8
TI
1624
1625 pci_cleanup_aer_error_status_regs(dev);
201de56e
ZY
1626}
1627
098259eb
MZ
1628/*
1629 * This is the equivalent of pci_host_bridge_msi_domain that acts on
1630 * devices. Firmware interfaces that can select the MSI domain on a
1631 * per-device basis should be called from here.
1632 */
1633static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
1634{
1635 struct irq_domain *d;
1636
1637 /*
1638 * If a domain has been set through the pcibios_add_device
1639 * callback, then this is the one (platform code knows best).
1640 */
1641 d = dev_get_msi_domain(&dev->dev);
1642 if (d)
1643 return d;
1644
54fa97ee
MZ
1645 /*
1646 * Let's see if we have a firmware interface able to provide
1647 * the domain.
1648 */
1649 d = pci_msi_get_device_domain(dev);
1650 if (d)
1651 return d;
1652
098259eb
MZ
1653 return NULL;
1654}
1655
44aa0c65
MZ
1656static void pci_set_msi_domain(struct pci_dev *dev)
1657{
098259eb
MZ
1658 struct irq_domain *d;
1659
44aa0c65 1660 /*
098259eb
MZ
1661 * If the platform or firmware interfaces cannot supply a
1662 * device-specific MSI domain, then inherit the default domain
1663 * from the host bridge itself.
44aa0c65 1664 */
098259eb
MZ
1665 d = pci_dev_msi_domain(dev);
1666 if (!d)
1667 d = dev_get_msi_domain(&dev->bus->dev);
1668
1669 dev_set_msi_domain(&dev->dev, d);
44aa0c65
MZ
1670}
1671
50230713
SS
1672/**
1673 * pci_dma_configure - Setup DMA configuration
1674 * @dev: ptr to pci_dev struct of the PCI device
1675 *
1676 * Function to update PCI devices's DMA configuration using the same
29dbe1f0 1677 * info from the OF node or ACPI node of host bridge's parent (if any).
50230713
SS
1678 */
1679static void pci_dma_configure(struct pci_dev *dev)
1680{
1681 struct device *bridge = pci_get_host_bridge_device(dev);
1682
768acd64
SS
1683 if (IS_ENABLED(CONFIG_OF) &&
1684 bridge->parent && bridge->parent->of_node) {
50230713 1685 of_dma_configure(&dev->dev, bridge->parent->of_node);
29dbe1f0
SS
1686 } else if (has_acpi_companion(bridge)) {
1687 struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
1688 enum dev_dma_attr attr = acpi_get_dma_attr(adev);
1689
1690 if (attr == DEV_DMA_NOT_SUPPORTED)
1691 dev_warn(&dev->dev, "DMA not supported.\n");
1692 else
1693 arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
1694 attr == DEV_DMA_COHERENT);
50230713
SS
1695 }
1696
1697 pci_put_host_bridge_device(bridge);
1698}
1699
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;
	if (parent->has_secondary_link &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* Only one slot on a PCIe bus has devices; set up its link state */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);

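/*
 * Illustrative sketch (assumption, not part of probe.c): a hotplug driver
 * that notices a newly powered slot could repopulate just that slot with
 * pci_scan_slot() and then bind drivers with pci_bus_add_devices(), holding
 * the rescan/remove lock defined later in this file.  Real drivers also
 * assign bridge resources before adding the devices; the bus pointer and
 * device number here are placeholders.
 */
#if 0	/* example only, not built */
static void example_slot_hotplug(struct pci_bus *bus, int device_nr)
{
	pci_lock_rescan_remove();
	if (pci_scan_slot(bus, PCI_DEVFN(device_nr, 0)))
		pci_bus_add_devices(bus);
	pci_unlock_rescan_remove();
}
#endif
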
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the
			 * case, walk from the top down and set the MPS of the
			 * child to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to
			 * be properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps().
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is a R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);

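/*
 * Illustrative sketch (assumption, not from this file): after a root bus has
 * been scanned and its resources assigned, callers typically walk the child
 * buses and apply the MPS/MRRS policy chosen on the command line
 * (pci=pcie_bus_safe, pcie_bus_perf, ...) via pcie_bus_configure_settings().
 */
#if 0	/* example only, not built */
static void example_configure_mps(struct pci_bus *root_bus)
{
	struct pci_bus *child;

	/* Each entry on ->children is a bus below a bridge on root_bus */
	list_for_each_entry(child, &root_bus->children, node)
		pcie_bus_configure_settings(child);
}
#endif
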
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

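/*
 * Illustrative sketch (assumption, not from this file): an architecture can
 * provide a non-weak pcibios_root_bridge_prepare() to act on platform data
 * stored in the root bus sysdata before the bridge device is registered.
 * The sysdata layout (struct example_sysdata) is a hypothetical placeholder.
 */
#if 0	/* example only, not built */
struct example_sysdata {
	int node;			/* NUMA node of the host bridge */
};

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct example_sysdata *sd = bridge->bus->sysdata;

	set_dev_node(&bridge->dev, sd->node);
	return 0;			/* an error here aborts pci_create_root_bus() */
}
#endif
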
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	pci_bus_assign_domain_nr(b, parent);
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);
	pci_set_bus_msi_domain(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);

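/*
 * Illustrative sketch (assumption, not from this file): a typical calling
 * sequence for a host controller driver whose memory window is translated
 * between CPU and bus addresses.  The resource values, the ops structure and
 * the helper name are hypothetical placeholders; pci_add_resource_offset()
 * records the CPU-to-bus offset that the "root bus resource" message above
 * reports.
 */
#if 0	/* example only, not built */
static struct resource example_mem = {
	.name	= "PCI MEM",
	.start	= 0x40000000,
	.end	= 0x4fffffff,
	.flags	= IORESOURCE_MEM,
};

static struct resource example_busn = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

static struct pci_bus *example_create_root(struct device *parent,
					   struct pci_ops *example_ops,
					   void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *bus;

	/* CPU 0x40000000 maps to bus address 0x0: offset = CPU - bus */
	pci_add_resource_offset(&resources, &example_mem, 0x40000000);
	pci_add_resource(&resources, &example_busn);

	bus = pci_create_root_bus(parent, 0, example_ops, sysdata, &resources);
	if (!bus) {
		pci_free_resource_list(&resources);
		return NULL;
	}

	pci_scan_child_bus(bus);
	pci_bus_add_devices(bus);
	return bus;
}
#endif
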
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata,
		struct list_head *resources, struct msi_controller *msi)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	resource_list_for_each_entry(window, resources)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	b->msi = msi;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return b;
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

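/*
 * Illustrative sketch (assumption, not from this file): legacy architecture
 * code typically calls pci_scan_bus() once per root bus and then binds
 * drivers to what was found.  example_ops and example_sysdata are
 * hypothetical placeholders.
 */
#if 0	/* example only, not built */
static struct pci_ops example_ops;
static int example_sysdata;

static int __init example_pcibios_init(void)
{
	struct pci_bus *root;

	root = pci_scan_bus(0, &example_ops, &example_sysdata);
	if (!root)
		return -ENODEV;

	pci_bus_add_devices(root);	/* bind drivers to the scanned devices */
	return 0;
}
#endif
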
/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resources if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);

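/*
 * Illustrative sketch (assumption, not from this file): how a rescan
 * initiated outside this file, e.g. from a sysfs attribute or a platform
 * driver, is expected to take the lock above around pci_rescan_bus().
 */
#if 0	/* example only, not built */
static void example_rescan_root_buses(void)
{
	struct pci_bus *bus;

	pci_lock_rescan_remove();
	list_for_each_entry(bus, &pci_root_buses, node)
		pci_rescan_bus(bus);
	pci_unlock_rescan_remove();
}
#endif
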
static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;

	if (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return 1;

	if (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return 1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}
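
/*
 * Illustrative sketch (assumption, not from this file): architectures that
 * want stable, breadth-first device ordering (historically x86, via the
 * "pci=bfsort" option or DMI quirks) call pci_sort_breadthfirst() once,
 * after all root buses have been scanned and before drivers are bound.
 */
#if 0	/* example only, not built */
static int __init example_arch_pci_init(void)
{
	/* ... scan all root buses first ... */
	pci_sort_breadthfirst();
	return 0;
}
#endif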