/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh.  Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we think PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent.  */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s.  */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

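/*
 * Illustrative example (added commentary, not part of the original file):
 * for a 32-bit memory BAR that reads back 0xfffff000 after the all-ones
 * sizing write, the significant bits are 0xfffff000, the lowest set bit
 * is 0x1000, so the BAR decodes 4K and pci_size() returns 0xfff (the
 * extent, i.e. size - 1).
 */
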
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

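/*
 * Illustrative example (added commentary, not part of the original file):
 * a BAR register value of 0xf7a0000c has bit 0 clear (memory space),
 * type bits 2:1 = 10b (64-bit) and bit 3 set (prefetchable), so
 * decode_bar() returns IORESOURCE_MEM | IORESOURCE_PREFETCH |
 * IORESOURCE_MEM_64 together with the BAR's own low flag bits.
 */
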
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;


fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

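/*
 * Note (added commentary, not part of the original file): the BAR is
 * sized by writing all 1s and reading back what the device latched.
 * While that bogus all-ones address is momentarily programmed, I/O and
 * memory decode are turned off above (unless mmio_always_on) so the
 * device cannot claim stray transactions at the temporary address.
 */
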
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
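
/*
 * Illustrative example (added commentary, not part of the original file):
 * with the default 4K granularity, PCI_IO_BASE = 0x21 and
 * PCI_IO_LIMIT = 0x51 decode to base 0x2000 and limit 0x5000, giving an
 * I/O window of [0x2000, 0x5fff]; the low nibble carries the addressing
 * capability (16- vs 32-bit I/O) rather than address bits.
 */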

static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
					 IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}

static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}

static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}
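
/*
 * Illustrative example (added commentary, not part of the original file):
 * in AGP3 mode the same rate bits mean higher speeds, hence "index += 2"
 * above - agpstat bit 1 (value 2) yields AGP_2X on an AGP 2.0 bridge but
 * AGP_8X when agp3 is set; the reserved combination (index 5) falls back
 * to AGP_UNKNOWN.
 */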

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

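	/*
	 * Layout of the PCI_PRIMARY_BUS dword read above (added commentary,
	 * not in the original file): bits 7:0 primary bus, 15:8 secondary
	 * bus, 23:16 subordinate bus, 31:24 secondary latency timer - which
	 * is why the CardBus path below ORs in CARDBUS_LATENCY_TIMER << 24.
	 */
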
	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1, 0xff);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);

/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port is always the upstream end of a Link.  No PCIe
	 * component has two Links.  Two Links are connected by a Switch
	 * that has a Port on each Link and internal logic to connect the
	 * two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}

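/*
 * Illustrative example (added commentary, not part of the original file):
 * pcie_mpss is the encoded Max_Payload_Size_Supported from DEVCAP, so a
 * device with pcie_mpss = 2 supports payloads up to 128 << 2 = 512 bytes;
 * the function above only aligns a device to its upstream bridge in the
 * default pcie_bus_config mode.
 */
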
static struct hpp_type0 pci_default_type0 = {
	.revision = 1,
	.cache_line_size = 8,
	.latency_timer = 0x40,
	.enable_serr = 0,
	.enable_perr = 0,
};

static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
{
	u16 pci_cmd, pci_bctl;

	if (!hpp)
		hpp = &pci_default_type0;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev,
			 "PCI settings rev %d not supported; using defaults\n",
			 hpp->revision);
		hpp = &pci_default_type0;
	}

	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
	if (hpp->enable_serr)
		pci_cmd |= PCI_COMMAND_SERR;
	if (hpp->enable_perr)
		pci_cmd |= PCI_COMMAND_PARITY;
	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);

	/* Program bridge control value */
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
				      hpp->latency_timer);
		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
		if (hpp->enable_serr)
			pci_bctl |= PCI_BRIDGE_CTL_SERR;
		if (hpp->enable_perr)
			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
	}
}

static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
{
	if (hpp)
		dev_warn(&dev->dev, "PCI-X settings not supported\n");
}

static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
{
	int pos;
	u32 reg32;

	if (!hpp)
		return;

	if (hpp->revision > 1) {
		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
			 hpp->revision);
		return;
	}

	/*
	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
	 * those to make sure they're consistent with the rest of the
	 * platform.
	 */
	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ;
	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
				    PCI_EXP_DEVCTL_READRQ);

	/* Initialize Device Control Register */
	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);

	/* Initialize Link Control Register */
	if (pcie_cap_has_lnkctl(dev))
		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);

	/* Find Advanced Error Reporting Enhanced Capability */
	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return;

	/* Initialize Uncorrectable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);

	/* Initialize Uncorrectable Error Severity Register */
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);

	/* Initialize Correctable Error Mask Register */
	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);

	/* Initialize Advanced Error Capabilities and Control Register */
	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);

	/*
	 * FIXME: The following two registers are not supported yet.
	 *
	 *   o Secondary Uncorrectable Error Severity Register
	 *   o Secondary Uncorrectable Error Mask Register
	 */
}

static void pci_configure_device(struct pci_dev *dev)
{
	struct hotplug_params hpp;
	int ret;

	pci_configure_mps(dev);

	memset(&hpp, 0, sizeof(hpp));
	ret = pci_get_hp_params(dev, &hpp);
	if (ret)
		return;

	program_hpp_type2(dev, hpp.t2);
	program_hpp_type1(dev, hpp.t1);
	program_hpp_type0(dev, hpp.t0);
}

static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}

/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	pci_bus_put(pci_dev->bus);
	kfree(pci_dev->driver_override);
	kfree(pci_dev->dma_alias_mask);
	kfree(pci_dev);
}

struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
{
	struct pci_dev *dev;

	dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
	if (!dev)
		return NULL;

	INIT_LIST_HEAD(&dev->bus_list);
	dev->dev.type = &pci_dev_type;
	dev->bus = pci_bus_get(bus);

	return dev;
}
EXPORT_SYMBOL(pci_alloc_dev);

bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
				int crs_timeout)
{
	int delay = 1;

	if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
		return false;

	/* some broken boards return 0 or ~0 if a slot is empty: */
	if (*l == 0xffffffff || *l == 0x00000000 ||
	    *l == 0x0000ffff || *l == 0xffff0000)
		return false;

	/*
	 * Configuration Request Retry Status.  Some root ports return the
	 * actual device ID instead of the synthetic ID (0xFFFF) required
	 * by the PCIe spec.  Ignore the device ID and only check for
	 * (vendor id == 1).
	 */
	while ((*l & 0xffff) == 0x0001) {
		if (!crs_timeout)
			return false;

		msleep(delay);
		delay *= 2;
		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;
		/* Card hasn't responded in 60 seconds?  Must be stuck. */
		if (delay > crs_timeout) {
			printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
			       pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
			       PCI_FUNC(devfn));
			return false;
		}
	}

	return true;
}
EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);

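/*
 * Note (added commentary, not part of the original file): while a device
 * keeps returning the CRS vendor ID 0x0001, the loop above retries with
 * an exponentially growing delay (1 ms, 2 ms, 4 ms, ...) and gives up
 * once the per-iteration delay exceeds crs_timeout milliseconds.
 */
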
/*
 * Read the config data for a PCI device, sanity-check it
 * and fill in the dev structure...
 */
static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	u32 l;

	if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
		return NULL;

	dev = pci_alloc_dev(bus);
	if (!dev)
		return NULL;

	dev->devfn = devfn;
	dev->vendor = l & 0xffff;
	dev->device = (l >> 16) & 0xffff;

	pci_set_of_node(dev);

	if (pci_setup_device(dev)) {
		pci_bus_put(dev->bus);
		kfree(dev);
		return NULL;
	}

	return dev;
}

static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	pci_cleanup_aer_error_status_regs(dev);
}

/*
 * This is the equivalent of pci_host_bridge_msi_domain that acts on
 * devices.  Firmware interfaces that can select the MSI domain on a
 * per-device basis should be called from here.
 */
static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If a domain has been set through the pcibios_add_device
	 * callback, then this is the one (platform code knows best).
	 */
	d = dev_get_msi_domain(&dev->dev);
	if (d)
		return d;

	/*
	 * Let's see if we have a firmware interface able to provide
	 * the domain.
	 */
	d = pci_msi_get_device_domain(dev);
	if (d)
		return d;

	return NULL;
}

static void pci_set_msi_domain(struct pci_dev *dev)
{
	struct irq_domain *d;

	/*
	 * If the platform or firmware interfaces cannot supply a
	 * device-specific MSI domain, then inherit the default domain
	 * from the host bridge itself.
	 */
	d = pci_dev_msi_domain(dev);
	if (!d)
		d = dev_get_msi_domain(&dev->bus->dev);

	dev_set_msi_domain(&dev->dev, d);
}

/**
 * pci_dma_configure - Setup DMA configuration
 * @dev: ptr to pci_dev struct of the PCI device
 *
 * Function to update the PCI device's DMA configuration using the same
 * info from the OF node or ACPI node of the host bridge's parent (if any).
 */
static void pci_dma_configure(struct pci_dev *dev)
{
	struct device *bridge = pci_get_host_bridge_device(dev);

	if (IS_ENABLED(CONFIG_OF) &&
		bridge->parent && bridge->parent->of_node) {
		of_dma_configure(&dev->dev, bridge->parent->of_node);
	} else if (has_acpi_companion(bridge)) {
		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
		enum dev_dma_attr attr = acpi_get_dma_attr(adev);

		if (attr == DEV_DMA_NOT_SUPPORTED)
			dev_warn(&dev->dev, "DMA not supported.\n");
		else
			arch_setup_dma_ops(&dev->dev, 0, 0, NULL,
					   attr == DEV_DMA_COHERENT);
	}

	pci_put_host_bridge_device(bridge);
}

void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	pci_configure_device(dev);

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;
	pci_dma_configure(dev);

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Setup MSI irq domain */
	pci_set_msi_domain(dev);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}

struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	dev = pci_get_slot(bus, devfn);
	if (dev) {
		pci_dev_put(dev);
		return dev;
	}

	dev = pci_scan_device(bus, devfn);
	if (!dev)
		return NULL;

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
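
/*
 * Editor's note (not part of the original file): a minimal usage sketch for
 * pci_scan_single_device(), assuming a hotplug-style caller that knows the
 * bus and devfn of interest.  demo_probe_one_slot() is a hypothetical name.
 */
#if 0	/* illustrative only, not compiled */
static void demo_probe_one_slot(struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;

	pci_lock_rescan_remove();
	dev = pci_scan_single_device(bus, devfn);	/* adds dev to bus->devices */
	if (dev)
		pci_bus_add_devices(bus);		/* bind drivers to new devices */
	pci_unlock_rescan_remove();
}
#endif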

static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
{
	int pos;
	u16 cap = 0;
	unsigned next_fn;

	if (pci_ari_enabled(bus)) {
		if (!dev)
			return 0;
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
		if (!pos)
			return 0;

		pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
		next_fn = PCI_ARI_CAP_NFN(cap);
		if (next_fn <= fn)
			return 0;	/* protect against malformed list */

		return next_fn;
	}

	/* dev may be NULL for non-contiguous multifunction devices */
	if (!dev || dev->multifunction)
		return (fn + 1) % 8;

	return 0;
}

static int only_one_child(struct pci_bus *bus)
{
	struct pci_dev *parent = bus->self;

	if (!parent || !pci_is_pcie(parent))
		return 0;
	if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
		return 1;

	/*
	 * PCIe downstream ports are bridges that normally lead to only a
	 * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
	 * possible devices, not just device 0.  See PCIe spec r3.0,
	 * sec 7.3.1.
	 */
	if (parent->has_secondary_link &&
	    !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
		return 1;
	return 0;
}

/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
EXPORT_SYMBOL(pci_scan_slot);
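
/*
 * Editor's note (not part of the original file): a minimal sketch of how a
 * caller might walk all 32 slots on a bus with pci_scan_slot(), mirroring
 * the loop used by pci_scan_child_bus() further below.  The function name
 * demo_scan_whole_bus() is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static unsigned int demo_scan_whole_bus(struct pci_bus *bus)
{
	unsigned int devfn, nr = 0;

	/* devfn packs slot and function: 32 slots x 8 functions per bus */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		nr += pci_scan_slot(bus, devfn);	/* scan function 0 of each slot */

	return nr;	/* number of newly discovered devices */
}
#endif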

static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the
	 * minimum MPS setting (MPS=128).  Therefore, if the fabric contains
	 * a bridge where devices may be hot-added, we limit the fabric MPS
	 * to 128 so hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}

static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}

static void pcie_write_mrrs(struct pci_dev *dev)
{
	int rc, mrrs;

	/* In the "safe" case, do not configure the MRRS.  There appear to be
	 * issues with setting MRRS to 0 on a number of devices.
	 */
	if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
		return;

	/* For max performance, the MRRS must be set to the largest supported
	 * value.  However, it cannot be configured larger than the MPS the
	 * device or the bus can support.  This should already be properly
	 * configured by a prior call to pcie_write_mps().
	 */
	mrrs = pcie_get_mps(dev);

	/* MRRS is an R/W register.  Invalid values can be written, but a
	 * subsequent read will verify if the value is acceptable or not.
	 * If the MRRS value provided is not acceptable (e.g., too large),
	 * shrink the value until it is acceptable to the HW.
	 */
	while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
		rc = pcie_set_readrq(dev, mrrs);
		if (!rc)
			break;

		dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
		mrrs /= 2;
	}

	if (mrrs < 128)
		dev_err(&dev->dev, "MRRS was unable to be configured with a safe value.  If problems are experienced, try running with pci=pcie_bus_safe\n");
}

static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
	    pcie_bus_config == PCIE_BUS_DEFAULT)
		return 0;

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
		 pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}

/* pcie_bus_configure_settings() requires that pci_walk_bus work in a top-down,
 * parents then children fashion.  If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination.  To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
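
/*
 * Editor's note (not part of the original file): host bridge drivers
 * typically call pcie_bus_configure_settings() once per child bus after
 * resource assignment and before adding the devices.  A rough sketch,
 * using the hypothetical name demo_finish_root_bus():
 */
#if 0	/* illustrative only, not compiled */
static void demo_finish_root_bus(struct pci_bus *root)
{
	struct pci_bus *child;

	pci_assign_unassigned_bus_resources(root);
	list_for_each_entry(child, &root->children, node)
		pcie_bus_configure_settings(child);	/* tune MPS/MRRS per link */
	pci_bus_add_devices(root);
}
#endif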

unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	return 0;
}
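
/*
 * Editor's note (not part of the original file): an architecture can
 * override the weak default above to attach platform information to the
 * host bridge before it is registered.  A rough sketch, loosely modeled on
 * ACPI-based platforms; struct demo_sysdata and its companion field are
 * made-up names, not real kernel symbols.
 */
#if 0	/* illustrative only, not compiled */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	struct demo_sysdata *sd = bridge->bus->sysdata;	/* hypothetical layout */

	/* propagate the firmware companion from sysdata to the bridge device */
	ACPI_COMPANION_SET(&bridge->dev, sd->companion);
	return 0;
}
#endif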

void __weak pcibios_add_bus(struct pci_bus *bus)
{
}

void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}

struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct resource_entry *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus(NULL);
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	pci_bus_assign_domain_nr(b, parent);
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);
	pci_set_bus_msi_domain(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, resources) {
		list_move_tail(&window->node, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
EXPORT_SYMBOL_GPL(pci_create_root_bus);
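
/*
 * Editor's note (not part of the original file): a minimal sketch of root
 * bus creation for a host controller driver.  The resource and ops symbols
 * (demo_busn_res, demo_io, demo_io_offset, demo_mem, demo_ops, demo_sysdata)
 * are placeholders, not real kernel symbols.
 */
#if 0	/* illustrative only, not compiled */
static struct pci_bus *demo_create_root(struct device *parent)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &demo_busn_res);	/* IORESOURCE_BUS window */
	pci_add_resource_offset(&resources, &demo_io, demo_io_offset);
	pci_add_resource(&resources, &demo_mem);

	b = pci_create_root_bus(parent, 0, &demo_ops, demo_sysdata, &resources);
	if (!b)
		pci_free_resource_list(&resources);	/* list untouched on failure */
	return b;
}
#endif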

int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}

int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}

void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}

struct pci_bus *pci_scan_root_bus_msi(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata,
		struct list_head *resources, struct msi_controller *msi)
{
	struct resource_entry *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	resource_list_for_each_entry(window, resources)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	b->msi = msi;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	return b;
}

struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	return pci_scan_root_bus_msi(parent, bus, ops, sysdata, resources,
				     NULL);
}
EXPORT_SYMBOL(pci_scan_root_bus);
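
/*
 * Editor's note (not part of the original file): a host controller probe
 * path commonly ends with a call sequence like the sketch below.  The
 * demo_* names stand in for the driver's own ops and resource list.
 */
#if 0	/* illustrative only, not compiled */
static int demo_host_probe(struct device *dev, struct list_head *res)
{
	struct pci_bus *bus;

	bus = pci_scan_root_bus(dev, 0, &demo_ops, NULL, res);
	if (!bus)
		return -ENOMEM;

	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);	/* bind drivers once resources are final */
	return 0;
}
#endif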

struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
					void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
	if (b) {
		pci_scan_child_bus(b);
	} else {
		pci_free_resource_list(&resources);
	}
	return b;
}
EXPORT_SYMBOL(pci_scan_bus);

/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resources if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}
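
/*
 * Editor's note (not part of the original file): as the comment above says,
 * callers are expected to remove the child devices first so the bridge
 * windows can be resized.  A rough sketch of such a caller, using the
 * hypothetical name demo_resize_behind_bridge():
 */
#if 0	/* illustrative only, not compiled */
static void demo_resize_behind_bridge(struct pci_dev *bridge)
{
	struct pci_dev *dev, *tmp;

	pci_lock_rescan_remove();
	list_for_each_entry_safe(dev, tmp, &bridge->subordinate->devices,
				 bus_list)
		pci_stop_and_remove_bus_device(dev);	/* free the old resources */
	pci_rescan_bus_bridge_resize(bridge);		/* rescan, resize, re-add */
	pci_unlock_rescan_remove();
}
#endif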

/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate buses discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
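
/*
 * Editor's note (not part of the original file): pci_rescan_bus() is the
 * helper used, for example, by the sysfs "rescan" attributes.  A minimal
 * caller wraps it in the rescan/remove lock defined just below; the name
 * demo_rescan_all_root_buses() is hypothetical.
 */
#if 0	/* illustrative only, not compiled */
static void demo_rescan_all_root_buses(void)
{
	struct pci_bus *b;

	pci_lock_rescan_remove();
	list_for_each_entry(b, &pci_root_buses, node)
		pci_rescan_bus(b);	/* picks up and enables any new devices */
	pci_unlock_rescan_remove();
}
#endif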

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);

void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);

void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
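
/*
 * Editor's note (not part of the original file): device removal must take
 * the same lock as rescans, per the comment above.  A minimal sketch, with
 * demo_remove_fn() as a hypothetical caller name:
 */
#if 0	/* illustrative only, not compiled */
static void demo_remove_fn(struct pci_dev *dev)
{
	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);	/* serialized against rescans */
	pci_unlock_rescan_remove();
}
#endif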

static int __init pci_sort_bf_cmp(const struct device *d_a,
				  const struct device *d_b)
{
	const struct pci_dev *a = to_pci_dev(d_a);
	const struct pci_dev *b = to_pci_dev(d_b);

	if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
	else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;

	if (a->bus->number < b->bus->number) return -1;
	else if (a->bus->number > b->bus->number) return 1;

	if (a->devfn < b->devfn) return -1;
	else if (a->devfn > b->devfn) return 1;

	return 0;
}

void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}