#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/wait.h>

#include "pci.h"

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_RAW_SPINLOCK(pci_lock);

/*
 * Wrappers for all PCI configuration access functions.  They just check
 * alignment, do locking and call the low-level functions pointed to
 * by pci_dev->ops.
 */

#define PCI_byte_BAD 0
#define PCI_word_BAD (pos & 1)
#define PCI_dword_BAD (pos & 3)

#define PCI_OP_READ(size, type, len) \
int pci_bus_read_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type *value)	\
{									\
	int res;							\
	unsigned long flags;						\
	u32 data = 0;							\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->read(bus, devfn, pos, len, &data);		\
	*value = (type)data;						\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

#define PCI_OP_WRITE(size, type, len) \
int pci_bus_write_config_##size \
	(struct pci_bus *bus, unsigned int devfn, int pos, type value)	\
{									\
	int res;							\
	unsigned long flags;						\
	if (PCI_##size##_BAD) return PCIBIOS_BAD_REGISTER_NUMBER;	\
	raw_spin_lock_irqsave(&pci_lock, flags);			\
	res = bus->ops->write(bus, devfn, pos, len, value);		\
	raw_spin_unlock_irqrestore(&pci_lock, flags);			\
	return res;							\
}

PCI_OP_READ(byte, u8, 1)
PCI_OP_READ(word, u16, 2)
PCI_OP_READ(dword, u32, 4)
PCI_OP_WRITE(byte, u8, 1)
PCI_OP_WRITE(word, u16, 2)
PCI_OP_WRITE(dword, u32, 4)

EXPORT_SYMBOL(pci_bus_read_config_byte);
EXPORT_SYMBOL(pci_bus_read_config_word);
EXPORT_SYMBOL(pci_bus_read_config_dword);
EXPORT_SYMBOL(pci_bus_write_config_byte);
EXPORT_SYMBOL(pci_bus_write_config_word);
EXPORT_SYMBOL(pci_bus_write_config_dword);
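
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * use the bus-level accessors generated above.  The helper name and the
 * assumption that a valid "bus" pointer is available are hypothetical.
 */
static inline void pci_bus_cfg_example(struct pci_bus *bus)
{
	u16 vendor;

	/* Read the Vendor ID of device 0, function 0 on this bus. */
	if (pci_bus_read_config_word(bus, PCI_DEVFN(0, 0), PCI_VENDOR_ID,
				     &vendor) == PCIBIOS_SUCCESSFUL)
		pr_info("example: vendor ID %#06x\n", vendor);
}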

int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	if (size == 1)
		*val = readb(addr);
	else if (size == 2)
		*val = readw(addr);
	else
		*val = readl(addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read);

int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn,
			     int where, int size, u32 val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 1)
		writeb(val, addr);
	else if (size == 2)
		writew(val, addr);
	else
		writel(val, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write);

int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn,
			      int where, int size, u32 *val)
{
	void __iomem *addr;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr) {
		*val = ~0;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	*val = readl(addr);

	if (size <= 2)
		*val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_read32);

int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
			       int where, int size, u32 val)
{
	void __iomem *addr;
	u32 mask, tmp;

	addr = bus->ops->map_bus(bus, devfn, where & ~0x3);
	if (!addr)
		return PCIBIOS_DEVICE_NOT_FOUND;

	if (size == 4) {
		writel(val, addr);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * Sub-word write: read-modify-write the containing 32-bit word.
	 * Note that this can clobber RW1C (write-one-to-clear) bits in
	 * the bytes we do not intend to write.
	 */
	mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8));
	tmp = readl(addr) & mask;
	tmp |= val << ((where & 0x3) * 8);
	writel(tmp, addr);

	return PCIBIOS_SUCCESSFUL;
}
EXPORT_SYMBOL_GPL(pci_generic_config_write32);

/**
 * pci_bus_set_ops - Set raw operations of pci bus
 * @bus:	pci bus struct
 * @ops:	new raw operations
 *
 * Return previous raw operations
 */
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops)
{
	struct pci_ops *old_ops;
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);
	old_ops = bus->ops;
	bus->ops = ops;
	raw_spin_unlock_irqrestore(&pci_lock, flags);
	return old_ops;
}
EXPORT_SYMBOL(pci_bus_set_ops);
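
/*
 * Illustrative sketch (not part of the original file): temporarily
 * overriding a bus's config accessors and restoring them afterwards,
 * as some quirks and debug helpers do.  "my_pci_ops" and the helper
 * name are hypothetical.
 */
static inline void pci_bus_set_ops_example(struct pci_bus *bus,
					   struct pci_ops *my_pci_ops)
{
	struct pci_ops *orig_ops;

	orig_ops = pci_bus_set_ops(bus, my_pci_ops);
	/* ... perform config accesses routed through my_pci_ops ... */
	pci_bus_set_ops(bus, orig_ops);
}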

/*
 * The following routines are to prevent the user from accessing PCI config
 * space when it's unsafe to do so.  Some devices require this during BIST and
 * we're required to prevent it during D-state transitions.
 *
 * We have a bit per device to indicate it's blocked and a global wait queue
 * for callers to sleep on until devices are unblocked.
 */
static DECLARE_WAIT_QUEUE_HEAD(pci_cfg_wait);

static noinline void pci_wait_cfg(struct pci_dev *dev)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&pci_cfg_wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		raw_spin_unlock_irq(&pci_lock);
		schedule();
		raw_spin_lock_irq(&pci_lock);
	} while (dev->block_cfg_access);
	__remove_wait_queue(&pci_cfg_wait, &wait);
}

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_READ_CONFIG(size, type)	\
int pci_user_read_config_##size				\
	(struct pci_dev *dev, int pos, type *val)	\
{							\
	int ret = PCIBIOS_SUCCESSFUL;			\
	u32 data = -1;					\
	if (PCI_##size##_BAD)				\
		return -EINVAL;				\
	raw_spin_lock_irq(&pci_lock);			\
	if (unlikely(dev->block_cfg_access))		\
		pci_wait_cfg(dev);			\
	ret = dev->bus->ops->read(dev->bus, dev->devfn,	\
				  pos, sizeof(type), &data);	\
	raw_spin_unlock_irq(&pci_lock);			\
	*val = (type)data;				\
	return pcibios_err_to_errno(ret);		\
}							\
EXPORT_SYMBOL_GPL(pci_user_read_config_##size);

/* Returns 0 on success, negative values indicate error. */
#define PCI_USER_WRITE_CONFIG(size, type)	\
int pci_user_write_config_##size			\
	(struct pci_dev *dev, int pos, type val)	\
{							\
	int ret = PCIBIOS_SUCCESSFUL;			\
	if (PCI_##size##_BAD)				\
		return -EINVAL;				\
	raw_spin_lock_irq(&pci_lock);			\
	if (unlikely(dev->block_cfg_access))		\
		pci_wait_cfg(dev);			\
	ret = dev->bus->ops->write(dev->bus, dev->devfn,	\
				   pos, sizeof(type), val);	\
	raw_spin_unlock_irq(&pci_lock);			\
	return pcibios_err_to_errno(ret);		\
}							\
EXPORT_SYMBOL_GPL(pci_user_write_config_##size);

PCI_USER_READ_CONFIG(byte, u8)
PCI_USER_READ_CONFIG(word, u16)
PCI_USER_READ_CONFIG(dword, u32)
PCI_USER_WRITE_CONFIG(byte, u8)
PCI_USER_WRITE_CONFIG(word, u16)
PCI_USER_WRITE_CONFIG(dword, u32)
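
/*
 * Illustrative sketch (not part of the original file): the pci_user_*
 * accessors are intended for user-triggered paths (e.g. sysfs/proc config
 * access).  Unlike the pci_bus_* wrappers they sleep while access to the
 * device is blocked and return a negative errno on failure.  The helper
 * name is hypothetical.
 */
static inline int pci_user_cfg_example(struct pci_dev *dev, u16 *vendor)
{
	/* May sleep in pci_wait_cfg() if config access is currently blocked. */
	return pci_user_read_config_word(dev, PCI_VENDOR_ID, vendor);
}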

/* VPD access through PCI 2.2+ VPD capability */

/**
 * pci_read_vpd - Read one entry from Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to read
 * @buf:	pointer to where to store result
 */
ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->read(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_read_vpd);

/**
 * pci_write_vpd - Write entry to Vital Product Data
 * @dev:	pci device struct
 * @pos:	offset in vpd space
 * @count:	number of bytes to write
 * @buf:	buffer containing write data
 */
ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf)
{
	if (!dev->vpd || !dev->vpd->ops)
		return -ENODEV;
	return dev->vpd->ops->write(dev, pos, count, buf);
}
EXPORT_SYMBOL(pci_write_vpd);
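
/*
 * Illustrative sketch (not part of the original file): reading the first
 * bytes of a device's VPD through the exported wrapper.  Buffer size and
 * helper name are arbitrary.
 */
static inline void pci_vpd_read_example(struct pci_dev *dev)
{
	u8 buf[16];
	ssize_t len;

	len = pci_read_vpd(dev, 0, sizeof(buf), buf);
	if (len < 0)
		dev_info(&dev->dev, "no VPD available (%zd)\n", len);
	else
		dev_info(&dev->dev, "read %zd bytes of VPD\n", len);
}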

#define PCI_VPD_MAX_SIZE (PCI_VPD_ADDR_MASK + 1)

/**
 * pci_vpd_size - determine actual size of Vital Product Data
 * @dev:	pci device struct
 * @old_size:	current assumed size, also maximum allowed size
 */
static size_t pci_vpd_size(struct pci_dev *dev, size_t old_size)
{
	size_t off = 0;
	unsigned char header[1+2];	/* 1 byte tag, 2 bytes length */

	while (off < old_size &&
	       pci_read_vpd(dev, off, 1, header) == 1) {
		unsigned char tag;

		if (header[0] & PCI_VPD_LRDT) {
			/* Large Resource Data Type Tag */
			tag = pci_vpd_lrdt_tag(header);
			/* Only read length from known tag items */
			if ((tag == PCI_VPD_LTIN_ID_STRING) ||
			    (tag == PCI_VPD_LTIN_RO_DATA) ||
			    (tag == PCI_VPD_LTIN_RW_DATA)) {
				if (pci_read_vpd(dev, off+1, 2,
						 &header[1]) != 2) {
					dev_warn(&dev->dev,
						 "invalid large VPD tag %02x size at offset %zu",
						 tag, off + 1);
					return 0;
				}
				off += PCI_VPD_LRDT_TAG_SIZE +
					pci_vpd_lrdt_size(header);
			}
		} else {
			/* Short Resource Data Type Tag */
			off += PCI_VPD_SRDT_TAG_SIZE +
				pci_vpd_srdt_size(header);
			tag = pci_vpd_srdt_tag(header);
		}

		if (tag == PCI_VPD_STIN_END)	/* End tag descriptor */
			return off;

		if ((tag != PCI_VPD_LTIN_ID_STRING) &&
		    (tag != PCI_VPD_LTIN_RO_DATA) &&
		    (tag != PCI_VPD_LTIN_RW_DATA)) {
			dev_warn(&dev->dev,
				 "invalid %s VPD tag %02x at offset %zu",
				 (header[0] & PCI_VPD_LRDT) ? "large" : "short",
				 tag, off);
			return 0;
		}
	}
	return 0;
}

/*
 * Wait for last operation to complete.
 * This code has to spin since there is no other notification from the PCI
 * hardware. Since the VPD is often implemented by serial attachment to an
 * EEPROM, it may take many milliseconds to complete.
 *
 * Returns 0 on success, negative values indicate error.
 */
static int pci_vpd_wait(struct pci_dev *dev)
{
	struct pci_vpd *vpd = dev->vpd;
	unsigned long timeout = jiffies + msecs_to_jiffies(50);
	unsigned long max_sleep = 16;
	u16 status;
	int ret;

	if (!vpd->busy)
		return 0;

	while (time_before(jiffies, timeout)) {
		ret = pci_user_read_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						&status);
		if (ret < 0)
			return ret;

		if ((status & PCI_VPD_ADDR_F) == vpd->flag) {
			vpd->busy = 0;
			return 0;
		}

		if (fatal_signal_pending(current))
			return -EINTR;

		usleep_range(10, max_sleep);
		if (max_sleep < 1024)
			max_sleep *= 2;
	}

	dev_warn(&dev->dev, "VPD access failed. This is likely a firmware bug on this device. Contact the card vendor for a firmware update\n");
	return -ETIMEDOUT;
}

static ssize_t pci_vpd_read(struct pci_dev *dev, loff_t pos, size_t count,
			    void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	int ret;
	loff_t end = pos + count;
	u8 *buf = arg;

	if (pos < 0)
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (pos > vpd->len)
		return 0;

	if (end > vpd->len) {
		end = vpd->len;
		count = end - pos;
	}

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;
		unsigned int i, skip;

		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos & ~3);
		if (ret < 0)
			break;
		vpd->busy = 1;
		vpd->flag = PCI_VPD_ADDR_F;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		ret = pci_user_read_config_dword(dev, vpd->cap + PCI_VPD_DATA, &val);
		if (ret < 0)
			break;

		skip = pos & 3;
		for (i = 0; i < sizeof(u32); i++) {
			if (i >= skip) {
				*buf++ = val;
				if (++pos == end)
					break;
			}
			val >>= 8;
		}
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static ssize_t pci_vpd_write(struct pci_dev *dev, loff_t pos, size_t count,
			     const void *arg)
{
	struct pci_vpd *vpd = dev->vpd;
	const u8 *buf = arg;
	loff_t end = pos + count;
	int ret = 0;

	if (pos < 0 || (pos & 3) || (count & 3))
		return -EINVAL;

	if (!vpd->valid) {
		vpd->valid = 1;
		vpd->len = pci_vpd_size(dev, vpd->len);
	}

	if (vpd->len == 0)
		return -EIO;

	if (end > vpd->len)
		return -EINVAL;

	if (mutex_lock_killable(&vpd->lock))
		return -EINTR;

	ret = pci_vpd_wait(dev);
	if (ret < 0)
		goto out;

	while (pos < end) {
		u32 val;

		val = *buf++;
		val |= *buf++ << 8;
		val |= *buf++ << 16;
		val |= *buf++ << 24;

		ret = pci_user_write_config_dword(dev, vpd->cap + PCI_VPD_DATA, val);
		if (ret < 0)
			break;
		ret = pci_user_write_config_word(dev, vpd->cap + PCI_VPD_ADDR,
						 pos | PCI_VPD_ADDR_F);
		if (ret < 0)
			break;

		vpd->busy = 1;
		vpd->flag = 0;
		ret = pci_vpd_wait(dev);
		if (ret < 0)
			break;

		pos += sizeof(u32);
	}
out:
	mutex_unlock(&vpd->lock);
	return ret ? ret : count;
}

static const struct pci_vpd_ops pci_vpd_ops = {
	.read = pci_vpd_read,
	.write = pci_vpd_write,
};

static ssize_t pci_vpd_f0_read(struct pci_dev *dev, loff_t pos, size_t count,
			       void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_read_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static ssize_t pci_vpd_f0_write(struct pci_dev *dev, loff_t pos, size_t count,
				const void *arg)
{
	struct pci_dev *tdev = pci_get_slot(dev->bus,
					    PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
	ssize_t ret;

	if (!tdev)
		return -ENODEV;

	ret = pci_write_vpd(tdev, pos, count, arg);
	pci_dev_put(tdev);
	return ret;
}

static const struct pci_vpd_ops pci_vpd_f0_ops = {
	.read = pci_vpd_f0_read,
	.write = pci_vpd_f0_write,
};

int pci_vpd_init(struct pci_dev *dev)
{
	struct pci_vpd *vpd;
	u8 cap;

	cap = pci_find_capability(dev, PCI_CAP_ID_VPD);
	if (!cap)
		return -ENODEV;

	vpd = kzalloc(sizeof(*vpd), GFP_ATOMIC);
	if (!vpd)
		return -ENOMEM;

	vpd->len = PCI_VPD_MAX_SIZE;
	if (dev->dev_flags & PCI_DEV_FLAGS_VPD_REF_F0)
		vpd->ops = &pci_vpd_f0_ops;
	else
		vpd->ops = &pci_vpd_ops;
	mutex_init(&vpd->lock);
	vpd->cap = cap;
	dev->vpd = vpd;
	return 0;
}

void pci_vpd_release(struct pci_dev *dev)
{
	kfree(dev->vpd);
}

/**
 * pci_cfg_access_lock - Lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * When access is locked, any userspace reads or writes to config
 * space and concurrent lock requests will sleep until access is
 * allowed via pci_cfg_access_unlock() again.
 */
void pci_cfg_access_lock(struct pci_dev *dev)
{
	might_sleep();

	raw_spin_lock_irq(&pci_lock);
	if (dev->block_cfg_access)
		pci_wait_cfg(dev);
	dev->block_cfg_access = 1;
	raw_spin_unlock_irq(&pci_lock);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_lock);

/**
 * pci_cfg_access_trylock - try to lock PCI config reads/writes
 * @dev:	pci device struct
 *
 * Same as pci_cfg_access_lock, but will return 0 if access is
 * already locked, 1 otherwise. This function can be used from
 * atomic contexts.
 */
bool pci_cfg_access_trylock(struct pci_dev *dev)
{
	unsigned long flags;
	bool locked = true;

	raw_spin_lock_irqsave(&pci_lock, flags);
	if (dev->block_cfg_access)
		locked = false;
	else
		dev->block_cfg_access = 1;
	raw_spin_unlock_irqrestore(&pci_lock, flags);

	return locked;
}
EXPORT_SYMBOL_GPL(pci_cfg_access_trylock);

/**
 * pci_cfg_access_unlock - Unlock PCI config reads/writes
 * @dev:	pci device struct
 *
 * This function allows PCI config accesses to resume.
 */
void pci_cfg_access_unlock(struct pci_dev *dev)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pci_lock, flags);

	/*
	 * This indicates a problem in the caller, but we don't need
	 * to kill them, unlike a double-block above.
	 */
	WARN_ON(!dev->block_cfg_access);

	dev->block_cfg_access = 0;
	wake_up_all(&pci_cfg_wait);
	raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
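
/*
 * Illustrative sketch (not part of the original file): the intended
 * lock/unlock pairing around an operation during which config space
 * must not be touched (e.g. running BIST or a D-state transition).
 * The helper name and the elided middle step are placeholders.
 */
static inline void pci_cfg_access_example(struct pci_dev *dev)
{
	pci_cfg_access_lock(dev);	/* may sleep until access is unblocked */
	/* ... perform the operation that must not race with config access ... */
	pci_cfg_access_unlock(dev);	/* wakes any sleeping pci_user_* callers */
}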

static inline int pcie_cap_version(const struct pci_dev *dev)
{
	return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
}

static bool pcie_downstream_port(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_DOWNSTREAM;
}

bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ENDPOINT ||
	       type == PCI_EXP_TYPE_LEG_END ||
	       type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_UPSTREAM ||
	       type == PCI_EXP_TYPE_DOWNSTREAM ||
	       type == PCI_EXP_TYPE_PCI_BRIDGE ||
	       type == PCI_EXP_TYPE_PCIE_BRIDGE;
}

static inline bool pcie_cap_has_sltctl(const struct pci_dev *dev)
{
	return pcie_downstream_port(dev) &&
	       pcie_caps_reg(dev) & PCI_EXP_FLAGS_SLOT;
}

static inline bool pcie_cap_has_rtctl(const struct pci_dev *dev)
{
	int type = pci_pcie_type(dev);

	return type == PCI_EXP_TYPE_ROOT_PORT ||
	       type == PCI_EXP_TYPE_RC_EC;
}

static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
{
	if (!pci_is_pcie(dev))
		return false;

	switch (pos) {
	case PCI_EXP_FLAGS:
		return true;
	case PCI_EXP_DEVCAP:
	case PCI_EXP_DEVCTL:
	case PCI_EXP_DEVSTA:
		return true;
	case PCI_EXP_LNKCAP:
	case PCI_EXP_LNKCTL:
	case PCI_EXP_LNKSTA:
		return pcie_cap_has_lnkctl(dev);
	case PCI_EXP_SLTCAP:
	case PCI_EXP_SLTCTL:
	case PCI_EXP_SLTSTA:
		return pcie_cap_has_sltctl(dev);
	case PCI_EXP_RTCTL:
	case PCI_EXP_RTCAP:
	case PCI_EXP_RTSTA:
		return pcie_cap_has_rtctl(dev);
	case PCI_EXP_DEVCAP2:
	case PCI_EXP_DEVCTL2:
	case PCI_EXP_LNKCAP2:
	case PCI_EXP_LNKCTL2:
	case PCI_EXP_LNKSTA2:
		return pcie_cap_version(dev) > 1;
	default:
		return false;
	}
}

/*
 * Note that these accessor functions are only for the "PCI Express
 * Capability" (see PCIe spec r3.0, sec 7.8).  They do not apply to the
 * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.)
 */
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val)
{
	int ret;

	*val = 0;
	if (pos & 1)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_word() fails, it may
		 * have been written as 0xFFFF if hardware error happens
		 * during pci_read_config_word().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	/*
	 * For Functions that do not implement the Slot Capabilities,
	 * Slot Status, and Slot Control registers, these spaces must
	 * be hardwired to 0b, with the exception of the Presence Detect
	 * State bit in the Slot Status register of Downstream Ports,
	 * which must be hardwired to 1b.  (PCIe Base Spec 3.0, sec 7.8)
	 */
	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_word);

int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val)
{
	int ret;

	*val = 0;
	if (pos & 3)
		return -EINVAL;

	if (pcie_capability_reg_implemented(dev, pos)) {
		ret = pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, val);
		/*
		 * Reset *val to 0 if pci_read_config_dword() fails, it may
		 * have been written as 0xFFFFFFFF if hardware error happens
		 * during pci_read_config_dword().
		 */
		if (ret)
			*val = 0;
		return ret;
	}

	if (pci_is_pcie(dev) && pcie_downstream_port(dev) &&
	    pos == PCI_EXP_SLTSTA)
		*val = PCI_EXP_SLTSTA_PDS;

	return 0;
}
EXPORT_SYMBOL(pcie_capability_read_dword);

int pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
{
	if (pos & 1)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_word);

int pcie_capability_write_dword(struct pci_dev *dev, int pos, u32 val)
{
	if (pos & 3)
		return -EINVAL;

	if (!pcie_capability_reg_implemented(dev, pos))
		return 0;

	return pci_write_config_dword(dev, pci_pcie_cap(dev) + pos, val);
}
EXPORT_SYMBOL(pcie_capability_write_dword);

int pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos,
				       u16 clear, u16 set)
{
	int ret;
	u16 val;

	ret = pcie_capability_read_word(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_word(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_word);

int pcie_capability_clear_and_set_dword(struct pci_dev *dev, int pos,
					u32 clear, u32 set)
{
	int ret;
	u32 val;

	ret = pcie_capability_read_dword(dev, pos, &val);
	if (!ret) {
		val &= ~clear;
		val |= set;
		ret = pcie_capability_write_dword(dev, pos, val);
	}

	return ret;
}
EXPORT_SYMBOL(pcie_capability_clear_and_set_dword);
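
/*
 * Illustrative sketch (not part of the original file): a typical
 * read-modify-write of a PCI Express Capability register using the
 * accessors above, here enabling Extended Tag support in Device Control.
 * The helper name is hypothetical.
 */
static inline int pcie_ext_tag_example(struct pci_dev *dev)
{
	/* Clears nothing, sets the Extended Tag Field Enable bit. */
	return pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
						  0, PCI_EXP_DEVCTL_EXT_TAG);
}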