#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
--- --#include <asm-generic/pci-bridge.h>
#include <asm/setup.h>
#include <linux/aer.h>
#include "pci.h"
}
EXPORT_SYMBOL_GPL(pci_check_and_unmask_intx);
---- --int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size)
---- --{
---- -- return dma_set_max_seg_size(&dev->dev, size);
---- --}
---- --EXPORT_SYMBOL(pci_set_dma_max_seg_size);
---- --
---- --int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask)
---- --{
---- -- return dma_set_seg_boundary(&dev->dev, mask);
---- --}
---- --EXPORT_SYMBOL(pci_set_dma_seg_boundary);
---- --
/**
* pci_wait_for_pending_transaction - waits for pending transaction
* @dev: the PCI device to operate on
}
EXPORT_SYMBOL(pci_wait_for_pending_transaction);
+++++ +/*
+++++ + * We should only need to wait 100ms after FLR, but some devices take longer.
+++++ + * Wait for up to 1000ms for config space to return something other than -1.
+++++ + * Intel IGD requires this when an LCD panel is attached. We read the 2nd
+++++ + * dword because VFs don't implement the 1st dword.
+++++ + */
+++++ +static void pci_flr_wait(struct pci_dev *dev)
+++++ +{
+++++ + int i = 0;
+++++ + u32 id;
+++++ +
+++++ + do {
+++++ + msleep(100);
+++++ + pci_read_config_dword(dev, PCI_COMMAND, &id);
+++++ + } while (i++ < 10 && id == ~0);
+++++ +
+++++ + if (id == ~0)
+++++ + dev_warn(&dev->dev, "Failed to return from FLR\n");
+++++ + else if (i > 1)
+++++ + dev_info(&dev->dev, "Required additional %dms to return from FLR\n",
+++++ + (i - 1) * 100);
+++++ +}
+++++ +
static int pcie_flr(struct pci_dev *dev, int probe)
{
u32 cap;
dev_err(&dev->dev, "timed out waiting for pending transaction; performing function level reset anyway\n");
pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
----- - msleep(100);
+++++ + pci_flr_wait(dev);
return 0;
}
dev_err(&dev->dev, "timed out waiting for pending transaction; performing AF function level reset anyway\n");
pci_write_config_byte(dev, pos + PCI_AF_CTRL, PCI_AF_CTRL_FLR);
----- - msleep(100);
+++++ + pci_flr_wait(dev);
return 0;
}
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
--- --#include <asm-generic/pci-bridge.h>
#include "pci.h"
#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
pci_pm_init(dev);
/* Vital Product Data */
------ pci_vpd_pci22_init(dev);
++++++ pci_vpd_init(dev);
/* Alternative Routing-ID Forwarding */
pci_configure_ari(dev);
return 0;
if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
return 1;
+++ ++
+++ ++ /*
+++ ++ * PCIe downstream ports are bridges that normally lead to only a
+++ ++ * device 0, but if PCI_SCAN_ALL_PCIE_DEVS is set, scan all
+++ ++ * possible devices, not just device 0. See PCIe spec r3.0,
+++ ++ * sec 7.3.1.
+++ ++ */
if (parent->has_secondary_link &&
!pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
return 1;
u32 class = pdev->class;
/* Use "USB Device (not host controller)" class */
---- -- pdev->class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe;
++++ ++ pdev->class = PCI_CLASS_SERIAL_USB_DEVICE;
dev_info(&pdev->dev, "PCI class overridden (%#08x -> %#08x) so dwc3 driver can claim this instead of xhci\n",
class, pdev->class);
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
++++++ /*
++++++ * If a device follows the VPD format spec, the PCI core will not read or
++++++ * write past the VPD End Tag. But some vendors do not follow the VPD
++++++ * format spec, so we can't tell how much data is safe to access. Devices
++++++ * may behave unpredictably if we access too much. Blacklist these devices
++++++ * so we don't touch VPD at all.
++++++ */
++++++ static void quirk_blacklist_vpd(struct pci_dev *dev)
++++++ {
++++++ if (dev->vpd) {
++++++ dev->vpd->len = 0;
++++++ dev_warn(&dev->dev, FW_BUG "VPD access disabled\n");
++++++ }
++++++ }
++++++
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0060, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x007c, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0413, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0078, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0079, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0073, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x0071, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005b, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x002f, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005d, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_LSI_LOGIC, 0x005f, quirk_blacklist_vpd);
++++++ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATTANSIC, PCI_ANY_ID,
++++++ quirk_blacklist_vpd);
++++++
/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
* VPD end tag will hang the device. This problem was initially
#endif
}
++++ +static int pci_quirk_cavium_acs(struct pci_dev *dev, u16 acs_flags)
++++ +{
++++ + /*
++++ + * Cavium devices matching this quirk do not perform peer-to-peer
++++ + * with other functions, allowing masking out these bits as if they
++++ + * were unimplemented in the ACS capability.
++++ + */
++++ + acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
++++ + PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
++++ +
++++ + return acs_flags ? 0 : 1;
++++ +}
++++ +
/*
* Many Intel PCH root ports do provide ACS-like features to disable peer
* transactions and validate bus numbers in requests, but do not provide an
{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
{ 0x19a2, 0x710, pci_quirk_mf_endpoint_acs }, /* Emulex BE3-R */
{ 0x10df, 0x720, pci_quirk_mf_endpoint_acs }, /* Emulex Skyhawk-R */
++++ + /* Cavium ThunderX */
++++ + { PCI_VENDOR_ID_CAVIUM, PCI_ANY_ID, pci_quirk_cavium_acs },
{ 0 }
};
pci_mmap_mem
};
---- --/* This defines the direction arg to the DMA mapping routines. */
---- --#define PCI_DMA_BIDIRECTIONAL 0
---- --#define PCI_DMA_TODEVICE 1
---- --#define PCI_DMA_FROMDEVICE 2
---- --#define PCI_DMA_NONE 3
---- --
/*
* For PCI devices, the region numbers are assigned this way:
*/
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
/*
 * Global PCI setup flags: set via pci_set_flags()/pci_add_flags() and
 * tested with pci_has_flag().
 */
enum {
	PCI_REASSIGN_ALL_RSRC	= 0x00000001,	/* ignore firmware setup */
	PCI_REASSIGN_ALL_BUS	= 0x00000002,	/* reassign all bus numbers */
	PCI_PROBE_ONLY		= 0x00000004,	/* use existing setup */
	PCI_CAN_SKIP_ISA_ALIGN	= 0x00000008,	/* don't do ISA alignment */
	PCI_ENABLE_PROC_DOMAINS	= 0x00000010,	/* enable domains in /proc */
	PCI_COMPAT_DOMAIN_0	= 0x00000020,	/* ... except domain 0 */
	PCI_SCAN_ALL_PCIE_DEVS	= 0x00000040,	/* scan all, not just dev 0 */
};
+++ ++
/* these external functions are only available when PCI support is enabled */
#ifdef CONFIG_PCI
extern unsigned int pci_flags;

/* Accessors for the global pci_flags word (PCI_PROBE_ONLY etc.). */
static inline void pci_set_flags(int flags) { pci_flags = flags; }
static inline void pci_add_flags(int flags) { pci_flags |= flags; }
static inline void pci_clear_flags(int flags) { pci_flags &= ~flags; }
static inline int pci_has_flag(int flag) { return pci_flags & flag; }
+++ ++
void pcie_bus_configure_settings(struct pci_bus *bus);
enum pcie_bus_config_types {
bool pci_intx_mask_supported(struct pci_dev *dev);
bool pci_check_and_mask_intx(struct pci_dev *dev);
bool pci_check_and_unmask_intx(struct pci_dev *dev);
---- --int pci_set_dma_max_seg_size(struct pci_dev *dev, unsigned int size);
---- --int pci_set_dma_seg_boundary(struct pci_dev *dev, unsigned long mask);
int pci_wait_for_pending(struct pci_dev *dev, int pos, u16 mask);
int pci_wait_for_pending_transaction(struct pci_dev *dev);
int pcix_get_max_mmrbc(struct pci_dev *dev);
int pci_set_vga_state(struct pci_dev *pdev, bool decode,
unsigned int command_bits, u32 flags);
++++ ++
/* kmem_cache style wrapper around pci_alloc_consistent() */
#include <linux/pci-dma.h>
#else /* CONFIG_PCI is not enabled */
/* CONFIG_PCI disabled: flag setters are no-ops and no flag is ever set. */
static inline void pci_set_flags(int flags) { }
static inline void pci_add_flags(int flags) { }
static inline void pci_clear_flags(int flags) { }
static inline int pci_has_flag(int flag) { return 0; }
+++ ++
/*
* If the system does not have PCI, clearly these return errors. Define
* these as simple inline functions to avoid hair in drivers.
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
---- --static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask)
---- --{ return -EIO; }
---- --static inline int pci_set_consistent_dma_mask(struct pci_dev *dev, u64 mask)
---- --{ return -EIO; }
---- --static inline int pci_set_dma_max_seg_size(struct pci_dev *dev,
---- -- unsigned int size)
---- --{ return -EIO; }
---- --static inline int pci_set_dma_seg_boundary(struct pci_dev *dev,
---- -- unsigned long mask)
---- --{ return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
#define PCI_VPD_LRDT_RW_DATA PCI_VPD_LRDT_ID(PCI_VPD_LTIN_RW_DATA)
/* Small Resource Data Type Tag Item Names */
------ #define PCI_VPD_STIN_END 0x78 /* End */
++++++ #define PCI_VPD_STIN_END 0x0f /* End */
------ #define PCI_VPD_SRDT_END PCI_VPD_STIN_END
++++++ #define PCI_VPD_SRDT_END (PCI_VPD_STIN_END << 3)
#define PCI_VPD_SRDT_TIN_MASK 0x78
#define PCI_VPD_SRDT_LEN_MASK 0x07
++++++ #define PCI_VPD_LRDT_TIN_MASK 0x7f
#define PCI_VPD_LRDT_TAG_SIZE 3
#define PCI_VPD_SRDT_TAG_SIZE 1
return (u16)lrdt[1] + ((u16)lrdt[2] << 8);
}
++++++ /**
++++++ * pci_vpd_lrdt_tag - Extracts the Large Resource Data Type Tag Item
++++++ * @lrdt: Pointer to the beginning of the Large Resource Data Type tag
++++++ *
++++++ * Returns the extracted Large Resource Data Type Tag item.
++++++ */
++++++ static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt)
++++++ {
++++++ return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK);
++++++ }
++++++
/**
* pci_vpd_srdt_size - Extracts the Small Resource Data Type length
 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
return (*srdt) & PCI_VPD_SRDT_LEN_MASK;
}
/**
 * pci_vpd_srdt_tag - Extracts the Small Resource Data Type Tag Item
 * @srdt: Pointer to the beginning of the Small Resource Data Type tag
 *
 * Returns the extracted Small Resource Data Type Tag Item (bits 6:3 of
 * the single header byte).
 */
static inline u8 pci_vpd_srdt_tag(const u8 *srdt)
{
	return ((*srdt) & PCI_VPD_SRDT_TIN_MASK) >> 3;
}
++++++
/**
* pci_vpd_info_field_size - Extracts the information field length
* @lrdt: Pointer to the beginning of an information field header
{
return bus->self && bus->self->ari_enabled;
}
++++ ++
++++ ++/* provide the legacy pci_dma_* API */
++++ ++#include <linux/pci-dma-compat.h>
++++ ++
#endif /* LINUX_PCI_H */