PCI: Add pci_enable_atomic_ops_to_root()
authorJay Cornwall <Jay.Cornwall@amd.com>
Fri, 5 Jan 2018 00:44:59 +0000 (19:44 -0500)
committerBjorn Helgaas <bhelgaas@google.com>
Tue, 23 Jan 2018 20:46:50 +0000 (14:46 -0600)
The Atomic Operations feature (PCIe r4.0, sec 6.15) allows atomic
transactions to be requested by, routed through and completed by PCIe
components. Routing and completion do not require software support.
Component support for each is detectable via the DEVCAP2 register.

A Requester may use AtomicOps only if its PCI_EXP_DEVCTL2_ATOMIC_REQ is
set. This should be set only if the Completer and all intermediate routing
elements support AtomicOps.

A concrete example is the AMD Fiji-class GPU (which is capable of making
AtomicOp requests), below a PLX 8747 switch (advertising AtomicOp routing)
with a Haswell host bridge (advertising AtomicOp completion support).

Add pci_enable_atomic_ops_to_root() for per-device control over AtomicOp
requests. This checks to be sure the Root Port supports completion of the
desired AtomicOp sizes and the path to the Root Port supports routing the
AtomicOps.

Signed-off-by: Jay Cornwall <Jay.Cornwall@amd.com>
Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
[bhelgaas: changelog, comments, whitespace]
Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
drivers/pci/pci.c
include/linux/pci.h
include/uapi/linux/pci_regs.h

index 4a7c6864fdf42f8e720a46bd4c38820a29efa989..6112dd8d68b6c156c8546fceda4a6d9edfb1ff2c 100644 (file)
@@ -3065,6 +3065,81 @@ int pci_rebar_set_size(struct pci_dev *pdev, int bar, int size)
        return 0;
 }
 
+/**
+ * pci_enable_atomic_ops_to_root - enable AtomicOp requests to root port
+ * @dev: the PCI device
+ * @cap_mask: mask of desired AtomicOp sizes, including one or more of:
+ *     PCI_EXP_DEVCAP2_ATOMIC_COMP32
+ *     PCI_EXP_DEVCAP2_ATOMIC_COMP64
+ *     PCI_EXP_DEVCAP2_ATOMIC_COMP128
+ *
+ * Return 0 if all upstream bridges support AtomicOp routing, egress
+ * blocking is disabled on all upstream ports, and the root port supports
+ * the requested completion capabilities (32-bit, 64-bit and/or 128-bit
+ * AtomicOp completion), or negative otherwise.
+ *
+ * On success, PCI_EXP_DEVCTL2_ATOMIC_REQ is set on @dev so it may issue
+ * AtomicOp requests; on any failure @dev's config space is left untouched.
+ */
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask)
+{
+       struct pci_bus *bus = dev->bus;
+       struct pci_dev *bridge;
+       u32 cap, ctl2;
+
+       /* AtomicOps are a PCIe feature; conventional PCI can't request them */
+       if (!pci_is_pcie(dev))
+               return -EINVAL;
+
+       /*
+        * Per PCIe r4.0, sec 6.15, endpoints and root ports may be
+        * AtomicOp requesters.  For now, we only support endpoints as
+        * requesters and root ports as completers.  No endpoints as
+        * completers, and no peer-to-peer.
+        */
+
+       switch (pci_pcie_type(dev)) {
+       case PCI_EXP_TYPE_ENDPOINT:
+       case PCI_EXP_TYPE_LEG_END:
+       case PCI_EXP_TYPE_RC_END:
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Walk every bridge on the path from @dev up to the root bus */
+       while (bus->parent) {
+               bridge = bus->self;
+
+               pcie_capability_read_dword(bridge, PCI_EXP_DEVCAP2, &cap);
+
+               switch (pci_pcie_type(bridge)) {
+               /* Ensure switch ports support AtomicOp routing */
+               case PCI_EXP_TYPE_UPSTREAM:
+               case PCI_EXP_TYPE_DOWNSTREAM:
+                       if (!(cap & PCI_EXP_DEVCAP2_ATOMIC_ROUTE))
+                               return -EINVAL;
+                       break;
+
+               /* Ensure root port supports all the sizes we care about */
+               case PCI_EXP_TYPE_ROOT_PORT:
+                       if ((cap & cap_mask) != cap_mask)
+                               return -EINVAL;
+                       break;
+               }
+
+               /* Ensure upstream ports don't block AtomicOps on egress */
+               if (!bridge->has_secondary_link) {
+                       /*
+                        * NOTE(review): !has_secondary_link is taken to mean
+                        * the port's link points upstream (toward the root),
+                        * i.e. requests from @dev egress through it -- confirm
+                        * against the has_secondary_link setup code.
+                        */
+                       pcie_capability_read_dword(bridge, PCI_EXP_DEVCTL2,
+                                                  &ctl2);
+                       if (ctl2 & PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK)
+                               return -EINVAL;
+               }
+
+               bus = bus->parent;
+       }
+
+       /* Whole path checked out; allow @dev to issue AtomicOp requests */
+       pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
+                                PCI_EXP_DEVCTL2_ATOMIC_REQ);
+       return 0;
+}
+EXPORT_SYMBOL(pci_enable_atomic_ops_to_root);
+
 /**
  * pci_swizzle_interrupt_pin - swizzle INTx for device behind bridge
  * @dev: the PCI device
index c170c9250c8b706e62e00f4b6a26149735bacba5..ab3d12a7dfedf29863208504a254264ccfb79981 100644 (file)
@@ -2061,6 +2061,7 @@ void pci_request_acs(void);
 bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags);
 bool pci_acs_path_enabled(struct pci_dev *start,
                          struct pci_dev *end, u16 acs_flags);
+int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask);
 
 #define PCI_VPD_LRDT                   0x80    /* Large Resource Data Type */
 #define PCI_VPD_LRDT_ID(x)             ((x) | PCI_VPD_LRDT)
index 70c2b2ade0483fde34bdcd77a4cf4bcabac94252..f31b56b217146bd5f8f69128486f21c73a050dd3 100644 (file)
 #define PCI_EXP_DEVCAP2                36      /* Device Capabilities 2 */
 #define  PCI_EXP_DEVCAP2_ARI           0x00000020 /* Alternative Routing-ID */
 #define  PCI_EXP_DEVCAP2_ATOMIC_ROUTE  0x00000040 /* Atomic Op routing */
-#define PCI_EXP_DEVCAP2_ATOMIC_COMP64  0x00000100 /* Atomic 64-bit compare */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP32 0x00000080 /* 32b AtomicOp completion */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP64 0x00000100 /* 64b AtomicOp completion */
+#define  PCI_EXP_DEVCAP2_ATOMIC_COMP128        0x00000200 /* 128b AtomicOp completion */
 #define  PCI_EXP_DEVCAP2_LTR           0x00000800 /* Latency tolerance reporting */
 #define  PCI_EXP_DEVCAP2_OBFF_MASK     0x000c0000 /* OBFF support mechanism */
 #define  PCI_EXP_DEVCAP2_OBFF_MSG      0x00040000 /* New message signaling */