dmaengine: idxd: setup event log configuration
authorDave Jiang <dave.jiang@intel.com>
Fri, 7 Apr 2023 20:31:30 +0000 (13:31 -0700)
committerVinod Koul <vkoul@kernel.org>
Wed, 12 Apr 2023 17:48:45 +0000 (23:18 +0530)
Add setup of the event log feature for supported devices. The event log
addresses an error-reporting gap in gen 1 DSA devices, where a second error
event is not reported while a first event is still pending software
handling. The event log provides a circular buffer that the device can push
error events to. It is up to the user to create an event log ring large
enough to capture the expected events. The event log size can be set via
the device sysfs attribute. By default, a minimum of 64 entries is
supported when the event log is enabled.

Tested-by: Tony Zhu <tony.zhu@intel.com>
Signed-off-by: Dave Jiang <dave.jiang@intel.com>
Co-developed-by: Fenghua Yu <fenghua.yu@intel.com>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: https://lore.kernel.org/r/20230407203143.2189681-4-fenghua.yu@intel.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
drivers/dma/idxd/device.c
drivers/dma/idxd/idxd.h
drivers/dma/idxd/init.c
drivers/dma/idxd/registers.h
drivers/dma/idxd/sysfs.c
include/uapi/linux/idxd.h

index 5f321f3b4242ea0ab1b12bcf231d1ea921f0f44f..230fe9bb56ae5e6849884b2f85e77871a5479a65 100644 (file)
@@ -752,6 +752,83 @@ void idxd_device_clear_state(struct idxd_device *idxd)
        spin_unlock(&idxd->dev_lock);
 }
 
+static int idxd_device_evl_setup(struct idxd_device *idxd)
+{
+       union gencfg_reg gencfg;
+       union evlcfg_reg evlcfg;
+       union genctrl_reg genctrl;
+       struct device *dev = &idxd->pdev->dev;
+       void *addr;
+       dma_addr_t dma_addr;
+       int size;
+       struct idxd_evl *evl = idxd->evl;
+
+       if (!evl)
+               return 0;
+
+       size = evl_size(idxd);
+       /*
+        * Address needs to be page aligned. However, dma_alloc_coherent() provides
+        * at minimal page size aligned address. No manual alignment required.
+        */
+       addr = dma_alloc_coherent(dev, size, &dma_addr, GFP_KERNEL);
+       if (!addr)
+               return -ENOMEM;
+
+       memset(addr, 0, size);
+
+       spin_lock(&evl->lock);
+       evl->log = addr;
+       evl->dma = dma_addr;
+       evl->log_size = size;
+
+       memset(&evlcfg, 0, sizeof(evlcfg));
+       evlcfg.bits[0] = dma_addr & GENMASK(63, 12);
+       evlcfg.size = evl->size;
+
+       iowrite64(evlcfg.bits[0], idxd->reg_base + IDXD_EVLCFG_OFFSET);
+       iowrite64(evlcfg.bits[1], idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.evl_int_en = 1;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       gencfg.evl_en = 1;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+       spin_unlock(&evl->lock);
+       return 0;
+}
+
+static void idxd_device_evl_free(struct idxd_device *idxd)
+{
+       union gencfg_reg gencfg;
+       union genctrl_reg genctrl;
+       struct device *dev = &idxd->pdev->dev;
+       struct idxd_evl *evl = idxd->evl;
+
+       gencfg.bits = ioread32(idxd->reg_base + IDXD_GENCFG_OFFSET);
+       if (!gencfg.evl_en)
+               return;
+
+       spin_lock(&evl->lock);
+       gencfg.evl_en = 0;
+       iowrite32(gencfg.bits, idxd->reg_base + IDXD_GENCFG_OFFSET);
+
+       genctrl.bits = ioread32(idxd->reg_base + IDXD_GENCTRL_OFFSET);
+       genctrl.evl_int_en = 0;
+       iowrite32(genctrl.bits, idxd->reg_base + IDXD_GENCTRL_OFFSET);
+
+       iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET);
+       iowrite64(0, idxd->reg_base + IDXD_EVLCFG_OFFSET + 8);
+
+       dma_free_coherent(dev, evl->log_size, evl->log, evl->dma);
+       evl->log = NULL;
+       evl->size = IDXD_EVL_SIZE_MIN;
+       spin_unlock(&evl->lock);
+}
+
 static void idxd_group_config_write(struct idxd_group *group)
 {
        struct idxd_device *idxd = group->idxd;
@@ -1451,15 +1528,24 @@ int idxd_device_drv_probe(struct idxd_dev *idxd_dev)
        if (rc < 0)
                return -ENXIO;
 
+       rc = idxd_device_evl_setup(idxd);
+       if (rc < 0) {
+               idxd->cmd_status = IDXD_SCMD_DEV_EVL_ERR;
+               return rc;
+       }
+
        /* Start device */
        rc = idxd_device_enable(idxd);
-       if (rc < 0)
+       if (rc < 0) {
+               idxd_device_evl_free(idxd);
                return rc;
+       }
 
        /* Setup DMA device without channels */
        rc = idxd_register_dma_device(idxd);
        if (rc < 0) {
                idxd_device_disable(idxd);
+               idxd_device_evl_free(idxd);
                idxd->cmd_status = IDXD_SCMD_DEV_DMA_ERR;
                return rc;
        }
@@ -1488,6 +1574,7 @@ void idxd_device_drv_remove(struct idxd_dev *idxd_dev)
        idxd_device_disable(idxd);
        if (test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                idxd_device_reset(idxd);
+       idxd_device_evl_free(idxd);
 }
 
 static enum idxd_dev_type dev_types[] = {
index 2a71273f18226ca90535f74680f8906f5741dbf7..c74681f02b18a0ac5417d969335f8f9aad0637c4 100644 (file)
@@ -262,7 +262,15 @@ struct idxd_driver_data {
 };
 
 struct idxd_evl {
+       /* Lock to protect event log access. */
+       spinlock_t lock;
+       void *log;
+       dma_addr_t dma;
+       /* Total size of event log = number of entries * entry size. */
+       unsigned int log_size;
+       /* The number of entries in the event log. */
        u16 size;
+       u16 head;
 };
 
 struct idxd_device {
@@ -324,6 +332,17 @@ struct idxd_device {
        struct idxd_evl *evl;
 };
 
+static inline unsigned int evl_ent_size(struct idxd_device *idxd)
+{
+       return idxd->hw.gen_cap.evl_support ?
+              (32 * (1 << idxd->hw.gen_cap.evl_support)) : 0;
+}
+
+static inline unsigned int evl_size(struct idxd_device *idxd)
+{
+       return idxd->evl->size * evl_ent_size(idxd);
+}
+
 /* IDXD software descriptor */
 struct idxd_desc {
        union {
index 525de59df82a2e82d509b53fc2f8e044969af0ff..f44719a11c95a3d73b6deb4a3f30e4b36d4ab12d 100644 (file)
@@ -343,6 +343,7 @@ static int idxd_init_evl(struct idxd_device *idxd)
        if (!evl)
                return -ENOMEM;
 
+       spin_lock_init(&evl->lock);
        evl->size = IDXD_EVL_SIZE_MIN;
        idxd->evl = evl;
        return 0;
index ea3a499a3c3c478490634197a2beeb6d0d40db34..11bb97cf7481ca934e75dd51b58a0c8d92c0315e 100644 (file)
@@ -3,6 +3,8 @@
 #ifndef _IDXD_REGISTERS_H_
 #define _IDXD_REGISTERS_H_
 
+#include <uapi/linux/idxd.h>
+
 /* PCI Config */
 #define PCI_DEVICE_ID_INTEL_DSA_SPR0   0x0b25
 #define PCI_DEVICE_ID_INTEL_IAX_SPR0   0x0cfe
@@ -119,7 +121,8 @@ union gencfg_reg {
                u32 rdbuf_limit:8;
                u32 rsvd:4;
                u32 user_int_en:1;
-               u32 rsvd2:19;
+               u32 evl_en:1;
+               u32 rsvd2:18;
        };
        u32 bits;
 } __packed;
@@ -129,7 +132,8 @@ union genctrl_reg {
        struct {
                u32 softerr_int_en:1;
                u32 halt_int_en:1;
-               u32 rsvd:30;
+               u32 evl_int_en:1;
+               u32 rsvd:29;
        };
        u32 bits;
 } __packed;
@@ -299,6 +303,21 @@ union iaa_cap_reg {
 
 #define IDXD_IAACAP_OFFSET     0x180
 
+#define IDXD_EVLCFG_OFFSET     0xe0
+union evlcfg_reg {
+       struct {
+               u64 pasid_en:1;
+               u64 priv:1;
+               u64 rsvd:10;
+               u64 base_addr:52;
+
+               u64 size:16;
+               u64 pasid:20;
+               u64 rsvd2:28;
+       };
+       u64 bits[2];
+} __packed;
+
 #define IDXD_EVL_SIZE_MIN      0x0040
 #define IDXD_EVL_SIZE_MAX      0xffff
 
@@ -539,4 +558,53 @@ union filter_cfg {
        u64 val;
 } __packed;
 
+struct __evl_entry {
+       u64 rsvd:2;
+       u64 desc_valid:1;
+       u64 wq_idx_valid:1;
+       u64 batch:1;
+       u64 fault_rw:1;
+       u64 priv:1;
+       u64 err_info_valid:1;
+       u64 error:8;
+       u64 wq_idx:8;
+       u64 batch_id:8;
+       u64 operation:8;
+       u64 pasid:20;
+       u64 rsvd2:4;
+
+       u16 batch_idx;
+       u16 rsvd3;
+       union {
+               /* Invalid Flags 0x11 */
+               u32 invalid_flags;
+               /* Invalid Int Handle 0x19 */
+               /* Page fault 0x1a */
+               /* Page fault 0x06, 0x1f, only operand_id */
+               /* Page fault before drain or in batch, 0x26, 0x27 */
+               struct {
+                       u16 int_handle;
+                       u16 rci:1;
+                       u16 ims:1;
+                       u16 rcr:1;
+                       u16 first_err_in_batch:1;
+                       u16 rsvd4_2:9;
+                       u16 operand_id:3;
+               };
+       };
+       u64 fault_addr;
+       u64 rsvd5;
+} __packed;
+
+struct dsa_evl_entry {
+       struct __evl_entry e;
+       struct dsa_completion_record cr;
+} __packed;
+
+struct iax_evl_entry {
+       struct __evl_entry e;
+       u64 rsvd[4];
+       struct iax_completion_record cr;
+} __packed;
+
 #endif
index 85644e5bde8398b1eb2cc2e6a23565b86cdc3f6f..163fdfaa502236e4d5491969147f114bcc717d01 100644 (file)
@@ -1605,7 +1605,8 @@ static ssize_t event_log_size_store(struct device *dev,
        if (!test_bit(IDXD_FLAG_CONFIGURABLE, &idxd->flags))
                return -EPERM;
 
-       if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX)
+       if (val < IDXD_EVL_SIZE_MIN || val > IDXD_EVL_SIZE_MAX ||
+           (val * evl_ent_size(idxd) > ULONG_MAX - idxd->evl->dma))
                return -EINVAL;
 
        idxd->evl->size = val;
index fc47635b57dc9d68cad62dde74faaef5a00d8f04..5d05bf12f2bd7007e179bd83624302ed677c8507 100644 (file)
@@ -30,6 +30,7 @@ enum idxd_scmd_stat {
        IDXD_SCMD_WQ_NO_PRIV = 0x800f0000,
        IDXD_SCMD_WQ_IRQ_ERR = 0x80100000,
        IDXD_SCMD_WQ_USER_NO_IOMMU = 0x80110000,
+       IDXD_SCMD_DEV_EVL_ERR = 0x80120000,
 };
 
 #define IDXD_SCMD_SOFTERR_MASK 0x80000000