enic: added enic_wq.c and enic_wq.h
author     Satish Kharat <satishkh@cisco.com>
Wed, 5 Mar 2025 00:56:42 +0000 (19:56 -0500)
committer  Paolo Abeni <pabeni@redhat.com>
Tue, 11 Mar 2025 09:21:32 +0000 (10:21 +0100)
Moves wq-related functions to enic_wq.c. Prepares for a
cleanup of the enic wq code path.

Co-developed-by: Nelson Escobar <neescoba@cisco.com>
Signed-off-by: Nelson Escobar <neescoba@cisco.com>
Co-developed-by: John Daley <johndale@cisco.com>
Signed-off-by: John Daley <johndale@cisco.com>
Signed-off-by: Satish Kharat <satishkh@cisco.com>
Link: https://patch.msgid.link/20250304-enic_cleanup_and_ext_cq-v2-6-85804263dad8@cisco.com
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/cisco/enic/Makefile
drivers/net/ethernet/cisco/enic/cq_desc.h
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/enic_wq.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_wq.h [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/vnic_cq.h

index b3b5196b2dfcc3e59366474ba78fc7a4cd746eb0..a96b8332e6e2a87da6e50a2da3ef9546d61b589c 100644 (file)
@@ -3,5 +3,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
        enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-       enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o
+       enic_ethtool.o enic_api.o enic_clsf.o enic_rq.o enic_wq.o
 
index 8fc313b6ed0434bd55b8e10bf3086ef848acbdf1..bfb3f14e89f5d6cfb0159bdf041b8004c774d7e8 100644 (file)
@@ -43,28 +43,4 @@ struct cq_desc {
 #define CQ_DESC_32_FI_MASK (BIT(0) | BIT(1))
 #define CQ_DESC_64_FI_MASK (BIT(0) | BIT(1))
 
-static inline void cq_desc_dec(const struct cq_desc *desc_arg,
-       u8 *type, u8 *color, u16 *q_number, u16 *completed_index)
-{
-       const struct cq_desc *desc = desc_arg;
-       const u8 type_color = desc->type_color;
-
-       *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
-
-       /*
-        * Make sure color bit is read from desc *before* other fields
-        * are read from desc.  Hardware guarantees color bit is last
-        * bit (byte) written.  Adding the rmb() prevents the compiler
-        * and/or CPU from reordering the reads which would potentially
-        * result in reading stale values.
-        */
-
-       rmb();
-
-       *type = type_color & CQ_DESC_TYPE_MASK;
-       *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
-       *completed_index = le16_to_cpu(desc->completed_index) &
-               CQ_DESC_COMP_NDX_MASK;
-}
-
 #endif /* _CQ_DESC_H_ */
index d60e55accafd0e4f83728524da4f167a474d6213..9c12e967e9f1299e1cf3e280a16fb9bf93ac607b 100644 (file)
@@ -83,6 +83,10 @@ struct enic_rx_coal {
 #define ENIC_SET_INSTANCE              (1 << 3)
 #define ENIC_SET_HOST                  (1 << 4)
 
+#define MAX_TSO                        BIT(16)
+#define WQ_ENET_MAX_DESC_LEN   BIT(WQ_ENET_LEN_BITS)
+#define ENIC_DESC_MAX_SPLITS   (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
+
 struct enic_port_profile {
        u32 set;
        u8 request;
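
For reference, these macros bound the worst-case number of WQ descriptors a
single TSO frame can occupy. A worked sketch of the arithmetic, assuming
WQ_ENET_LEN_BITS is 14 (its value in wq_enet_desc.h, which is not part of
this patch):

	/* With WQ_ENET_LEN_BITS == 14 (assumed, from wq_enet_desc.h):
	 *   MAX_TSO              = BIT(16)           = 65536
	 *   WQ_ENET_MAX_DESC_LEN = BIT(14)           = 16384
	 *   ENIC_DESC_MAX_SPLITS = 65536 / 16384 + 1 = 5
	 * i.e. a 64 KiB TSO frame needs at most 5 data descriptors, which is
	 * what the MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS wake threshold in
	 * enic_wq_service() accounts for.
	 */
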
index d716514366dfc56b4e08260d18d78fddd23f6253..52174843f02f1fecc75666367ad5034cbbcf8f07 100644 (file)
 #include "enic_pp.h"
 #include "enic_clsf.h"
 #include "enic_rq.h"
+#include "enic_wq.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
-#define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
-#define MAX_TSO                                (1 << 16)
-#define ENIC_DESC_MAX_SPLITS           (MAX_TSO / WQ_ENET_MAX_DESC_LEN + 1)
 
 #define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
@@ -321,54 +319,6 @@ int enic_is_valid_vf(struct enic *enic, int vf)
 #endif
 }
 
-static void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
-{
-       struct enic *enic = vnic_dev_priv(wq->vdev);
-
-       if (buf->sop)
-               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
-                                DMA_TO_DEVICE);
-       else
-               dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
-                              DMA_TO_DEVICE);
-
-       if (buf->os_buf)
-               dev_kfree_skb_any(buf->os_buf);
-}
-
-static void enic_wq_free_buf(struct vnic_wq *wq,
-       struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(wq->vdev);
-
-       enic->wq[wq->index].stats.cq_work++;
-       enic->wq[wq->index].stats.cq_bytes += buf->len;
-       enic_free_wq_buf(wq, buf);
-}
-
-static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-       u8 type, u16 q_number, u16 completed_index, void *opaque)
-{
-       struct enic *enic = vnic_dev_priv(vdev);
-
-       spin_lock(&enic->wq[q_number].lock);
-
-       vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
-               completed_index, enic_wq_free_buf,
-               opaque);
-
-       if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number)) &&
-           vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
-           (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
-               netif_wake_subqueue(enic->netdev, q_number);
-               enic->wq[q_number].stats.wake++;
-       }
-
-       spin_unlock(&enic->wq[q_number].lock);
-
-       return 0;
-}
-
 static bool enic_log_q_error(struct enic *enic)
 {
        unsigned int i;
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.c b/drivers/net/ethernet/cisco/enic/enic_wq.c
new file mode 100644 (file)
index 0000000..59b0290
--- /dev/null
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright 2025 Cisco Systems, Inc.  All rights reserved.
+
+#include <net/netdev_queues.h>
+#include "enic_res.h"
+#include "enic.h"
+#include "enic_wq.h"
+
+static void cq_desc_dec(const struct cq_desc *desc_arg, u8 *type, u8 *color,
+                       u16 *q_number, u16 *completed_index)
+{
+       const struct cq_desc *desc = desc_arg;
+       const u8 type_color = desc->type_color;
+
+       *color = (type_color >> CQ_DESC_COLOR_SHIFT) & CQ_DESC_COLOR_MASK;
+
+       /*
+        * Make sure color bit is read from desc *before* other fields
+        * are read from desc.  Hardware guarantees color bit is last
+        * bit (byte) written.  Adding the rmb() prevents the compiler
+        * and/or CPU from reordering the reads which would potentially
+        * result in reading stale values.
+        */
+       rmb();
+
+       *type = type_color & CQ_DESC_TYPE_MASK;
+       *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK;
+       *completed_index = le16_to_cpu(desc->completed_index) &
+               CQ_DESC_COMP_NDX_MASK;
+}
+
+unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do,
+                            int (*q_service)(struct vnic_dev *vdev,
+                                             struct cq_desc *cq_desc, u8 type,
+                                             u16 q_number, u16 completed_index,
+                                             void *opaque), void *opaque)
+{
+       struct cq_desc *cq_desc;
+       unsigned int work_done = 0;
+       u16 q_number, completed_index;
+       u8 type, color;
+
+       cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+                  cq->ring.desc_size * cq->to_clean);
+       cq_desc_dec(cq_desc, &type, &color,
+                   &q_number, &completed_index);
+
+       while (color != cq->last_color) {
+               if ((*q_service)(cq->vdev, cq_desc, type, q_number,
+                                completed_index, opaque))
+                       break;
+
+               cq->to_clean++;
+               if (cq->to_clean == cq->ring.desc_count) {
+                       cq->to_clean = 0;
+                       cq->last_color = cq->last_color ? 0 : 1;
+               }
+
+               cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
+                       cq->ring.desc_size * cq->to_clean);
+               cq_desc_dec(cq_desc, &type, &color,
+                           &q_number, &completed_index);
+
+               work_done++;
+               if (work_done >= work_to_do)
+                       break;
+       }
+
+       return work_done;
+}
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf)
+{
+       struct enic *enic = vnic_dev_priv(wq->vdev);
+
+       if (buf->sop)
+               dma_unmap_single(&enic->pdev->dev, buf->dma_addr, buf->len,
+                                DMA_TO_DEVICE);
+       else
+               dma_unmap_page(&enic->pdev->dev, buf->dma_addr, buf->len,
+                              DMA_TO_DEVICE);
+
+       if (buf->os_buf)
+               dev_kfree_skb_any(buf->os_buf);
+}
+
+static void enic_wq_free_buf(struct vnic_wq *wq, struct cq_desc *cq_desc,
+                            struct vnic_wq_buf *buf, void *opaque)
+{
+       struct enic *enic = vnic_dev_priv(wq->vdev);
+
+       enic->wq[wq->index].stats.cq_work++;
+       enic->wq[wq->index].stats.cq_bytes += buf->len;
+       enic_free_wq_buf(wq, buf);
+}
+
+int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
+                   u16 q_number, u16 completed_index, void *opaque)
+{
+       struct enic *enic = vnic_dev_priv(vdev);
+
+       spin_lock(&enic->wq[q_number].lock);
+
+       vnic_wq_service(&enic->wq[q_number].vwq, cq_desc,
+                       completed_index, enic_wq_free_buf, opaque);
+
+       if (netif_tx_queue_stopped(netdev_get_tx_queue(enic->netdev, q_number))
+           && vnic_wq_desc_avail(&enic->wq[q_number].vwq) >=
+           (MAX_SKB_FRAGS + ENIC_DESC_MAX_SPLITS)) {
+               netif_wake_subqueue(enic->netdev, q_number);
+               enic->wq[q_number].stats.wake++;
+       }
+
+       spin_unlock(&enic->wq[q_number].lock);
+
+       return 0;
+}
+
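For context, the exported enic_wq_service() is intended to be used as the
q_service callback of the vnic_cq_service() defined above. A minimal sketch of
a caller, loosely modeled on the TX-completion poll path in enic_main.c and
not part of this patch; the helper name enic_drain_wq_cq() and the parameters
cq and wq_work_to_do are illustrative, and the per-CQ array enic->cq[] is
assumed from struct enic:

	/* Hypothetical helper: drain up to wq_work_to_do TX completions on
	 * completion queue 'cq'.  vnic_cq_service() walks the CQ ring until
	 * the color bit flips or the budget runs out, calling
	 * enic_wq_service() for each completed descriptor, which unmaps and
	 * frees the TX buffers and may wake a stopped TX subqueue.
	 */
	static unsigned int enic_drain_wq_cq(struct enic *enic, unsigned int cq,
					     unsigned int wq_work_to_do)
	{
		return vnic_cq_service(&enic->cq[cq], wq_work_to_do,
				       enic_wq_service, NULL);
	}
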
diff --git a/drivers/net/ethernet/cisco/enic/enic_wq.h b/drivers/net/ethernet/cisco/enic/enic_wq.h
new file mode 100644 (file)
index 0000000..cc4d6a9
--- /dev/null
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only
+ * Copyright 2025 Cisco Systems, Inc.  All rights reserved.
+ */
+
+unsigned int vnic_cq_service(struct vnic_cq *cq, unsigned int work_to_do,
+                            int (*q_service)(struct vnic_dev *vdev,
+                                             struct cq_desc *cq_desc, u8 type,
+                                             u16 q_number, u16 completed_index,
+                                             void *opaque), void *opaque);
+
+void enic_free_wq_buf(struct vnic_wq *wq, struct vnic_wq_buf *buf);
+
+int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, u8 type,
+                   u16 q_number, u16 completed_index, void *opaque);
index 21d97c01f9424fde3d3c1d9b6cb4b7ef6de144b1..0e37f5d5e5272ed82773b9c16008087ef2dc6dd7 100644 (file)
@@ -56,47 +56,6 @@ struct vnic_cq {
        ktime_t prev_ts;
 };
 
-static inline unsigned int vnic_cq_service(struct vnic_cq *cq,
-       unsigned int work_to_do,
-       int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc,
-       u8 type, u16 q_number, u16 completed_index, void *opaque),
-       void *opaque)
-{
-       struct cq_desc *cq_desc;
-       unsigned int work_done = 0;
-       u16 q_number, completed_index;
-       u8 type, color;
-
-       cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-               cq->ring.desc_size * cq->to_clean);
-       cq_desc_dec(cq_desc, &type, &color,
-               &q_number, &completed_index);
-
-       while (color != cq->last_color) {
-
-               if ((*q_service)(cq->vdev, cq_desc, type,
-                       q_number, completed_index, opaque))
-                       break;
-
-               cq->to_clean++;
-               if (cq->to_clean == cq->ring.desc_count) {
-                       cq->to_clean = 0;
-                       cq->last_color = cq->last_color ? 0 : 1;
-               }
-
-               cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs +
-                       cq->ring.desc_size * cq->to_clean);
-               cq_desc_dec(cq_desc, &type, &color,
-                       &q_number, &completed_index);
-
-               work_done++;
-               if (work_done >= work_to_do)
-                       break;
-       }
-
-       return work_done;
-}
-
 static inline void *vnic_cq_to_clean(struct vnic_cq *cq)
 {
        return ((u8 *)cq->ring.descs + cq->ring.desc_size * cq->to_clean);