net: ena: Remove CQ tail pointer update
authorDavid Arinzon <darinzon@amazon.com>
Tue, 30 Jan 2024 09:53:47 +0000 (09:53 +0000)
committerPaolo Abeni <pabeni@redhat.com>
Thu, 1 Feb 2024 12:22:12 +0000 (13:22 +0100)
The functionality was added to allow the drivers to create an
SQ and CQ of different sizes.
When the RX/TX SQ and CQ have the same size, such an update isn't
necessary as the device can safely assume it doesn't overwrite
unprocessed completions. However, if the SQ were larger than the CQ,
the device might have more completions to report than there is room
for in the CQ.

There is no support for different SQ and CQ sizes; therefore,
remove the API and its usage.

'____cacheline_aligned' compiler attribute was added to
'struct ena_com_io_cq' to ensure that the removal of the
'cq_head_db_reg' field doesn't change the cache-line layout
of this struct.

Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David Arinzon <darinzon@amazon.com>
Signed-off-by: Paolo Abeni <pabeni@redhat.com>
drivers/net/ethernet/amazon/ena/ena_com.c
drivers/net/ethernet/amazon/ena/ena_com.h
drivers/net/ethernet/amazon/ena/ena_eth_com.h
drivers/net/ethernet/amazon/ena/ena_netdev.c
drivers/net/ethernet/amazon/ena/ena_xdp.c

index 9a8a43b7896720c2c35e0e6af3f9aa83b3013e83..675ee728fe527079da355ffb0f9745d7d2355ba9 100644 (file)
@@ -1427,11 +1427,6 @@ int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
        io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                cmd_completion.cq_interrupt_unmask_register_offset);
 
-       if (cmd_completion.cq_head_db_register_offset)
-               io_cq->cq_head_db_reg =
-                       (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
-                       cmd_completion.cq_head_db_register_offset);
-
        if (cmd_completion.numa_node_register_offset)
                io_cq->numa_node_cfg_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
index f3176fc63d70b55d2e2f1d3d7e3b41e3fc89b70c..fea57eb8e58b63d90a0bb2d3e53d96dec6dee669 100644 (file)
@@ -109,16 +109,13 @@ struct ena_com_io_cq {
        /* Interrupt unmask register */
        u32 __iomem *unmask_reg;
 
-       /* The completion queue head doorbell register */
-       u32 __iomem *cq_head_db_reg;
-
        /* numa configuration register (for TPH) */
        u32 __iomem *numa_node_cfg_reg;
 
        /* The value to write to the above register to unmask
         * the interrupt of this queue
         */
-       u32 msix_vector;
+       u32 msix_vector ____cacheline_aligned;
 
        enum queue_direction direction;
 
@@ -134,7 +131,6 @@ struct ena_com_io_cq {
        /* Device queue index */
        u16 idx;
        u16 head;
-       u16 last_head_update;
        u8 phase;
        u8 cdesc_entry_size_in_bytes;
 
index 372b259279eca3de4eba3168f99a58c2f6c3e2f5..4d65d820a95eda2bb79937a8aa4fe79c585a848d 100644 (file)
@@ -8,8 +8,6 @@
 
 #include "ena_com.h"
 
-/* head update threshold in units of (queue size / ENA_COMP_HEAD_THRESH) */
-#define ENA_COMP_HEAD_THRESH 4
 /* we allow 2 DMA descriptors per LLQ entry */
 #define ENA_LLQ_ENTRY_DESC_CHUNK_SIZE  (2 * sizeof(struct ena_eth_io_tx_desc))
 #define ENA_LLQ_HEADER         (128UL - ENA_LLQ_ENTRY_DESC_CHUNK_SIZE)
@@ -172,28 +170,6 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
        return 0;
 }
 
-static inline int ena_com_update_dev_comp_head(struct ena_com_io_cq *io_cq)
-{
-       u16 unreported_comp, head;
-       bool need_update;
-
-       if (unlikely(io_cq->cq_head_db_reg)) {
-               head = io_cq->head;
-               unreported_comp = head - io_cq->last_head_update;
-               need_update = unreported_comp > (io_cq->q_depth / ENA_COMP_HEAD_THRESH);
-
-               if (unlikely(need_update)) {
-                       netdev_dbg(ena_com_io_cq_to_ena_dev(io_cq)->net_device,
-                                  "Write completion queue doorbell for queue %d: head: %d\n",
-                                  io_cq->qid, head);
-                       writel(head, io_cq->cq_head_db_reg);
-                       io_cq->last_head_update = head;
-               }
-       }
-
-       return 0;
-}
-
 static inline void ena_com_update_numa_node(struct ena_com_io_cq *io_cq,
                                            u8 numa_node)
 {
index 0b7f94f6c63161b72a1b1d7be1915a2546044d96..cd75e5a7a52e8eaa926d1e80bd90de57064f6e80 100644 (file)
@@ -856,7 +856,6 @@ static int ena_clean_tx_irq(struct ena_ring *tx_ring, u32 budget)
 
        tx_ring->next_to_clean = next_to_clean;
        ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
-       ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 
        netdev_tx_completed_queue(txq, tx_pkts, tx_bytes);
 
@@ -1303,10 +1302,8 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
                      ENA_RX_REFILL_THRESH_PACKET);
 
        /* Optimization, try to batch new rx buffers */
-       if (refill_required > refill_threshold) {
-               ena_com_update_dev_comp_head(rx_ring->ena_com_io_cq);
+       if (refill_required > refill_threshold)
                ena_refill_rx_bufs(rx_ring, refill_required);
-       }
 
        if (xdp_flags & ENA_XDP_REDIRECT)
                xdp_do_flush();
index fc1c4ef73ba32d7e3b7f3df569f52fec63970222..337c435d3ce998b1b8f69a86f8be7997e1ff99c8 100644 (file)
@@ -412,7 +412,6 @@ static int ena_clean_xdp_irq(struct ena_ring *tx_ring, u32 budget)
 
        tx_ring->next_to_clean = next_to_clean;
        ena_com_comp_ack(tx_ring->ena_com_io_sq, total_done);
-       ena_com_update_dev_comp_head(tx_ring->ena_com_io_cq);
 
        netif_dbg(tx_ring->adapter, tx_done, tx_ring->netdev,
                  "tx_poll: q %d done. total pkts: %d\n",