net: rswitch: Add unmap_addrs instead of dma address in each desc
author Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Fri, 8 Dec 2023 04:10:25 +0000 (13:10 +0900)
committer David S. Miller <davem@davemloft.net>
Sun, 10 Dec 2023 19:31:41 +0000 (19:31 +0000)
To transmit a jumbo frame of 2KiB or more, the driver will have to
split the frame across multiple TX descriptors. In preparation for
this, add an unmap_addrs array that records the DMA address of each
mapping, and use it for unmapping instead of the address read back
from each TX descriptor, because an individual descriptor may not
hold the top DMA address of the mapping.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/renesas/rswitch.c
drivers/net/ethernet/renesas/rswitch.h
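
For context on the design, the bookkeeping pattern this patch introduces can
be sketched outside the driver. The following minimal, self-contained C model
is illustrative only: all names (struct tx_ring, tx_submit, tx_complete, and
so on) are invented for the example and do not appear in the driver. The
transmit path records the mapping's base address in a side array indexed by
ring position, and the completion path unmaps using that array rather than
whatever address the descriptor happens to hold, so a frame can later span
several descriptors without losing the address needed for unmapping.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RING_SIZE 8

/* Hypothetical stand-ins for the hardware descriptor and the skb. */
struct desc { uint64_t dptr; };
struct frame { size_t len; };

struct tx_ring {
	struct desc ring[RING_SIZE];
	struct frame *frames[RING_SIZE];   /* plays the role of gq->skbs */
	uint64_t unmap_addrs[RING_SIZE];   /* plays the role of gq->unmap_addrs */
	unsigned int cur, dirty;
};

/* Submit: remember the top DMA address in the side array. Once a frame
 * spans several descriptors, later descriptors point into the middle of
 * the mapping, so the descriptor alone cannot be used for unmapping. */
static void tx_submit(struct tx_ring *tq, struct frame *f, uint64_t dma_addr)
{
	tq->frames[tq->cur] = f;
	tq->unmap_addrs[tq->cur] = dma_addr;  /* base kept for unmapping */
	tq->ring[tq->cur].dptr = dma_addr;    /* what the hardware reads */
	tq->cur = (tq->cur + 1) % RING_SIZE;
}

/* Complete: unmap with the recorded base address and the full frame
 * length, and only then release the frame. */
static void tx_complete(struct tx_ring *tq)
{
	struct frame *f = tq->frames[tq->dirty];

	if (f) {
		printf("unmap base 0x%llx, len %zu\n",
		       (unsigned long long)tq->unmap_addrs[tq->dirty], f->len);
		free(f);
		tq->frames[tq->dirty] = NULL;
	}
	tq->dirty = (tq->dirty + 1) % RING_SIZE;
}

int main(void)
{
	struct tx_ring tq = { 0 };
	struct frame *f = malloc(sizeof(*f));

	if (!f)
		return 1;
	f->len = 2048;
	tx_submit(&tq, f, 0x1000);
	tx_complete(&tq);
	return 0;
}

The side array trades a little memory per ring entry for completion handling
that no longer depends on descriptor contents, which is what allows a later
patch to carve one frame into several descriptors.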

index c6a96abe9dbaf6a5650990c0ab3352a88882bde7..c2125fefc8c5ef49f35f1890521578d478ae7a54 100644 (file)
@@ -284,6 +284,8 @@ static void rswitch_gwca_queue_free(struct net_device *ndev,
                gq->tx_ring = NULL;
                kfree(gq->skbs);
                gq->skbs = NULL;
+               kfree(gq->unmap_addrs);
+               gq->unmap_addrs = NULL;
        }
 }
 
@@ -322,6 +324,9 @@ static int rswitch_gwca_queue_alloc(struct net_device *ndev,
                gq->skbs = kcalloc(gq->ring_size, sizeof(*gq->skbs), GFP_KERNEL);
                if (!gq->skbs)
                        return -ENOMEM;
+               gq->unmap_addrs = kcalloc(gq->ring_size, sizeof(*gq->unmap_addrs), GFP_KERNEL);
+               if (!gq->unmap_addrs)
+                       goto out;
                gq->tx_ring = dma_alloc_coherent(ndev->dev.parent,
                                                 sizeof(struct rswitch_ext_desc) *
                                                 (gq->ring_size + 1), &gq->ring_dma, GFP_KERNEL);
@@ -787,9 +792,7 @@ static void rswitch_tx_free(struct net_device *ndev)
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_queue *gq = rdev->tx_queue;
        struct rswitch_ext_desc *desc;
-       dma_addr_t dma_addr;
        struct sk_buff *skb;
-       unsigned int size;
 
        for (; rswitch_get_num_cur_queues(gq) > 0;
             gq->dirty = rswitch_next_queue_index(gq, false, 1)) {
@@ -798,18 +801,17 @@ static void rswitch_tx_free(struct net_device *ndev)
                        break;
 
                dma_rmb();
-               size = le16_to_cpu(desc->desc.info_ds) & TX_DS;
                skb = gq->skbs[gq->dirty];
                if (skb) {
-                       dma_addr = rswitch_desc_get_dptr(&desc->desc);
-                       dma_unmap_single(ndev->dev.parent, dma_addr,
-                                        size, DMA_TO_DEVICE);
+                       rdev->ndev->stats.tx_packets++;
+                       rdev->ndev->stats.tx_bytes += skb->len;
+                       dma_unmap_single(ndev->dev.parent,
+                                        gq->unmap_addrs[gq->dirty],
+                                        skb->len, DMA_TO_DEVICE);
                        dev_kfree_skb_any(gq->skbs[gq->dirty]);
                        gq->skbs[gq->dirty] = NULL;
                }
                desc->desc.die_dt = DT_EEMPTY;
-               rdev->ndev->stats.tx_packets++;
-               rdev->ndev->stats.tx_bytes += size;
        }
 }
 
@@ -1538,6 +1540,7 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
                goto err_kfree;
 
        gq->skbs[gq->cur] = skb;
+       gq->unmap_addrs[gq->cur] = dma_addr;
        desc = &gq->tx_ring[gq->cur];
        rswitch_desc_set_dptr(&desc->desc, dma_addr);
        desc->desc.info_ds = cpu_to_le16(skb->len);
index a407d85dcfadbce1023dcf798a4c2eadaf43abeb..2ac9a86b6238cec9aaad4e07284b424d23ca3d46 100644 (file)
@@ -956,6 +956,7 @@ struct rswitch_gwca_queue {
                /* For TX */
                struct {
                        struct sk_buff **skbs;
+                       dma_addr_t *unmap_addrs;
                };
                /* For RX */
                struct {