net: rswitch: Add jumbo frames handling for TX
authorYoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Fri, 8 Dec 2023 04:10:29 +0000 (13:10 +0900)
committerDavid S. Miller <davem@davemloft.net>
Sun, 10 Dec 2023 19:31:42 +0000 (19:31 +0000)
If the driver would like to transmit a jumbo frame of 2KiB or more,
it should be split into multiple descriptors. In the near future, to
support this, add handling of the specific descriptor types
F{START,MID,END}. However, such jumbo frames will not happen yet
because the maximum MTU size is still the default for now.

Signed-off-by: Yoshihiro Shimoda <yoshihiro.shimoda.uh@renesas.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/renesas/rswitch.c

index 36c70131594d55b059ba5bbea435c0a25b5216d8..d43f705f410b318bed1d577db42f00beb750f437 100644 (file)
@@ -1632,15 +1632,44 @@ static bool rswitch_ext_desc_set(struct rswitch_device *rdev,
        return true;
 }
 
+static u8 rswitch_ext_desc_get_die_dt(unsigned int nr_desc, unsigned int index)
+{
+       if (nr_desc == 1)
+               return DT_FSINGLE | DIE;
+       if (index == 0)
+               return DT_FSTART;
+       if (nr_desc - 1 == index)
+               return DT_FEND | DIE;
+       return DT_FMID;
+}
+
+static u16 rswitch_ext_desc_get_len(u8 die_dt, unsigned int orig_len)
+{
+       switch (die_dt & DT_MASK) {
+       case DT_FSINGLE:
+       case DT_FEND:
+               return (orig_len % RSWITCH_DESC_BUF_SIZE) ?: RSWITCH_DESC_BUF_SIZE;
+       case DT_FSTART:
+       case DT_FMID:
+               return RSWITCH_DESC_BUF_SIZE;
+       default:
+               return 0;
+       }
+}
+
 static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
        struct rswitch_device *rdev = netdev_priv(ndev);
        struct rswitch_gwca_queue *gq = rdev->tx_queue;
+       dma_addr_t dma_addr, dma_addr_orig;
        netdev_tx_t ret = NETDEV_TX_OK;
        struct rswitch_ext_desc *desc;
-       dma_addr_t dma_addr;
+       unsigned int i, nr_desc;
+       u8 die_dt;
+       u16 len;
 
-       if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - 1) {
+       nr_desc = (skb->len - 1) / RSWITCH_DESC_BUF_SIZE + 1;
+       if (rswitch_get_num_cur_queues(gq) >= gq->ring_size - nr_desc) {
                netif_stop_subqueue(ndev, 0);
                return NETDEV_TX_BUSY;
        }
@@ -1648,25 +1677,32 @@ static netdev_tx_t rswitch_start_xmit(struct sk_buff *skb, struct net_device *nd
        if (skb_put_padto(skb, ETH_ZLEN))
                return ret;
 
-       dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
-       if (dma_mapping_error(ndev->dev.parent, dma_addr))
+       dma_addr_orig = dma_map_single(ndev->dev.parent, skb->data, skb->len, DMA_TO_DEVICE);
+       if (dma_mapping_error(ndev->dev.parent, dma_addr_orig))
                goto err_kfree;
 
        gq->skbs[gq->cur] = skb;
-       gq->unmap_addrs[gq->cur] = dma_addr;
-       desc = &gq->tx_ring[gq->cur];
-       if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, skb->len, DT_FSINGLE | DIE))
-               goto err_unmap;
+       gq->unmap_addrs[gq->cur] = dma_addr_orig;
+
+       /* DT_FSTART should be set at last. So, this is reverse order. */
+       for (i = nr_desc; i-- > 0; ) {
+               desc = &gq->tx_ring[rswitch_next_queue_index(gq, true, i)];
+               die_dt = rswitch_ext_desc_get_die_dt(nr_desc, i);
+               dma_addr = dma_addr_orig + i * RSWITCH_DESC_BUF_SIZE;
+               len = rswitch_ext_desc_get_len(die_dt, skb->len);
+               if (!rswitch_ext_desc_set(rdev, skb, desc, dma_addr, len, die_dt))
+                       goto err_unmap;
+       }
 
        wmb();  /* gq->cur must be incremented after die_dt was set */
 
-       gq->cur = rswitch_next_queue_index(gq, true, 1);
+       gq->cur = rswitch_next_queue_index(gq, true, nr_desc);
        rswitch_modify(rdev->addr, GWTRC(gq->index), 0, BIT(gq->index % 32));
 
        return ret;
 
 err_unmap:
-       dma_unmap_single(ndev->dev.parent, dma_addr, skb->len, DMA_TO_DEVICE);
+       dma_unmap_single(ndev->dev.parent, dma_addr_orig, skb->len, DMA_TO_DEVICE);
 
 err_kfree:
        dev_kfree_skb_any(skb);