ravb: add support for changing MTU
author    Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
          Fri, 16 Feb 2018 16:10:08 +0000 (17:10 +0100)
committer David S. Miller <davem@davemloft.net>
          Fri, 16 Feb 2018 21:34:50 +0000 (16:34 -0500)
Allow the MTU to be changed within the limit set by the maximum size
of a descriptor (2048 bytes). Add the ndo_change_mtu callback so the
MTU can be changed from user-space, and take the configured MTU into
account when setting up the hardware.

Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/renesas/ravb.h
drivers/net/ethernet/renesas/ravb_main.c
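
As context for the new ndo_change_mtu hook added below, a minimal
user-space sketch of how an MTU change reaches the driver; the interface
name "eth0" and the value 2026 are placeholders, and in practice the same
request is usually issued with "ip link set dev eth0 mtu 2026":

    /* Minimal sketch: request a new MTU via the SIOCSIFMTU ioctl.
     * "eth0" and 2026 are placeholders, not part of the patch.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <unistd.h>

    int main(void)
    {
        struct ifreq ifr;
        int fd = socket(AF_INET, SOCK_DGRAM, 0);

        if (fd < 0) {
            perror("socket");
            return 1;
        }

        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
        ifr.ifr_mtu = 2026;    /* must lie within [min_mtu, max_mtu] */

        /* Fails with EBUSY while the interface is up, see ravb_change_mtu(). */
        if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)
            perror("SIOCSIFMTU");

        close(fd);
        return 0;
    }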

diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index 96a27b00c90e212aae4a751142fd41b5f57fb511..b81f4faf7b10114df1f17a0c0d80881e8ea9c5ea 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -1018,6 +1018,7 @@ struct ravb_private {
        u32 dirty_rx[NUM_RX_QUEUE];     /* Producer ring indices */
        u32 cur_tx[NUM_TX_QUEUE];
        u32 dirty_tx[NUM_TX_QUEUE];
+       u32 rx_buf_sz;                  /* Based on MTU+slack. */
        struct napi_struct napi[NUM_RX_QUEUE];
        struct work_struct work;
        /* MII transceiver section. */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index c87f57ca44371586d30f56985010a115a38c8aa1..34e841306e04a3d3171b85737e49c019f0d6668a 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -238,7 +238,7 @@ static void ravb_ring_free(struct net_device *ndev, int q)
                                               le32_to_cpu(desc->dptr)))
                                dma_unmap_single(ndev->dev.parent,
                                                 le32_to_cpu(desc->dptr),
-                                                PKT_BUF_SZ,
+                                                priv->rx_buf_sz,
                                                 DMA_FROM_DEVICE);
                }
                ring_size = sizeof(struct ravb_ex_rx_desc) *
@@ -300,9 +300,9 @@ static void ravb_ring_format(struct net_device *ndev, int q)
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
                /* RX descriptor */
                rx_desc = &priv->rx_ring[q][i];
-               rx_desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+               rx_desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
                dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data,
-                                         PKT_BUF_SZ,
+                                         priv->rx_buf_sz,
                                          DMA_FROM_DEVICE);
                /* We just set the data size to 0 for a failed mapping which
                 * should prevent DMA from happening...
@@ -346,6 +346,10 @@ static int ravb_ring_init(struct net_device *ndev, int q)
        int ring_size;
        int i;
 
+       /* Size the RX buffers to fit the MTU plus Ethernet and VLAN headers. */
+       priv->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ : ndev->mtu) +
+               ETH_HLEN + VLAN_HLEN;
+
        /* Allocate RX and TX skb rings */
        priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
                                  sizeof(*priv->rx_skb[q]), GFP_KERNEL);
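
To make the sizing above concrete, a standalone sketch of the same
arithmetic; the constant values are copied in for illustration
(PKT_BUF_SZ is 1538 in ravb.h, ETH_HLEN is 14 and VLAN_HLEN is 4):

    #include <stdio.h>

    #define PKT_BUF_SZ 1538    /* value from ravb.h */
    #define ETH_HLEN   14      /* Ethernet header */
    #define VLAN_HLEN  4       /* 802.1Q tag */

    /* Mirrors the rx_buf_sz computation in ravb_ring_init(). */
    static unsigned int rx_buf_sz(unsigned int mtu)
    {
        return (mtu <= 1492 ? PKT_BUF_SZ : mtu) + ETH_HLEN + VLAN_HLEN;
    }

    int main(void)
    {
        /* MTU 1500 -> 1518 bytes, MTU 2026 (the new maximum) -> 2044 bytes,
         * both within the 2048-byte descriptor limit.
         */
        printf("%u %u\n", rx_buf_sz(1500), rx_buf_sz(2026));
        return 0;
    }
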
@@ -355,7 +359,7 @@ static int ravb_ring_init(struct net_device *ndev, int q)
                goto error;
 
        for (i = 0; i < priv->num_rx_ring[q]; i++) {
-               skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
+               skb = netdev_alloc_skb(ndev, priv->rx_buf_sz + RAVB_ALIGN - 1);
                if (!skb)
                        goto error;
                ravb_set_buffer_align(skb);
@@ -586,7 +590,7 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
                        skb = priv->rx_skb[q][entry];
                        priv->rx_skb[q][entry] = NULL;
                        dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-                                        PKT_BUF_SZ,
+                                        priv->rx_buf_sz,
                                         DMA_FROM_DEVICE);
                        get_ts &= (q == RAVB_NC) ?
                                        RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
@@ -619,11 +623,12 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
        for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) {
                entry = priv->dirty_rx[q] % priv->num_rx_ring[q];
                desc = &priv->rx_ring[q][entry];
-               desc->ds_cc = cpu_to_le16(PKT_BUF_SZ);
+               desc->ds_cc = cpu_to_le16(priv->rx_buf_sz);
 
                if (!priv->rx_skb[q][entry]) {
                        skb = netdev_alloc_skb(ndev,
-                                              PKT_BUF_SZ + RAVB_ALIGN - 1);
+                                              priv->rx_buf_sz +
+                                              RAVB_ALIGN - 1);
                        if (!skb)
                                break;  /* Better luck next round. */
                        ravb_set_buffer_align(skb);
@@ -1854,6 +1859,17 @@ static int ravb_do_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
        return phy_mii_ioctl(phydev, req, cmd);
 }
 
+static int ravb_change_mtu(struct net_device *ndev, int new_mtu)
+{
+       if (netif_running(ndev))
+               return -EBUSY;
+
+       ndev->mtu = new_mtu;
+       netdev_update_features(ndev);
+
+       return 0;
+}
+
 static void ravb_set_rx_csum(struct net_device *ndev, bool enable)
 {
        struct ravb_private *priv = netdev_priv(ndev);
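
The -EBUSY check in ravb_change_mtu() is the only validation the driver
performs itself: the RX buffers are sized when the rings are set up in
ravb_ring_init(), so the MTU may only change while the interface is down,
and the range check is left to the networking core, which compares the
requested value against the min_mtu/max_mtu limits set in ravb_probe()
before the callback is ever reached. Roughly (a simplified paraphrase for
illustration, not the actual net/core/dev.c code):

    #include <linux/netdevice.h>

    /* Paraphrased core behaviour: the range set in ravb_probe() is
     * enforced before the driver's .ndo_change_mtu is called.
     */
    static int dev_set_mtu_sketch(struct net_device *dev, int new_mtu)
    {
        if (new_mtu == dev->mtu)
            return 0;

        if (new_mtu < dev->min_mtu ||
            (dev->max_mtu > 0 && new_mtu > dev->max_mtu))
            return -EINVAL;

        return dev->netdev_ops->ndo_change_mtu(dev, new_mtu);
    }
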
@@ -1895,6 +1911,7 @@ static const struct net_device_ops ravb_netdev_ops = {
        .ndo_set_rx_mode        = ravb_set_rx_mode,
        .ndo_tx_timeout         = ravb_tx_timeout,
        .ndo_do_ioctl           = ravb_do_ioctl,
+       .ndo_change_mtu         = ravb_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = eth_mac_addr,
        .ndo_set_features       = ravb_set_features,
@@ -2117,6 +2134,9 @@ static int ravb_probe(struct platform_device *pdev)
                goto out_release;
        }
 
+       ndev->max_mtu = 2048 - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+       ndev->min_mtu = ETH_MIN_MTU;
+
        /* Set function */
        ndev->netdev_ops = &ravb_netdev_ops;
        ndev->ethtool_ops = &ravb_ethtool_ops;
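
With ETH_HLEN = 14, VLAN_HLEN = 4 and ETH_FCS_LEN = 4 this works out to a
maximum MTU of 2048 - 22 = 2026 bytes, so even a maximum-sized VLAN-tagged
frame including the FCS fits a single 2048-byte descriptor; the matching
RX buffer computed in ravb_ring_init() for that MTU is 2026 + 14 + 4 =
2044 bytes.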