ice: Fix to change Rx/Tx ring descriptor size via ethtool with DCBx
author Usha Ketineni <usha.k.ketineni@intel.com>
Wed, 6 Nov 2019 10:05:31 +0000 (02:05 -0800)
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Fri, 8 Nov 2019 20:02:46 +0000 (12:02 -0800)
This patch fixes a kernel call trace triggered when a Rx/Tx descriptor
size change is requested via ethtool while DCB is configured.
ice_set_ringparam() should use vsi->num_txq instead of vsi->alloc_txq,
since num_txq reflects the number of queues actually enabled in the
driver when DCB is enabled/disabled. Otherwise, the queue index used
can go out of range.

For example, when vsi->alloc_txq has 104 queues and 3 TCs are enabled
via DCB, each TC gets 34 queues, so vsi->num_txq will be 102 and only
102 queues will be enabled. Iterating over all 104 allocated rings
would then index beyond the enabled queues.

Signed-off-by: Usha Ketineni <usha.k.ketineni@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
drivers/net/ethernet/intel/ice/ice_ethtool.c

index a8e51bc951982aa998884517974647fad5e4164b..f85d224f964d98ed963b7534b75756bc85a383d6 100644
@@ -2654,14 +2654,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
        netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
                    vsi->tx_rings[0]->count, new_tx_cnt);
 
-       tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+       tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
                                sizeof(*tx_rings), GFP_KERNEL);
        if (!tx_rings) {
                err = -ENOMEM;
                goto done;
        }
 
-       for (i = 0; i < vsi->alloc_txq; i++) {
+       ice_for_each_txq(vsi, i) {
                /* clone ring and setup updated count */
                tx_rings[i] = *vsi->tx_rings[i];
                tx_rings[i].count = new_tx_cnt;
@@ -2714,14 +2714,14 @@ process_rx:
        netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
                    vsi->rx_rings[0]->count, new_rx_cnt);
 
-       rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+       rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
                                sizeof(*rx_rings), GFP_KERNEL);
        if (!rx_rings) {
                err = -ENOMEM;
                goto done;
        }
 
-       for (i = 0; i < vsi->alloc_rxq; i++) {
+       ice_for_each_rxq(vsi, i) {
                /* clone ring and setup updated count */
                rx_rings[i] = *vsi->rx_rings[i];
                rx_rings[i].count = new_rx_cnt;
@@ -2759,7 +2759,7 @@ process_link:
                ice_down(vsi);
 
                if (tx_rings) {
-                       for (i = 0; i < vsi->alloc_txq; i++) {
+                       ice_for_each_txq(vsi, i) {
                                ice_free_tx_ring(vsi->tx_rings[i]);
                                *vsi->tx_rings[i] = tx_rings[i];
                        }
@@ -2767,7 +2767,7 @@ process_link:
                }
 
                if (rx_rings) {
-                       for (i = 0; i < vsi->alloc_rxq; i++) {
+                       ice_for_each_rxq(vsi, i) {
                                ice_free_rx_ring(vsi->rx_rings[i]);
                                /* copy the real tail offset */
                                rx_rings[i].tail = vsi->rx_rings[i]->tail;
@@ -2801,7 +2801,7 @@ process_link:
 free_tx:
        /* error cleanup if the Rx allocations failed after getting Tx */
        if (tx_rings) {
-               for (i = 0; i < vsi->alloc_txq; i++)
+               ice_for_each_txq(vsi, i)
                        ice_free_tx_ring(&tx_rings[i]);
                devm_kfree(&pf->pdev->dev, tx_rings);
        }