i40e: remove workaround for resetting XPS
author Jacob Keller <jacob.e.keller@intel.com>
Fri, 14 Jul 2017 13:10:08 +0000 (09:10 -0400)
committer Jeff Kirsher <jeffrey.t.kirsher@intel.com>
Sun, 27 Aug 2017 23:06:02 +0000 (16:06 -0700)
Since commit 3ffa037d7f78 ("i40e: Set XPS bit mask to zero in DCB mode")
we've tried to reset the XPS settings by building a custom
empty CPU mask.

This workaround is not necessary, because it never really removes the
XPS setting; it merely programs the map so that no CPU is valid.
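
For reference, the workaround in question boils down to the following
pattern (a sketch reconstructed from the hunk below, where "ring" is the
Tx ring being configured in i40e_config_xps_tx_ring()):

	cpumask_var_t mask;

	/* Build an all-zero CPU mask and program it as the XPS map for
	 * this queue.  This does not delete the XPS setting; it only
	 * leaves no CPU marked as valid for the queue.
	 */
	if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
		bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}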

Second, even if we kept the workaround, it could only be shortened
slightly by using zalloc_cpumask_var() in place of alloc_cpumask_var()
plus a separate bitmap_zero() call. Instead, drop it entirely and simply
skip the XPS configuration whenever more than one traffic class is
enabled.
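
That shortened-but-still-present variant would have looked roughly like
this (illustrative only, it was never applied; zalloc_cpumask_var()
returns the mask already zeroed, so the explicit bitmap_zero() call goes
away):

	cpumask_var_t mask;

	/* zalloc_cpumask_var() hands back a zero-filled mask, so no
	 * separate bitmap_zero() is needed before programming it.
	 */
	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
		netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
		free_cpumask_var(mask);
	}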

Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>

diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 0962b85ef6f3fc8e41e124241d19159702d05e6e..7366e7c7f39955c553feda701d1b8482c56e34b6 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2874,22 +2874,15 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
 {
        struct i40e_vsi *vsi = ring->vsi;
-       cpumask_var_t mask;
 
        if (!ring->q_vector || !ring->netdev)
                return;
 
-       /* Single TC mode enable XPS */
-       if (vsi->tc_config.numtc <= 1) {
-               if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
-                       netif_set_xps_queue(ring->netdev,
-                                           &ring->q_vector->affinity_mask,
-                                           ring->queue_index);
-       } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
-               /* Disable XPS to allow selection based on TC */
-               bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
-               netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
-               free_cpumask_var(mask);
+       if ((vsi->tc_config.numtc <= 1) &&
+           !test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state)) {
+               netif_set_xps_queue(ring->netdev,
+                                   &ring->q_vector->affinity_mask,
+                                   ring->queue_index);
        }
 
        /* schedule our worker thread which will take care of