ice: remove eswitch changing queues algorithm
author	Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Fri, 1 Mar 2024 11:54:07 +0000 (12:54 +0100)
committer	Tony Nguyen <anthony.l.nguyen@intel.com>
Mon, 25 Mar 2024 20:08:04 +0000 (13:08 -0700)
Changing the queues used by the eswitch will be done through the PF netdev.
There is no need to reserve queues when the number of queues in use is
known up front.
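
For illustration only, not part of this patch: once the queue count is
known, the control plane VSI can be sized directly, without the removed
qs.{value, to_reach, is_reaching} bookkeeping. The helper name below is
hypothetical; req_txq/req_rxq, alloc_txq and ice_vsi_open() are the
fields and call already used by ice_eswitch_cp_change_queues() in the
hunks below, and the close/rebuild path between them is elided:

    /* Hypothetical sketch: size the control plane VSI straight from the
     * known queue count instead of tracking a "to_reach" target.
     * Error handling is omitted.
     */
    static void ice_eswitch_cp_set_queues(struct ice_eswitch *eswitch, int queues)
    {
            struct ice_vsi *cp = eswitch->control_vsi;

            /* nothing to do if the requested size is already in place */
            if (queues <= 0 || queues == cp->alloc_txq)
                    return;

            cp->req_txq = queues;
            cp->req_rxq = queues;
            /* close and rebuild the VSI here, exactly as
             * ice_eswitch_cp_change_queues() already does, then reopen it
             */
            ice_vsi_open(cp);
    }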

Reviewed-by: Wojciech Drewek <wojciech.drewek@intel.com>
Reviewed-by: Marcin Szycik <marcin.szycik@linux.intel.com>
Signed-off-by: Michal Swiatkowski <michal.swiatkowski@linux.intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_eswitch.c
drivers/net/ethernet/intel/ice/ice_eswitch.h
drivers/net/ethernet/intel/ice/ice_sriov.c

index 365c03d1c4622340f119a072ef73127a44203b13..9bb435b4338ffc999ed3353bb3759d26933701d1 100644
@@ -527,12 +527,6 @@ struct ice_eswitch {
        struct ice_esw_br_offloads *br_offloads;
        struct xarray reprs;
        bool is_running;
-       /* struct to allow cp queues management optimization */
-       struct {
-               int to_reach;
-               int value;
-               bool is_reaching;
-       } qs;
 };
 
 struct ice_agg_node {
index 9069725c71b4a5c10b1086bb93d47b4b41dc3aac..2e999f801c0a12d837c6ea49d276cde4721c83d7 100644
@@ -455,8 +455,6 @@ static int ice_eswitch_enable_switchdev(struct ice_pf *pf)
                return -ENODEV;
 
        ctrl_vsi = pf->eswitch.control_vsi;
-       /* cp VSI is createad with 1 queue as default */
-       pf->eswitch.qs.value = 1;
        pf->eswitch.uplink_vsi = uplink_vsi;
 
        if (ice_eswitch_setup_env(pf))
@@ -489,7 +487,6 @@ static void ice_eswitch_disable_switchdev(struct ice_pf *pf)
        ice_vsi_release(ctrl_vsi);
 
        pf->eswitch.is_running = false;
-       pf->eswitch.qs.is_reaching = false;
 }
 
 /**
@@ -620,18 +617,6 @@ ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
        struct ice_vsi *cp = eswitch->control_vsi;
        int queues = 0;
 
-       if (eswitch->qs.is_reaching) {
-               if (eswitch->qs.to_reach >= eswitch->qs.value + change) {
-                       queues = eswitch->qs.to_reach;
-                       eswitch->qs.is_reaching = false;
-               } else {
-                       queues = 0;
-               }
-       } else if ((change > 0 && cp->alloc_txq <= eswitch->qs.value) ||
-                  change < 0) {
-               queues = cp->alloc_txq + change;
-       }
-
        if (queues) {
                cp->req_txq = queues;
                cp->req_rxq = queues;
@@ -643,7 +628,6 @@ ice_eswitch_cp_change_queues(struct ice_eswitch *eswitch, int change)
                ice_vsi_open(cp);
        }
 
-       eswitch->qs.value += change;
        ice_eswitch_remap_rings_to_vectors(eswitch);
 }
 
@@ -661,8 +645,6 @@ ice_eswitch_attach(struct ice_pf *pf, struct ice_vf *vf)
                err = ice_eswitch_enable_switchdev(pf);
                if (err)
                        return err;
-               /* Control plane VSI is created with 1 queue as default */
-               pf->eswitch.qs.to_reach -= 1;
                change = 0;
        }
 
@@ -756,19 +738,3 @@ int ice_eswitch_rebuild(struct ice_pf *pf)
 
        return 0;
 }
-
-/**
- * ice_eswitch_reserve_cp_queues - reserve control plane VSI queues
- * @pf: pointer to PF structure
- * @change: how many more (or less) queues is needed
- *
- * Remember to call ice_eswitch_attach/detach() the "change" times.
- */
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change)
-{
-       if (pf->eswitch.qs.value + change < 0)
-               return;
-
-       pf->eswitch.qs.to_reach = pf->eswitch.qs.value + change;
-       pf->eswitch.qs.is_reaching = true;
-}
index 1a288a03a79a14844357413240f9a778d8a6f849..59d51c0d14e5a2c7bab4d3ec6adb01c6a2ea60fd 100644
@@ -26,7 +26,6 @@ void ice_eswitch_set_target_vsi(struct sk_buff *skb,
                                struct ice_tx_offload_params *off);
 netdev_tx_t
 ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev);
-void ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change);
 #else /* CONFIG_ICE_SWITCHDEV */
 static inline void ice_eswitch_detach(struct ice_pf *pf, struct ice_vf *vf) { }
 
@@ -77,8 +76,5 @@ ice_eswitch_port_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
        return NETDEV_TX_BUSY;
 }
-
-static inline void
-ice_eswitch_reserve_cp_queues(struct ice_pf *pf, int change) { }
 #endif /* CONFIG_ICE_SWITCHDEV */
 #endif /* _ICE_ESWITCH_H_ */
index a958fcf3e6befb214051fa6099132f139306051f..65e1986af7772ee41f280484f4511beae20746d4 100644
@@ -170,8 +170,6 @@ void ice_free_vfs(struct ice_pf *pf)
        else
                dev_warn(dev, "VFs are assigned - not disabling SR-IOV\n");
 
-       ice_eswitch_reserve_cp_queues(pf, -ice_get_num_vfs(pf));
-
        mutex_lock(&vfs->table_lock);
 
        ice_for_each_vf(pf, bkt, vf) {
@@ -897,7 +895,6 @@ static int ice_ena_vfs(struct ice_pf *pf, u16 num_vfs)
                goto err_unroll_sriov;
        }
 
-       ice_eswitch_reserve_cp_queues(pf, num_vfs);
        ret = ice_start_vfs(pf);
        if (ret) {
                dev_err(dev, "Failed to start %d VFs, err %d\n", num_vfs, ret);