ice: Add driver support for firmware changes for LAG
author Dave Ertman <david.m.ertman@intel.com>
Tue, 20 Jun 2023 22:18:46 +0000 (15:18 -0700)
committer Tony Nguyen <anthony.l.nguyen@intel.com>
Thu, 27 Jul 2023 17:56:32 +0000 (10:56 -0700)
Add the defines, fields, and detection code for FW support of LAG for
SRIOV.  Also expose some previously static functions to allow access
from the LAG code.

Clean up code that is unused or not needed for LAG support.  Also add
an ordered workqueue for processing LAG events.
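
Illustrative sketch (not part of this patch): one way a netdev notifier
could defer event processing onto the new ordered ice_lag_wq so that LAG
events are handled one at a time, in order.  The struct ice_lag_work and
the two functions below are hypothetical names used only for illustration;
the actual deferral code is expected in follow-up patches.

/* Hypothetical sketch: defer a LAG netdev event to the ordered workqueue. */
struct ice_lag_work {
        struct work_struct lag_work;
        struct ice_lag *lag;
        unsigned long event;
};

static void ice_lag_process_event(struct work_struct *work)
{
        struct ice_lag_work *lag_work =
                container_of(work, struct ice_lag_work, lag_work);

        /* handle lag_work->event here, taking pf->lag_mutex as needed */
        kfree(lag_work);
}

static int ice_lag_queue_event(struct ice_lag *lag, unsigned long event)
{
        struct ice_lag_work *lag_work;

        lag_work = kzalloc(sizeof(*lag_work), GFP_KERNEL);
        if (!lag_work)
                return -ENOMEM;

        lag_work->lag = lag;
        lag_work->event = event;
        INIT_WORK(&lag_work->lag_work, ice_lag_process_event);
        queue_work(ice_lag_wq, &lag_work->lag_work);

        return 0;
}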

Reviewed-by: Daniel Machon <daniel.machon@microchip.com>
Signed-off-by: Dave Ertman <david.m.ertman@intel.com>
Tested-by: Sujai Buvaneswaran <sujai.buvaneswaran@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
drivers/net/ethernet/intel/ice/ice.h
drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
drivers/net/ethernet/intel/ice/ice_common.c
drivers/net/ethernet/intel/ice/ice_lag.c
drivers/net/ethernet/intel/ice/ice_lib.c
drivers/net/ethernet/intel/ice/ice_lib.h
drivers/net/ethernet/intel/ice/ice_main.c
drivers/net/ethernet/intel/ice/ice_type.h

diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index 9109006336f0d9e1d39bb87ef12865b85f0e8312..5ac0ad12f9f179b0e3934d755fa8feed1feef6e0 100644
@@ -200,6 +200,8 @@ enum ice_feature {
        ICE_F_PTP_EXTTS,
        ICE_F_SMA_CTRL,
        ICE_F_GNSS,
+       ICE_F_ROCE_LAG,
+       ICE_F_SRIOV_LAG,
        ICE_F_MAX
 };
 
@@ -569,6 +571,7 @@ struct ice_pf {
        struct mutex sw_mutex;          /* lock for protecting VSI alloc flow */
        struct mutex tc_mutex;          /* lock to protect TC changes */
        struct mutex adev_mutex;        /* lock to protect aux device access */
+       struct mutex lag_mutex;         /* protect ice_lag struct in PF */
        u32 msg_enable;
        struct ice_ptp ptp;
        struct gnss_serial *gnss_serial;
@@ -639,6 +642,8 @@ struct ice_pf {
        struct ice_agg_node vf_agg_node[ICE_MAX_VF_AGG_NODES];
 };
 
+extern struct workqueue_struct *ice_lag_wq;
+
 struct ice_netdev_priv {
        struct ice_vsi *vsi;
        struct ice_repr *repr;
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 63d3e1dcbba55ef05239f1bba2d6c0340c94b12f..1d4227b024d3109db8b4a344a16e5b1117ecb29b 100644
@@ -120,6 +120,9 @@ struct ice_aqc_list_caps_elem {
 #define ICE_AQC_CAPS_PCIE_RESET_AVOIDANCE              0x0076
 #define ICE_AQC_CAPS_POST_UPDATE_RESET_RESTRICT                0x0077
 #define ICE_AQC_CAPS_NVM_MGMT                          0x0080
+#define ICE_AQC_CAPS_FW_LAG_SUPPORT                    0x0092
+#define ICE_AQC_BIT_ROCEV2_LAG                         0x01
+#define ICE_AQC_BIT_SRIOV_LAG                          0x02
 
        u8 major_ver;
        u8 minor_ver;
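
Illustrative sketch (assumed helper name): FW advertises LAG support via
capability element 0x0092, with each LAG mode reported as a bit in the
element's 'number' field.  A raw element could be tested like this:

/* Hypothetical helper: test the SRIOV LAG bit in a raw capability element. */
static bool ice_caps_elem_has_sriov_lag(struct ice_aqc_list_caps_elem *elem)
{
        if (le16_to_cpu(elem->cap) != ICE_AQC_CAPS_FW_LAG_SUPPORT)
                return false;

        return !!(le32_to_cpu(elem->number) & ICE_AQC_BIT_SRIOV_LAG);
}

In practice the common capability parser receives 'number' already
converted to CPU order, as the ice_common.c change below shows.
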
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8ff3d6d847976c815f408a6cdb9130800ba11f13..3853ef22429f12ea6198e7670fc5b59bac4dc636 100644
@@ -2241,6 +2241,14 @@ ice_parse_common_caps(struct ice_hw *hw, struct ice_hw_common_caps *caps,
                          "%s: reset_restrict_support = %d\n", prefix,
                          caps->reset_restrict_support);
                break;
+       case ICE_AQC_CAPS_FW_LAG_SUPPORT:
+               caps->roce_lag = !!(number & ICE_AQC_BIT_ROCEV2_LAG);
+               ice_debug(hw, ICE_DBG_INIT, "%s: roce_lag = %u\n",
+                         prefix, caps->roce_lag);
+               caps->sriov_lag = !!(number & ICE_AQC_BIT_SRIOV_LAG);
+               ice_debug(hw, ICE_DBG_INIT, "%s: sriov_lag = %u\n",
+                         prefix, caps->sriov_lag);
+               break;
        default:
                /* Not one of the recognized common capabilities */
                found = false;
diff --git a/drivers/net/ethernet/intel/ice/ice_lag.c b/drivers/net/ethernet/intel/ice/ice_lag.c
index 5a7753bda32457ec7aaee441984648a8dc69e968..d018e68f5a6d9ed6d6ede20bfeb3ad3ceac4c9d3 100644
@@ -4,8 +4,12 @@
 /* Link Aggregation code */
 
 #include "ice.h"
+#include "ice_lib.h"
 #include "ice_lag.h"
 
+#define ICE_LAG_RES_SHARED     BIT(14)
+#define ICE_LAG_RES_VALID      BIT(15)
+
 /**
  * ice_lag_set_primary - set PF LAG state as Primary
  * @lag: LAG info struct
@@ -225,6 +229,26 @@ static void ice_lag_unregister(struct ice_lag *lag, struct net_device *netdev)
        lag->role = ICE_LAG_NONE;
 }
 
+/**
+ * ice_lag_init_feature_support_flag - Check for NVM support for LAG
+ * @pf: PF struct
+ */
+static void ice_lag_init_feature_support_flag(struct ice_pf *pf)
+{
+       struct ice_hw_common_caps *caps;
+
+       caps = &pf->hw.dev_caps.common_cap;
+       if (caps->roce_lag)
+               ice_set_feature_support(pf, ICE_F_ROCE_LAG);
+       else
+               ice_clear_feature_support(pf, ICE_F_ROCE_LAG);
+
+       if (caps->sriov_lag)
+               ice_set_feature_support(pf, ICE_F_SRIOV_LAG);
+       else
+               ice_clear_feature_support(pf, ICE_F_SRIOV_LAG);
+}
+
 /**
  * ice_lag_changeupper_event - handle LAG changeupper event
  * @lag: LAG info struct
@@ -264,26 +288,6 @@ static void ice_lag_changeupper_event(struct ice_lag *lag, void *ptr)
        ice_display_lag_info(lag);
 }
 
-/**
- * ice_lag_changelower_event - handle LAG changelower event
- * @lag: LAG info struct
- * @ptr: opaque data pointer
- *
- * ptr to be cast to netdev_notifier_changelowerstate_info
- */
-static void ice_lag_changelower_event(struct ice_lag *lag, void *ptr)
-{
-       struct net_device *netdev = netdev_notifier_info_to_dev(ptr);
-
-       if (netdev != lag->netdev)
-               return;
-
-       netdev_dbg(netdev, "bonding info\n");
-
-       if (!netif_is_lag_port(netdev))
-               netdev_dbg(netdev, "CHANGELOWER rcvd, but netdev not in LAG. Bail\n");
-}
-
 /**
  * ice_lag_event_handler - handle LAG events from netdev
  * @notif_blk: notifier block registered by this netdev
@@ -310,9 +314,6 @@ ice_lag_event_handler(struct notifier_block *notif_blk, unsigned long event,
        case NETDEV_CHANGEUPPER:
                ice_lag_changeupper_event(lag, ptr);
                break;
-       case NETDEV_CHANGELOWERSTATE:
-               ice_lag_changelower_event(lag, ptr);
-               break;
        case NETDEV_BONDING_INFO:
                ice_lag_info_event(lag, ptr);
                break;
@@ -379,6 +380,8 @@ int ice_init_lag(struct ice_pf *pf)
        struct ice_vsi *vsi;
        int err;
 
+       ice_lag_init_feature_support_flag(pf);
+
        pf->lag = kzalloc(sizeof(*lag), GFP_KERNEL);
        if (!pf->lag)
                return -ENOMEM;
@@ -435,9 +438,7 @@ void ice_deinit_lag(struct ice_pf *pf)
        if (lag->pf)
                ice_unregister_lag_handler(lag);
 
-       dev_put(lag->upper_netdev);
-
-       dev_put(lag->peer_netdev);
+       flush_workqueue(ice_lag_wq);
 
        kfree(lag);
 
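Usage note (illustrative, assumed function name): other parts of the driver
are expected to gate LAG behavior on the new feature flags through
ice_is_feature_supported(), for example:

/* Hypothetical example: check which LAG modes the FW advertised. */
static void ice_lag_report_modes(struct ice_pf *pf)
{
        struct device *dev = ice_pf_to_dev(pf);

        if (ice_is_feature_supported(pf, ICE_F_SRIOV_LAG))
                dev_dbg(dev, "FW supports SRIOV LAG\n");

        if (ice_is_feature_supported(pf, ICE_F_ROCE_LAG))
                dev_dbg(dev, "FW supports RoCEv2 LAG\n");
}
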
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index a43c23c80565b7b4c0269783e1e1a02279fff04b..077f2e91ae1a18d3298a1ef49fcf1774f02b9b3d 100644
@@ -3970,7 +3970,7 @@ bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f)
  * @pf: pointer to the struct ice_pf instance
  * @f: feature enum to set
  */
-static void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f)
 {
        if (f < 0 || f >= ICE_F_MAX)
                return;
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 1628385a967264b020fd97aa9eb15cb380c71cc7..dd53fe968ad869110a667904d8b5cf298a727f9c 100644
@@ -163,6 +163,7 @@ int ice_vsi_del_vlan_zero(struct ice_vsi *vsi);
 bool ice_vsi_has_non_zero_vlans(struct ice_vsi *vsi);
 u16 ice_vsi_num_non_zero_vlans(struct ice_vsi *vsi);
 bool ice_is_feature_supported(struct ice_pf *pf, enum ice_feature f);
+void ice_set_feature_support(struct ice_pf *pf, enum ice_feature f);
 void ice_clear_feature_support(struct ice_pf *pf, enum ice_feature f);
 void ice_init_feature_support(struct ice_pf *pf);
 bool ice_vsi_is_rx_queue_active(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 9b36cce306b89532fb9e87c59f9268b6240c0781..7f7728dadcbbffc64dbd7f5d7291e311869dc2fa 100644
@@ -64,6 +64,7 @@ struct device *ice_hw_to_dev(struct ice_hw *hw)
 }
 
 static struct workqueue_struct *ice_wq;
+struct workqueue_struct *ice_lag_wq;
 static const struct net_device_ops ice_netdev_safe_mode_ops;
 static const struct net_device_ops ice_netdev_ops;
 
@@ -3795,6 +3796,7 @@ u16 ice_get_avail_rxq_count(struct ice_pf *pf)
 static void ice_deinit_pf(struct ice_pf *pf)
 {
        ice_service_task_stop(pf);
+       mutex_destroy(&pf->lag_mutex);
        mutex_destroy(&pf->adev_mutex);
        mutex_destroy(&pf->sw_mutex);
        mutex_destroy(&pf->tc_mutex);
@@ -3875,6 +3877,7 @@ static int ice_init_pf(struct ice_pf *pf)
        mutex_init(&pf->sw_mutex);
        mutex_init(&pf->tc_mutex);
        mutex_init(&pf->adev_mutex);
+       mutex_init(&pf->lag_mutex);
 
        INIT_HLIST_HEAD(&pf->aq_wait_list);
        spin_lock_init(&pf->aq_wait_lock);
@@ -5571,7 +5574,7 @@ static struct pci_driver ice_driver = {
  */
 static int __init ice_module_init(void)
 {
-       int status;
+       int status = -ENOMEM;
 
        pr_info("%s\n", ice_driver_string);
        pr_info("%s\n", ice_copyright);
@@ -5579,15 +5582,27 @@ static int __init ice_module_init(void)
        ice_wq = alloc_workqueue("%s", 0, 0, KBUILD_MODNAME);
        if (!ice_wq) {
                pr_err("Failed to create workqueue\n");
-               return -ENOMEM;
+               return status;
+       }
+
+       ice_lag_wq = alloc_ordered_workqueue("ice_lag_wq", 0);
+       if (!ice_lag_wq) {
+               pr_err("Failed to create LAG workqueue\n");
+               goto err_dest_wq;
        }
 
        status = pci_register_driver(&ice_driver);
        if (status) {
                pr_err("failed to register PCI driver, err %d\n", status);
-               destroy_workqueue(ice_wq);
+               goto err_dest_lag_wq;
        }
 
+       return 0;
+
+err_dest_lag_wq:
+       destroy_workqueue(ice_lag_wq);
+err_dest_wq:
+       destroy_workqueue(ice_wq);
        return status;
 }
 module_init(ice_module_init);
@@ -5602,6 +5617,7 @@ static void __exit ice_module_exit(void)
 {
        pci_unregister_driver(&ice_driver);
        destroy_workqueue(ice_wq);
+       destroy_workqueue(ice_lag_wq);
        pr_info("module unloaded\n");
 }
 module_exit(ice_module_exit);
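
Illustrative sketch (assumed function name): the new lag_mutex is meant to
serialize access to the PF's ice_lag struct, roughly along these lines:

/* Hypothetical example: access pf->lag only while holding lag_mutex. */
static void ice_lag_locked_access(struct ice_pf *pf)
{
        mutex_lock(&pf->lag_mutex);
        if (pf->lag) {
                /* read or update pf->lag fields here */
        }
        mutex_unlock(&pf->lag_mutex);
}
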
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index df9171a1a34f0606dce27b454804d68dfec2ed4a..e82f38c2a9402ddb50c021e510a1ef3223203f26 100644
@@ -277,6 +277,8 @@ struct ice_hw_common_caps {
        u8 dcb;
        u8 ieee_1588;
        u8 rdma;
+       u8 roce_lag;
+       u8 sriov_lag;
 
        bool nvm_update_pending_nvm;
        bool nvm_update_pending_orom;