bnxt_en: Combine 2 functions calling the same HWRM_DRV_RGTR fw command.
authorVasundhara Volam <vasundhara-v.volam@broadcom.com>
Sun, 24 Nov 2019 03:30:41 +0000 (22:30 -0500)
committerJakub Kicinski <jakub.kicinski@netronome.com>
Sun, 24 Nov 2019 22:48:02 +0000 (14:48 -0800)
Every time the driver registers with the firmware, it is also required to
register for async event notifications. These 2 calls are done using the
same firmware command and can be combined.

We are also missing the 2nd step to register for async events
in the suspend/resume path, and this patch fixes that.  Prior to this,
we were getting only the default notifications.

ULP can register for additional async events for the RDMA driver,
so we add a parameter to the new function to only do step 2 when
it is called from ULP.

Signed-off-by: Vasundhara Volam <vasundhara-v.volam@broadcom.com>
Signed-off-by: Michael Chan <michael.chan@broadcom.com>
Signed-off-by: Jakub Kicinski <jakub.kicinski@netronome.com>
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c

index 464e8bd143658ccc6fd4b63a0f6b789c78fda024..f627741ef5853337169f1ff560b783cdf9b8b49f 100644 (file)
@@ -4394,53 +4394,22 @@ int hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 msg_len,
        return rc;
 }
 
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
-                                    int bmap_size)
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap, int bmap_size,
+                           bool async_only)
 {
+       struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
        struct hwrm_func_drv_rgtr_input req = {0};
        DECLARE_BITMAP(async_events_bmap, 256);
        u32 *events = (u32 *)async_events_bmap;
-       int i;
-
-       bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
-
-       req.enables =
-               cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
-
-       memset(async_events_bmap, 0, sizeof(async_events_bmap));
-       for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
-               u16 event_id = bnxt_async_events_arr[i];
-
-               if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
-                   !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
-                       continue;
-               __set_bit(bnxt_async_events_arr[i], async_events_bmap);
-       }
-       if (bmap && bmap_size) {
-               for (i = 0; i < bmap_size; i++) {
-                       if (test_bit(i, bmap))
-                               __set_bit(i, async_events_bmap);
-               }
-       }
-
-       for (i = 0; i < 8; i++)
-               req.async_event_fwd[i] |= cpu_to_le32(events[i]);
-
-       return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
-}
-
-static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
-{
-       struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr;
-       struct hwrm_func_drv_rgtr_input req = {0};
        u32 flags;
-       int rc;
+       int rc, i;
 
        bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_DRV_RGTR, -1, -1);
 
        req.enables =
                cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_OS_TYPE |
-                           FUNC_DRV_RGTR_REQ_ENABLES_VER);
+                           FUNC_DRV_RGTR_REQ_ENABLES_VER |
+                           FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
 
        req.os_type = cpu_to_le16(FUNC_DRV_RGTR_REQ_OS_TYPE_LINUX);
        flags = FUNC_DRV_RGTR_REQ_FLAGS_16BIT_VER_MODE |
@@ -4481,6 +4450,28 @@ static int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp)
                req.flags |= cpu_to_le32(
                        FUNC_DRV_RGTR_REQ_FLAGS_FLOW_HANDLE_64BIT_MODE);
 
+       memset(async_events_bmap, 0, sizeof(async_events_bmap));
+       for (i = 0; i < ARRAY_SIZE(bnxt_async_events_arr); i++) {
+               u16 event_id = bnxt_async_events_arr[i];
+
+               if (event_id == ASYNC_EVENT_CMPL_EVENT_ID_ERROR_RECOVERY &&
+                   !(bp->fw_cap & BNXT_FW_CAP_ERROR_RECOVERY))
+                       continue;
+               __set_bit(bnxt_async_events_arr[i], async_events_bmap);
+       }
+       if (bmap && bmap_size) {
+               for (i = 0; i < bmap_size; i++) {
+                       if (test_bit(i, bmap))
+                               __set_bit(i, async_events_bmap);
+               }
+       }
+       for (i = 0; i < 8; i++)
+               req.async_event_fwd[i] |= cpu_to_le32(events[i]);
+
+       if (async_only)
+               req.enables =
+                       cpu_to_le32(FUNC_DRV_RGTR_REQ_ENABLES_ASYNC_EVENT_FWD);
+
        mutex_lock(&bp->hwrm_cmd_lock);
        rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
        if (!rc) {
@@ -10490,11 +10481,7 @@ static int bnxt_fw_init_one_p2(struct bnxt *bp)
                netdev_warn(bp->dev, "hwrm query error recovery failure rc: %d\n",
                            rc);
 
-       rc = bnxt_hwrm_func_drv_rgtr(bp);
-       if (rc)
-               return -ENODEV;
-
-       rc = bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+       rc = bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false);
        if (rc)
                return -ENODEV;
 
@@ -11947,7 +11934,8 @@ static int bnxt_resume(struct device *device)
                goto resume_exit;
        }
        pci_set_master(bp->pdev);
-       if (bnxt_hwrm_ver_get(bp) || bnxt_hwrm_func_drv_rgtr(bp)) {
+       if (bnxt_hwrm_ver_get(bp) ||
+           bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, false)) {
                rc = -ENODEV;
                goto resume_exit;
        }
index a38664eef6143b6fe152fdea51470a9e38438014..35c483b9db672dd5d141d75c9da1072515e16fe1 100644 (file)
@@ -1996,8 +1996,8 @@ int _hwrm_send_message(struct bnxt *, void *, u32, int);
 int _hwrm_send_message_silent(struct bnxt *bp, void *msg, u32 len, int timeout);
 int hwrm_send_message(struct bnxt *, void *, u32, int);
 int hwrm_send_message_silent(struct bnxt *, void *, u32, int);
-int bnxt_hwrm_func_rgtr_async_events(struct bnxt *bp, unsigned long *bmap,
-                                    int bmap_size);
+int bnxt_hwrm_func_drv_rgtr(struct bnxt *bp, unsigned long *bmap,
+                           int bmap_size, bool async_only);
 int bnxt_hwrm_vnic_cfg(struct bnxt *bp, u16 vnic_id);
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings);
 int bnxt_nq_rings_in_use(struct bnxt *bp);
index 077fd101be600a7fae9c1756b3829e003e68a50e..c601ff7b8f61cdde62177430af3e2c61f481dd6a 100644 (file)
@@ -81,7 +81,7 @@ static int bnxt_unregister_dev(struct bnxt_en_dev *edev, int ulp_id)
                edev->en_ops->bnxt_free_msix(edev, ulp_id);
 
        if (ulp->max_async_event_id)
-               bnxt_hwrm_func_rgtr_async_events(bp, NULL, 0);
+               bnxt_hwrm_func_drv_rgtr(bp, NULL, 0, true);
 
        RCU_INIT_POINTER(ulp->ulp_ops, NULL);
        synchronize_rcu();
@@ -441,7 +441,7 @@ static int bnxt_register_async_events(struct bnxt_en_dev *edev, int ulp_id,
        /* Make sure bnxt_ulp_async_events() sees this order */
        smp_wmb();
        ulp->max_async_event_id = max_id;
-       bnxt_hwrm_func_rgtr_async_events(bp, events_bmap, max_id + 1);
+       bnxt_hwrm_func_drv_rgtr(bp, events_bmap, max_id + 1, true);
        return 0;
 }