RDMA/cma: Combine cma_ndev_work with cma_work
author Jason Gunthorpe <jgg@nvidia.com>
Wed, 2 Sep 2020 08:11:19 +0000 (11:11 +0300)
committer Jason Gunthorpe <jgg@nvidia.com>
Thu, 17 Sep 2020 12:09:24 +0000 (09:09 -0300)
These are the same thing, except that cma_ndev_work doesn't have a state
transition. Signal "no state transition" by setting both old_state and
new_state to 0.

In all cases the handler function should not be called once
rdma_destroy_id() has progressed past setting the state.
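
For reference, after this patch cma_work_handler() opens with the checks
below (a condensed sketch of the resulting code from the diff; the rest of
the body that calls cma_cm_event_handler() is unchanged):

    static void cma_work_handler(struct work_struct *_work)
    {
            struct cma_work *work = container_of(_work, struct cma_work, work);
            struct rdma_id_private *id_priv = work->id;

            mutex_lock(&id_priv->handler_mutex);
            /* Never deliver events once destruction has begun */
            if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
                READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
                    goto out_unlock;
            /* old_state == new_state == 0 means no state transition requested */
            if (work->old_state != 0 || work->new_state != 0) {
                    if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
                            goto out_unlock;
            }
            /* ... unchanged: cma_cm_event_handler(), out_unlock/out_free paths ... */
    }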

Link: https://lore.kernel.org/r/20200902081122.745412-6-leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
drivers/infiniband/core/cma.c

index bd3621654366b9f1f8854b8f6d6454ddc6e0c35c..df0a5bc4d6b7e6030f6d77911a6fd2e8d4b634fe 100644 (file)
@@ -363,12 +363,6 @@ struct cma_work {
        struct rdma_cm_event    event;
 };
 
-struct cma_ndev_work {
-       struct work_struct      work;
-       struct rdma_id_private  *id;
-       struct rdma_cm_event    event;
-};
-
 struct iboe_mcast_work {
        struct work_struct       work;
        struct rdma_id_private  *id;
@@ -2647,32 +2641,14 @@ static void cma_work_handler(struct work_struct *_work)
        struct rdma_id_private *id_priv = work->id;
 
        mutex_lock(&id_priv->handler_mutex);
-       if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+       if (READ_ONCE(id_priv->state) == RDMA_CM_DESTROYING ||
+           READ_ONCE(id_priv->state) == RDMA_CM_DEVICE_REMOVAL)
                goto out_unlock;
-
-       if (cma_cm_event_handler(id_priv, &work->event)) {
-               cma_id_put(id_priv);
-               destroy_id_handler_unlock(id_priv);
-               goto out_free;
+       if (work->old_state != 0 || work->new_state != 0) {
+               if (!cma_comp_exch(id_priv, work->old_state, work->new_state))
+                       goto out_unlock;
        }
 
-out_unlock:
-       mutex_unlock(&id_priv->handler_mutex);
-       cma_id_put(id_priv);
-out_free:
-       kfree(work);
-}
-
-static void cma_ndev_work_handler(struct work_struct *_work)
-{
-       struct cma_ndev_work *work = container_of(_work, struct cma_ndev_work, work);
-       struct rdma_id_private *id_priv = work->id;
-
-       mutex_lock(&id_priv->handler_mutex);
-       if (id_priv->state == RDMA_CM_DESTROYING ||
-           id_priv->state == RDMA_CM_DEVICE_REMOVAL)
-               goto out_unlock;
-
        if (cma_cm_event_handler(id_priv, &work->event)) {
                cma_id_put(id_priv);
                destroy_id_handler_unlock(id_priv);
@@ -4698,7 +4674,7 @@ EXPORT_SYMBOL(rdma_leave_multicast);
 static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id_priv)
 {
        struct rdma_dev_addr *dev_addr;
-       struct cma_ndev_work *work;
+       struct cma_work *work;
 
        dev_addr = &id_priv->id.route.addr.dev_addr;
 
@@ -4711,7 +4687,7 @@ static int cma_netdev_change(struct net_device *ndev, struct rdma_id_private *id
                if (!work)
                        return -ENOMEM;
 
-               INIT_WORK(&work->work, cma_ndev_work_handler);
+               INIT_WORK(&work->work, cma_work_handler);
                work->id = id_priv;
                work->event.event = RDMA_CM_EVENT_ADDR_CHANGE;
                cma_id_get(id_priv);