RDMA/vmw_pvrdma: Avoid use after free due to QP/CQ/SRQ destroy
author Bryan Tan <bryantan@vmware.com>
Wed, 20 Dec 2017 17:51:40 +0000 (09:51 -0800)
committer Jason Gunthorpe <jgg@mellanox.com>
Thu, 21 Dec 2017 23:06:07 +0000 (16:06 -0700)
The use of wait queues in vmw_pvrdma for handling concurrent access
to a resource leaves a race condition which can cause a use-after-free
bug: an event handler that has decremented the refcount but not yet
called wake_up() can touch a freed wait queue if the destroy path,
woken by some other event, observes the refcount at zero and frees the
object in the meantime.

Fix this by using the pattern from other drivers: complete() protected
by dec_and_test, to ensure complete() is called only once, with the
destroy path blocking in wait_for_completion() until the last
reference is dropped.
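
For illustration, a minimal sketch of the pattern (hypothetical struct
and function names, not the driver's actual ones):

	#include <linux/atomic.h>
	#include <linux/completion.h>
	#include <linux/slab.h>

	/* Hypothetical object using the refcount + completion pattern. */
	struct obj {
		atomic_t refcnt;		/* set to 1 at creation */
		struct completion free;		/* init_completion() at creation */
	};

	/* Any path dropping a reference, e.g. an event handler: */
	static void obj_put(struct obj *o)
	{
		/*
		 * dec_and_test makes the decrement and the zero check a
		 * single atomic step, so exactly one caller observes the
		 * transition to zero and complete() runs exactly once.
		 */
		if (atomic_dec_and_test(&o->refcnt))
			complete(&o->free);
	}

	/* Destroy path: drop the initial reference, then wait out the rest. */
	static void obj_destroy(struct obj *o)
	{
		obj_put(o);
		wait_for_completion(&o->free);	/* last put has completed */
		kfree(o);
	}

complete() is the final access from the putting side and finishes
before wait_for_completion() can return, so the destroy path may then
free the object safely, which a bare wake_up()/wait_event() pair does
not guarantee.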

Fixes: 29c8d9eba550 ("IB: Add vmw_pvrdma driver")
Signed-off-by: Bryan Tan <bryantan@vmware.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c

diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
index 63bc2efc34eb57e37f364e5b73e5a157e6040e47..4f7bd3b6a3152937a8ec2fa4de8637e3d9391a9a 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma.h
@@ -94,7 +94,7 @@ struct pvrdma_cq {
        u32 cq_handle;
        bool is_kernel;
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_id_table {
@@ -175,7 +175,7 @@ struct pvrdma_srq {
        u32 srq_handle;
        int npages;
        refcount_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_qp {
@@ -197,7 +197,7 @@ struct pvrdma_qp {
        bool is_kernel;
        struct mutex mutex; /* QP state mutex. */
        atomic_t refcnt;
-       wait_queue_head_t wait;
+       struct completion free;
 };
 
 struct pvrdma_dev {
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
index 3562c0c30492d07a7d1eddacb033ef769a1f9264..e529622cefad6a501492dc165458b738636842b3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_cq.c
@@ -179,7 +179,7 @@ struct ib_cq *pvrdma_create_cq(struct ib_device *ibdev,
                pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0);
 
        atomic_set(&cq->refcnt, 1);
-       init_waitqueue_head(&cq->wait);
+       init_completion(&cq->free);
        spin_lock_init(&cq->cq_lock);
 
        memset(cmd, 0, sizeof(*cmd));
@@ -230,8 +230,9 @@ err_cq:
 
 static void pvrdma_free_cq(struct pvrdma_dev *dev, struct pvrdma_cq *cq)
 {
-       atomic_dec(&cq->refcnt);
-       wait_event(cq->wait, !atomic_read(&cq->refcnt));
+       if (atomic_dec_and_test(&cq->refcnt))
+               complete(&cq->free);
+       wait_for_completion(&cq->free);
 
        if (!cq->is_kernel)
                ib_umem_release(cq->umem);
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 1f4e18717a006da9cd3566318123f0fff1038df9..e92681878c93f4b0d4f29724700cc7ba474d7de4 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -346,9 +346,8 @@ static void pvrdma_qp_event(struct pvrdma_dev *dev, u32 qpn, int type)
                ibqp->event_handler(&e, ibqp->qp_context);
        }
        if (qp) {
-               atomic_dec(&qp->refcnt);
-               if (atomic_read(&qp->refcnt) == 0)
-                       wake_up(&qp->wait);
+               if (atomic_dec_and_test(&qp->refcnt))
+                       complete(&qp->free);
        }
 }
 
@@ -373,9 +372,8 @@ static void pvrdma_cq_event(struct pvrdma_dev *dev, u32 cqn, int type)
                ibcq->event_handler(&e, ibcq->cq_context);
        }
        if (cq) {
-               atomic_dec(&cq->refcnt);
-               if (atomic_read(&cq->refcnt) == 0)
-                       wake_up(&cq->wait);
+               if (atomic_dec_and_test(&cq->refcnt))
+                       complete(&cq->free);
        }
 }
 
@@ -404,7 +402,7 @@ static void pvrdma_srq_event(struct pvrdma_dev *dev, u32 srqn, int type)
        }
        if (srq) {
                if (refcount_dec_and_test(&srq->refcnt))
-                       wake_up(&srq->wait);
+                       complete(&srq->free);
        }
 }
 
@@ -539,9 +537,8 @@ static irqreturn_t pvrdma_intrx_handler(int irq, void *dev_id)
                if (cq && cq->ibcq.comp_handler)
                        cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
                if (cq) {
-                       atomic_dec(&cq->refcnt);
-                       if (atomic_read(&cq->refcnt))
-                               wake_up(&cq->wait);
+                       if (atomic_dec_and_test(&cq->refcnt))
+                               complete(&cq->free);
                }
                pvrdma_idx_ring_inc(&ring->cons_head, ring_slots);
        }
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
index dceebc623d961b6fe4d9ebc795ae53a701bdc4d4..4059308e1454a5bfda72a31d9931afaa50d39441 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_qp.c
@@ -246,7 +246,7 @@ struct ib_qp *pvrdma_create_qp(struct ib_pd *pd,
                spin_lock_init(&qp->rq.lock);
                mutex_init(&qp->mutex);
                atomic_set(&qp->refcnt, 1);
-               init_waitqueue_head(&qp->wait);
+               init_completion(&qp->free);
 
                qp->state = IB_QPS_RESET;
 
@@ -428,8 +428,9 @@ static void pvrdma_free_qp(struct pvrdma_qp *qp)
 
        pvrdma_unlock_cqs(scq, rcq, &scq_flags, &rcq_flags);
 
-       atomic_dec(&qp->refcnt);
-       wait_event(qp->wait, !atomic_read(&qp->refcnt));
+       if (atomic_dec_and_test(&qp->refcnt))
+               complete(&qp->free);
+       wait_for_completion(&qp->free);
 
        if (!qp->is_kernel) {
                if (qp->rumem)
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
index a2b1a3c115f21fd3714df88e19c4c570169f6ef2..5acebb1ef631ae0070d28917530345689a2654de 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_srq.c
@@ -149,7 +149,7 @@ struct ib_srq *pvrdma_create_srq(struct ib_pd *pd,
 
        spin_lock_init(&srq->lock);
        refcount_set(&srq->refcnt, 1);
-       init_waitqueue_head(&srq->wait);
+       init_completion(&srq->free);
 
        dev_dbg(&dev->pdev->dev,
                "create shared receive queue from user space\n");
@@ -236,8 +236,9 @@ static void pvrdma_free_srq(struct pvrdma_dev *dev, struct pvrdma_srq *srq)
        dev->srq_tbl[srq->srq_handle] = NULL;
        spin_unlock_irqrestore(&dev->srq_tbl_lock, flags);
 
-       if (!refcount_dec_and_test(&srq->refcnt))
-               wait_event(srq->wait, !refcount_read(&srq->refcnt));
+       if (refcount_dec_and_test(&srq->refcnt))
+               complete(&srq->free);
+       wait_for_completion(&srq->free);
 
        /* There is no support for kernel clients, so this is safe. */
        ib_umem_release(srq->umem);