rxe_pool_cleanup(&rxe->cq_pool);
rxe_pool_cleanup(&rxe->mr_pool);
rxe_pool_cleanup(&rxe->mw_pool);
- rxe_pool_cleanup(&rxe->mc_grp_pool);
if (rxe->tfm)
crypto_free_shash(rxe->tfm);
if (err)
goto err8;
- err = rxe_pool_init(rxe, &rxe->mc_grp_pool, RXE_TYPE_MC_GRP,
- rxe->attr.max_mcast_grp);
- if (err)
- goto err9;
-
return 0;
-err9:
- rxe_pool_cleanup(&rxe->mw_pool);
err8:
rxe_pool_cleanup(&rxe->mr_pool);
err7:
}
if (node) {
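+ /* found it - take a reference on the mcg before returning it to the caller */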
- rxe_add_ref(mcg);
+ kref_get(&mcg->ref_cnt);
return mcg;
}
if (unlikely(err))
return err;
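+ /* new mcg starts with a single reference, owned by the caller that allocated it */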
+ kref_init(&mcg->ref_cnt);
memcpy(&mcg->mgid, mgid, sizeof(mcg->mgid));
INIT_LIST_HEAD(&mcg->qp_list);
mcg->rxe = rxe;
* Inserting mcg makes it visible to outside so this should
* be done last after the object is ready.
*/
- rxe_add_ref(mcg);
+ kref_get(&mcg->ref_cnt);
__rxe_insert_mcg(mcg);
return 0;
*/
static struct rxe_mcg *rxe_get_mcg(struct rxe_dev *rxe, union ib_gid *mgid)
{
- struct rxe_pool *pool = &rxe->mc_grp_pool;
struct rxe_mcg *mcg, *tmp;
unsigned long flags;
int err;
return mcg;
/* speculative alloc of new mcg */
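+ /* a GFP_KERNEL allocation cannot be done under rxe->mcg_lock, so allocate
+  * first and re-check for a racing insert once the lock is held
+  */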
- mcg = rxe_alloc(pool);
+ mcg = kzalloc(sizeof(*mcg), GFP_KERNEL);
if (!mcg)
return ERR_PTR(-ENOMEM);
/* re-check to see if someone else just added it */
tmp = __rxe_lookup_mcg(rxe, mgid);
if (tmp) {
- rxe_drop_ref(mcg);
+ kfree(mcg);
mcg = tmp;
goto out;
}
err_dec:
atomic_dec(&rxe->mcg_num);
spin_unlock_irqrestore(&rxe->mcg_lock, flags);
- rxe_drop_ref(mcg);
+ kfree(mcg);
return ERR_PTR(err);
}
+/**
+ * rxe_cleanup_mcg - cleanup mcg for kref_put
+ * @kref: kref embedded in struct rxe_mcg
+ */
+void rxe_cleanup_mcg(struct kref *kref)
+{
+ struct rxe_mcg *mcg = container_of(kref, typeof(*mcg), ref_cnt);
+
+ kfree(mcg);
+}
+
/**
* __rxe_destroy_mcg - destroy mcg object holding rxe->mcg_lock
* @mcg: the mcg object
*/
static void __rxe_destroy_mcg(struct rxe_mcg *mcg)
{
+ struct rxe_dev *rxe = mcg->rxe;
+
/* remove mcg from red-black tree then drop ref */
__rxe_remove_mcg(mcg);
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
rxe_mcast_delete(mcg->rxe, &mcg->mgid);
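+ /* the group is gone - drop it from the device's multicast group count */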
+ atomic_dec(&rxe->mcg_num);
}
/**
spin_unlock_irqrestore(&mcg->rxe->mcg_lock, flags);
}
-void rxe_mc_cleanup(struct rxe_pool_elem *elem)
-{
- /* nothing left to do for now */
-}
-
static int rxe_attach_mcg(struct rxe_dev *rxe, struct rxe_qp *qp,
struct rxe_mcg *mcg)
{
/* drop the ref from get key. This will free the
* object if qp_num is zero.
*/
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
kfree(mca);
err = 0;
goto out_unlock;
}
/* we didn't find the qp on the list */
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
err = -EINVAL;
out_unlock:
if (atomic_read(&mcg->qp_num) == 0)
rxe_destroy_mcg(mcg);
- rxe_drop_ref(mcg);
+ kref_put(&mcg->ref_cnt, rxe_cleanup_mcg);
return err;
}
.min_index = RXE_MIN_MW_INDEX,
.max_index = RXE_MAX_MW_INDEX,
},
- [RXE_TYPE_MC_GRP] = {
- .name = "rxe-mc_grp",
- .size = sizeof(struct rxe_mcg),
- .elem_offset = offsetof(struct rxe_mcg, elem),
- .cleanup = rxe_mc_cleanup,
- .flags = RXE_POOL_KEY,
- .key_offset = offsetof(struct rxe_mcg, mgid),
- .key_size = sizeof(union ib_gid),
- },
};
static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min)