IB/mlx5: Support scatter to CQE for DC transport type
author Yonatan Cohen <yonatanc@mellanox.com>
Tue, 9 Oct 2018 09:05:13 +0000 (12:05 +0300)
committer Doug Ledford <dledford@redhat.com>
Wed, 17 Oct 2018 15:25:41 +0000 (11:25 -0400)
Scatter to CQE is a HW offload that saves PCI writes by scattering the
payload to the CQE.
This patch extends the existing functionality to support the DC
transport type.
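
As an illustration (not part of the kernel patch itself), the sketch
below shows how a DCI QP is created through rdma-core's mlx5dv API.
rdma-core requests MLX5_QP_FLAG_SCATTER_CQE by default (it can be
disabled via the MLX5_SCATTER_TO_CQE environment variable), so with
this patch the flag now takes effect for DC as well. Requester-side
scatter additionally requires all work requests to be signaled, hence
sq_sig_all below. Minimal sketch, error handling omitted:

    #include <infiniband/mlx5dv.h>

    static struct ibv_qp *create_dci(struct ibv_context *ctx,
                                     struct ibv_pd *pd, struct ibv_cq *cq)
    {
            /* Standard verbs attributes; DC uses the driver QP type. */
            struct ibv_qp_init_attr_ex attr = {
                    .qp_type = IBV_QPT_DRIVER,
                    .send_cq = cq,
                    .recv_cq = cq,
                    .pd = pd,
                    .comp_mask = IBV_QP_INIT_ATTR_PD,
                    /* Maps to IB_SIGNAL_ALL_WR on the kernel side. */
                    .sq_sig_all = 1,
                    .cap = { .max_send_wr = 16, .max_send_sge = 1 },
            };
            /* mlx5-specific attributes selecting the DCI flavor. */
            struct mlx5dv_qp_init_attr dv = {
                    .comp_mask = MLX5DV_QP_INIT_ATTR_MASK_DC,
                    .dc_init_attr = { .dc_type = MLX5DV_DCTYPE_DCI },
            };

            return mlx5dv_create_qp(ctx, &attr, &dv);
    }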

Signed-off-by: Yonatan Cohen <yonatanc@mellanox.com>
Reviewed-by: Guy Levi <guyle@mellanox.com>
Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/qp.c

diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c
index dae30b6478bfef8d188dea3e95dd0461f21aaaaf..a41519dc8d3aee58866de62e183004b4c4c382be 100644
@@ -1460,7 +1460,7 @@ ex:
        return err;
 }
 
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq)
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq)
 {
        struct mlx5_ib_cq *cq;
 
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index 8444ea78229acbb0096fbda45e6f934bb463791f..9de9397166b8dd289d438d1ba29d1c401aae98ef 100644
@@ -1127,7 +1127,7 @@ void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
 void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
                          int page_shift, __be64 *pas, int access_flags);
 void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
-int mlx5_ib_get_cqe_size(struct mlx5_ib_dev *dev, struct ib_cq *ibcq);
+int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
 int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
 int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);
 
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 829aec3aba8fc7662d61294cd23d4aac03462abe..817c391bdfc08cb8326680fade472f2f9fd33ec3 100644
@@ -1053,7 +1053,8 @@ static u32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
 
 static int is_connected(enum ib_qp_type qp_type)
 {
-       if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC)
+       if (qp_type == IB_QPT_RC || qp_type == IB_QPT_UC ||
+           qp_type == MLX5_IB_QPT_DCI)
                return 1;
 
        return 0;
@@ -1684,6 +1685,49 @@ err:
        return err;
 }
 
+static void configure_responder_scat_cqe(struct ib_qp_init_attr *init_attr,
+                                        void *qpc)
+{
+       int rcqe_sz;
+
+       if (init_attr->qp_type == MLX5_IB_QPT_DCI)
+               return;
+
+       rcqe_sz = mlx5_ib_get_cqe_size(init_attr->recv_cq);
+
+       if (rcqe_sz == 128) {
+               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
+               return;
+       }
+
+       if (init_attr->qp_type != MLX5_IB_QPT_DCT)
+               MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
+}
+
+static void configure_requester_scat_cqe(struct mlx5_ib_dev *dev,
+                                        struct ib_qp_init_attr *init_attr,
+                                        void *qpc)
+{
+       enum ib_qp_type qpt = init_attr->qp_type;
+       int scqe_sz;
+
+       if (qpt == IB_QPT_UC || qpt == IB_QPT_UD)
+               return;
+
+       if (init_attr->sq_sig_type != IB_SIGNAL_ALL_WR)
+               return;
+
+       scqe_sz = mlx5_ib_get_cqe_size(init_attr->send_cq);
+       if (scqe_sz == 128) {
+               MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
+               return;
+       }
+
+       if (init_attr->qp_type != MLX5_IB_QPT_DCI ||
+           MLX5_CAP_GEN(dev->mdev, dc_req_scat_data_cqe))
+               MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
+}
+
 static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                            struct ib_qp_init_attr *init_attr,
                            struct ib_udata *udata, struct mlx5_ib_qp *qp)
@@ -1787,7 +1831,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                        return err;
 
                qp->wq_sig = !!(ucmd.flags & MLX5_QP_FLAG_SIGNATURE);
-               qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
+               if (MLX5_CAP_GEN(dev->mdev, sctr_data_cqe))
+                       qp->scat_cqe = !!(ucmd.flags & MLX5_QP_FLAG_SCATTER_CQE);
                if (ucmd.flags & MLX5_QP_FLAG_TUNNEL_OFFLOADS) {
                        if (init_attr->qp_type != IB_QPT_RAW_PACKET ||
                            !tunnel_offload_supported(mdev)) {
@@ -1911,23 +1956,8 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                MLX5_SET(qpc, qpc, cd_slave_receive, 1);
 
        if (qp->scat_cqe && is_connected(init_attr->qp_type)) {
-               int rcqe_sz;
-               int scqe_sz;
-
-               rcqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->recv_cq);
-               scqe_sz = mlx5_ib_get_cqe_size(dev, init_attr->send_cq);
-
-               if (rcqe_sz == 128)
-                       MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA64_CQE);
-               else
-                       MLX5_SET(qpc, qpc, cs_res, MLX5_RES_SCAT_DATA32_CQE);
-
-               if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) {
-                       if (scqe_sz == 128)
-                               MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA64_CQE);
-                       else
-                               MLX5_SET(qpc, qpc, cs_req, MLX5_REQ_SCAT_DATA32_CQE);
-               }
+               configure_responder_scat_cqe(init_attr, qpc);
+               configure_requester_scat_cqe(dev, init_attr, qpc);
        }
 
        if (qp->rq.wqe_cnt) {
@@ -2302,6 +2332,9 @@ static struct ib_qp *mlx5_ib_create_dct(struct ib_pd *pd,
        MLX5_SET64(dctc, dctc, dc_access_key, ucmd->access_key);
        MLX5_SET(dctc, dctc, user_index, uidx);
 
+       if (ucmd->flags & MLX5_QP_FLAG_SCATTER_CQE)
+               configure_responder_scat_cqe(attr, dctc);
+
        qp->state = IB_QPS_RESET;
 
        return &qp->ibqp;
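
A hypothetical responder-side counterpart (again, not part of the
patch): a DCT created via mlx5dv. With this change it scatters into a
64B CQE when the receive CQ uses 128B CQEs; 32B scatter remains
disallowed for DCT, matching configure_responder_scat_cqe() above. The
srq and dc_key parameters are placeholders for the caller's resources:

    static struct ibv_qp *create_dct(struct ibv_context *ctx,
                                     struct ibv_pd *pd, struct ibv_cq *cq,
                                     struct ibv_srq *srq, uint64_t dc_key)
    {
            struct ibv_qp_init_attr_ex attr = {
                    .qp_type = IBV_QPT_DRIVER,
                    .send_cq = cq,
                    .recv_cq = cq,
                    .srq = srq,     /* DCT lands payloads on an SRQ */
                    .pd = pd,
                    .comp_mask = IBV_QP_INIT_ATTR_PD,
            };
            /* mlx5-specific attributes selecting the DCT flavor. */
            struct mlx5dv_qp_init_attr dv = {
                    .comp_mask = MLX5DV_QP_INIT_ATTR_MASK_DC,
                    .dc_init_attr = {
                            .dc_type = MLX5DV_DCTYPE_DCT,
                            .dct_access_key = dc_key,
                    },
            };

            return mlx5dv_create_qp(ctx, &attr, &dv);
    }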