RDMA/hns: Cleanups of magic numbers
authorLang Cheng <chenglang@huawei.com>
Sun, 26 Jan 2020 14:55:04 +0000 (22:55 +0800)
committerJason Gunthorpe <jgg@mellanox.com>
Tue, 11 Feb 2020 18:10:20 +0000 (14:10 -0400)
Some magic numbers are hard to understand, so replace them with macros or
add comments to explain them.

Link: https://lore.kernel.org/r/20200126145504.9700-1-liweihang@huawei.com
Signed-off-by: Lang Cheng <chenglang@huawei.com>
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Yixing Liu <liuyixing1@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_hw_v2.c
drivers/infiniband/hw/hns/hns_roce_qp.c
drivers/infiniband/hw/hns/hns_roce_srq.c

index a7c4ff975c2890d89818cd1e0a71f76a18211dfb..3b3d6fee1ecae578ace16b65df49208483306eec 100644 (file)
@@ -881,7 +881,7 @@ struct hns_roce_caps {
        u32             cqc_timer_ba_pg_sz;
        u32             cqc_timer_buf_pg_sz;
        u32             cqc_timer_hop_num;
-       u32             cqe_ba_pg_sz;
+       u32             cqe_ba_pg_sz;   /* page_size = 4K*(2^cqe_ba_pg_sz) */
        u32             cqe_buf_pg_sz;
        u32             cqe_hop_num;
        u32             srqwqe_ba_pg_sz;
index 12c4cd8e9378c69f39d1e1265c421e5f4ca5aab4..b8668780940790c18629199ffb14e1688c5bf44a 100644 (file)
@@ -1999,7 +1999,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
                return ret;
        }
 
-       if (hr_dev->pci_dev->revision == 0x21) {
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
                ret = hns_roce_query_pf_timer_resource(hr_dev);
                if (ret) {
                        dev_err(hr_dev->dev,
@@ -2016,7 +2016,7 @@ static int hns_roce_v2_profile(struct hns_roce_dev *hr_dev)
                return ret;
        }
 
-       if (hr_dev->pci_dev->revision == 0x21) {
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B) {
                ret = hns_roce_set_vf_switch_param(hr_dev, 0);
                if (ret) {
                        dev_err(hr_dev->dev,
@@ -2298,7 +2298,7 @@ static void hns_roce_v2_exit(struct hns_roce_dev *hr_dev)
 {
        struct hns_roce_v2_priv *priv = hr_dev->priv;
 
-       if (hr_dev->pci_dev->revision == 0x21)
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B)
                hns_roce_function_clear(hr_dev);
 
        hns_roce_free_link_table(hr_dev, &priv->tpq);
@@ -2757,7 +2757,7 @@ static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
 
 static void hns_roce_v2_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index)
 {
-       *hr_cq->set_ci_db = cons_index & 0xffffff;
+       *hr_cq->set_ci_db = cons_index & V2_CQ_DB_PARAMETER_CONS_IDX_M;
 }
 
 static void __hns_roce_v2_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn,
@@ -4475,7 +4475,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
        roce_set_field(qpc_mask->byte_24_mtu_tc, V2_QPC_BYTE_24_HOP_LIMIT_M,
                       V2_QPC_BYTE_24_HOP_LIMIT_S, 0);
 
-       if (hr_dev->pci_dev->revision == 0x21 && is_udp)
+       if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_B && is_udp)
                roce_set_field(context->byte_24_mtu_tc, V2_QPC_BYTE_24_TC_M,
                               V2_QPC_BYTE_24_TC_S, grh->traffic_class >> 2);
        else
index 3257ad11be48286d85d023b6f71736ed6d0d6d51..e13f16c59115ddb37ee61c18c9e0250503fa463c 100644 (file)
@@ -309,7 +309,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 
                max_cnt = max(1U, cap->max_recv_sge);
                hr_qp->rq.max_gs = roundup_pow_of_two(max_cnt);
-               if (hr_dev->caps.max_rq_sg <= 2)
+               if (hr_dev->caps.max_rq_sg <= HNS_ROCE_SGE_IN_WQE)
                        hr_qp->rq.wqe_shift =
                                        ilog2(hr_dev->caps.max_rq_desc_sz);
                else
@@ -370,16 +370,17 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
        hr_qp->sq.wqe_shift = ucmd->log_sq_stride;
 
        max_cnt = max(1U, cap->max_send_sge);
-       if (hr_dev->caps.max_sq_sg <= 2)
+       if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;
 
-       if (hr_qp->sq.max_gs > 2)
+       if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE)
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                                        (hr_qp->sq.max_gs - 2));
 
-       if ((hr_qp->sq.max_gs > 2) && (hr_dev->pci_dev->revision == 0x20)) {
+       if (hr_qp->sq.max_gs > HNS_ROCE_SGE_IN_WQE &&
+           hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        dev_err(hr_dev->dev,
                                "The extended sge cnt error! sge_cnt=%d\n",
@@ -392,7 +393,7 @@ static int hns_roce_set_user_sq_size(struct hns_roce_dev *hr_dev,
        ex_sge_num = hr_qp->sge.sge_cnt;
 
        /* Get buf size, SQ and RQ  are aligned to page_szie */
-       if (hr_dev->caps.max_sq_sg <= 2) {
+       if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
                hr_qp->buff_size = round_up((hr_qp->rq.wqe_cnt <<
                                             hr_qp->rq.wqe_shift), PAGE_SIZE) +
                                   round_up((hr_qp->sq.wqe_cnt <<
@@ -528,13 +529,15 @@ static int set_extend_sge_param(struct hns_roce_dev *hr_dev,
        }
 
        /* ud sqwqe's sge use extend sge */
-       if (hr_dev->caps.max_sq_sg > 2 && hr_qp->ibqp.qp_type == IB_QPT_GSI) {
+       if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
+           hr_qp->ibqp.qp_type == IB_QPT_GSI) {
                hr_qp->sge.sge_cnt = roundup_pow_of_two(hr_qp->sq.wqe_cnt *
                                     hr_qp->sq.max_gs);
                hr_qp->sge.sge_shift = 4;
        }
 
-       if ((hr_qp->sq.max_gs > 2) && hr_dev->pci_dev->revision == 0x20) {
+       if (hr_qp->sq.max_gs > 2 &&
+           hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08_A) {
                if (hr_qp->sge.sge_cnt > hr_dev->caps.max_extend_sg) {
                        dev_err(dev, "The extended sge cnt error! sge_cnt=%d\n",
                                hr_qp->sge.sge_cnt);
@@ -577,7 +580,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 
        /* Get data_seg numbers */
        max_cnt = max(1U, cap->max_send_sge);
-       if (hr_dev->caps.max_sq_sg <= 2)
+       if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                hr_qp->sq.max_gs = roundup_pow_of_two(max_cnt);
        else
                hr_qp->sq.max_gs = max_cnt;
@@ -593,7 +596,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
        hr_qp->sq.offset = 0;
        size = round_up(hr_qp->sq.wqe_cnt << hr_qp->sq.wqe_shift, page_size);
 
-       if (hr_dev->caps.max_sq_sg > 2 && hr_qp->sge.sge_cnt) {
+       if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 && hr_qp->sge.sge_cnt) {
                hr_qp->sge.sge_cnt = max(page_size/(1 << hr_qp->sge.sge_shift),
                                         (u32)hr_qp->sge.sge_cnt);
                hr_qp->sge.offset = size;
@@ -1078,7 +1081,7 @@ struct ib_qp *hns_roce_create_qp(struct ib_pd *pd,
                hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port];
 
                /* when hw version is v1, the sqpn is allocated */
-               if (hr_dev->caps.max_sq_sg <= 2)
+               if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
                        hr_qp->ibqp.qp_num = HNS_ROCE_MAX_PORTS +
                                             hr_dev->iboe.phy_port[hr_qp->port];
                else
index c6d5f06f9cdeaeebcd3cf516437d6e003cbb0a19..5b3dd1a337d4bfe3ccfcf9b593f15c02d19244c7 100644 (file)
@@ -381,7 +381,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
        srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
        srq->max_gs = init_attr->attr.max_sge;
 
-       srq_desc_size = roundup_pow_of_two(max(16, 16 * srq->max_gs));
+       srq_desc_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+                                       HNS_ROCE_SGE_SIZE * srq->max_gs));
 
        srq->wqe_shift = ilog2(srq_desc_size);