RDMA/hns: Support CQ's restrack raw ops for hns driver
author    Wenpeng Liang <liangwenpeng@huawei.com>
Mon, 22 Aug 2022 10:44:51 +0000 (18:44 +0800)
committer Leon Romanovsky <leonro@nvidia.com>
Tue, 23 Aug 2022 08:34:58 +0000 (11:34 +0300)
The CQ raw restrack attributes come from the CQ context (CQC) maintained by
the ROCEE (the hns RoCE engine). They are obtained through the query_cqc()
hardware op and dumped as an array of 32-bit words.

For example:

$ rdma res show cq dev hns_0 cqn 14 -dd -jp -r
[ {
        "ifindex": 4,
        "ifname": "hns_0",
        "data": [ 1,0,0,0,7,0,0,0,0,0,0,0,0,82,6,0,0,82,6,0,0,82,6,0,
                  1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,
                  6,0,0,0,0,0,0,0 ]
    } ]

Link: https://lore.kernel.org/r/20220822104455.2311053-4-liangwenpeng@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
drivers/infiniband/hw/hns/hns_roce_device.h
drivers/infiniband/hw/hns/hns_roce_main.c
drivers/infiniband/hw/hns/hns_roce_restrack.c

diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h
index 103d50564b8905d390f845df204bbef670960233..c73adc0d35551efb4d7b92fcf7dbcb767b3d183c 100644
@@ -1224,6 +1224,7 @@ void hns_roce_handle_device_err(struct hns_roce_dev *hr_dev);
 int hns_roce_init(struct hns_roce_dev *hr_dev);
 void hns_roce_exit(struct hns_roce_dev *hr_dev);
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq);
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq);
 struct hns_user_mmap_entry *
 hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
                                size_t length,
diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c
index caf73e8f4bbeb69af25a769b8b664bd9b27a24ff..1b66ed45350eff53fd0b5886fb7c0ac1402f8d75 100644
@@ -567,6 +567,7 @@ static const struct ib_device_ops hns_roce_dev_xrcd_ops = {
 
 static const struct ib_device_ops hns_roce_dev_restrack_ops = {
        .fill_res_cq_entry = hns_roce_fill_res_cq_entry,
+       .fill_res_cq_entry_raw = hns_roce_fill_res_cq_entry_raw,
 };
 
 static int hns_roce_register_device(struct hns_roce_dev *hr_dev)
diff --git a/drivers/infiniband/hw/hns/hns_roce_restrack.c b/drivers/infiniband/hw/hns/hns_roce_restrack.c
index 2e8299784bc2e383cf490e9ad6e27f0b219611f7..3f9c2f9dfdf60bc7bad634a92faf343e284f0f85 100644
@@ -9,6 +9,8 @@
 #include "hns_roce_device.h"
 #include "hns_roce_hw_v2.h"
 
+#define MAX_ENTRY_NUM 256
+
 int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq)
 {
        struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
@@ -39,3 +41,40 @@ err:
 
        return -EMSGSIZE;
 }
+
+int hns_roce_fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ib_cq)
+{
+       struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device);
+       struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq);
+       struct hns_roce_v2_cq_context context;
+       u32 data[MAX_ENTRY_NUM] = {};
+       int offset = 0;
+       int ret;
+
+       if (!hr_dev->hw->query_cqc)
+               return -EINVAL;
+
+       ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context);
+       if (ret)
+               return -EINVAL;
+
+       data[offset++] = hr_reg_read(&context, CQC_CQ_ST);
+       data[offset++] = hr_reg_read(&context, CQC_SHIFT);
+       data[offset++] = hr_reg_read(&context, CQC_CQE_SIZE);
+       data[offset++] = hr_reg_read(&context, CQC_CQE_CNT);
+       data[offset++] = hr_reg_read(&context, CQC_CQ_PRODUCER_IDX);
+       data[offset++] = hr_reg_read(&context, CQC_CQ_CONSUMER_IDX);
+       data[offset++] = hr_reg_read(&context, CQC_DB_RECORD_EN);
+       data[offset++] = hr_reg_read(&context, CQC_ARM_ST);
+       data[offset++] = hr_reg_read(&context, CQC_CMD_SN);
+       data[offset++] = hr_reg_read(&context, CQC_CEQN);
+       data[offset++] = hr_reg_read(&context, CQC_CQ_MAX_CNT);
+       data[offset++] = hr_reg_read(&context, CQC_CQ_PERIOD);
+       data[offset++] = hr_reg_read(&context, CQC_CQE_HOP_NUM);
+       data[offset++] = hr_reg_read(&context, CQC_CQE_BAR_PG_SZ);
+       data[offset++] = hr_reg_read(&context, CQC_CQE_BUF_PG_SZ);
+
+       ret = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, offset * sizeof(u32), data);
+
+       return ret;
+}