IB/mlx5: Expose CQE version to user-space
author     Haggai Abramovsky <hagaya@mellanox.com>
           Thu, 14 Jan 2016 17:12:58 +0000 (19:12 +0200)
committer  Doug Ledford <dledford@redhat.com>
           Thu, 21 Jan 2016 17:01:08 +0000 (12:01 -0500)
For each user context, work with the CQE version that both user-space
and the kernel support. Report this CQE version via the response to
the alloc_ucontext command.

Signed-off-by: Haggai Abramovsky <hagaya@mellanox.com>
Reviewed-by: Matan Barak <matanb@mellanox.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
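
For orientation, below is a minimal sketch (not the actual libmlx5 code) of
the user-space side of the negotiation this patch enables. The struct layouts
are abbreviated copies of the ones declared in user.h further down;
mlx5_alloc_ucontext_cmd() is a hypothetical stand-in for the real
ALLOC_UCONTEXT uverbs plumbing, and the uuar count is an arbitrary
illustrative value.

#include <stdint.h>
#include <string.h>

#define LIB_MAX_CQE_VERSION 1	/* assumption: highest CQE version this library parses */

struct ucontext_req_v2 {	/* abbreviated mlx5_ib_alloc_ucontext_req_v2 */
	uint32_t total_num_uuars;
	uint32_t num_low_latency_uuars;
	uint32_t flags;
	uint32_t comp_mask;
	uint8_t  max_cqe_version;
	uint8_t  reserved0;
	uint16_t reserved1;
	uint32_t reserved2;
};

struct ucontext_resp {		/* abbreviated mlx5_ib_alloc_ucontext_resp */
	uint32_t response_length;
	uint8_t  cqe_version;
};

/* Hypothetical wrapper around the ALLOC_UCONTEXT uverbs command. */
int mlx5_alloc_ucontext_cmd(const struct ucontext_req_v2 *req,
			    struct ucontext_resp *resp);

static int negotiate_cqe_version(uint8_t *cqe_version)
{
	struct ucontext_req_v2 req;
	struct ucontext_resp resp;

	memset(&req, 0, sizeof(req));
	memset(&resp, 0, sizeof(resp));

	/* Advertise the newest CQE version this library can parse; the
	 * reserved fields must stay zero or the kernel now returns
	 * -EOPNOTSUPP. */
	req.total_num_uuars = 16;		/* illustrative non-zero value */
	req.max_cqe_version = LIB_MAX_CQE_VERSION;

	if (mlx5_alloc_ucontext_cmd(&req, &resp))
		return -1;

	/* The kernel reports min(HCA cqe_version capability, requested max);
	 * an older kernel that never writes the field leaves it at 0. */
	*cqe_version = resp.cqe_version;
	return 0;
}
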
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/user.h

diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index f82336699c3e47bec1e7369af1bc89c8c261df2e..829c37c95d605498bc83823d22f58c12ac1ba402 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -869,7 +869,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        if (req.total_num_uuars == 0)
                return ERR_PTR(-EINVAL);
 
-       if (req.comp_mask)
+       if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2)
                return ERR_PTR(-EOPNOTSUPP);
 
        if (reqlen > sizeof(req) &&
@@ -892,6 +892,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
        resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);
+       resp.cqe_version = min_t(__u8,
+                                (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version),
+                                req.max_cqe_version);
        resp.response_length = min(offsetof(typeof(resp), response_length) +
                                   sizeof(resp.response_length), udata->outlen);
 
@@ -945,8 +948,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        resp.tot_uuars = req.total_num_uuars;
        resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
 
-       if (field_avail(typeof(resp), reserved2, udata->outlen))
-               resp.response_length += sizeof(resp.reserved2);
+       if (field_avail(typeof(resp), cqe_version, udata->outlen))
+               resp.response_length += sizeof(resp.cqe_version);
 
        if (field_avail(typeof(resp), hca_core_clock_offset, udata->outlen)) {
                resp.comp_mask |=
@@ -954,7 +957,9 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
                resp.hca_core_clock_offset =
                        offsetof(struct mlx5_init_seg, internal_timer_h) %
                        PAGE_SIZE;
-               resp.response_length += sizeof(resp.hca_core_clock_offset);
+               resp.response_length += sizeof(resp.hca_core_clock_offset) +
+                                       sizeof(resp.reserved2) +
+                                       sizeof(resp.reserved3);
        }
 
        err = ib_copy_to_udata(udata, &resp, resp.response_length);
@@ -965,6 +970,8 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        uuari->num_low_latency_uuars = req.num_low_latency_uuars;
        uuari->uars = uars;
        uuari->num_uars = num_uars;
+       context->cqe_version = resp.cqe_version;
+
        return &context->ibucontext;
 
 out_uars:
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 5bf0935f0cb5172500e61f385109d43154eadcb3..41f0525e825c7e545cbfbe2e576109e1f68e39ff 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -69,6 +69,10 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
        __u32   num_low_latency_uuars;
        __u32   flags;
        __u32   comp_mask;
+       __u8    max_cqe_version;
+       __u8    reserved0;
+       __u16   reserved1;
+       __u32   reserved2;
 };
 
 enum mlx5_ib_alloc_ucontext_resp_mask {
@@ -89,7 +93,9 @@ struct mlx5_ib_alloc_ucontext_resp {
        __u16   reserved1;
        __u32   comp_mask;
        __u32   response_length;
-       __u32   reserved2;
+       __u8    cqe_version;
+       __u8    reserved2;
+       __u16   reserved3;
        __u64   hca_core_clock_offset;
 };