Merge branch 'mlx5-odp-dc' into rdma.git for-next
author Jason Gunthorpe <jgg@mellanox.com>
Wed, 28 Aug 2019 14:25:37 +0000 (11:25 -0300)
committer Jason Gunthorpe <jgg@mellanox.com>
Wed, 28 Aug 2019 14:25:37 +0000 (11:25 -0300)
Michael Guralnik says:

====================
The series adds support for on-demand paging for DC transport.

As DC is an mlx5-only transport, the capabilities are exposed to the user
using DEVX objects and later on through mlx5dv_query_device.
====================

Based on the mlx5-next branch from
git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux for
dependencies

* branch 'mlx5-odp-dc':
  IB/mlx5: Add page fault handler for DC initiator WQE
  IB/mlx5: Remove check of FW capabilities in ODP page fault handling
  net/mlx5: Set ODP capabilities for DC transport to max

drivers/infiniband/hw/mlx5/odp.c
drivers/net/ethernet/mellanox/mlx5/core/diag/fw_tracer.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
include/linux/mlx5/mlx5_ifc.h

index 817c924e72897bdaf968ef39a9e3bc1dc0d7c8be..905936423a033f96c50758a2c6297c542dbd0c2e 100644 (file)
@@ -980,17 +980,6 @@ static int pagefault_data_segments(struct mlx5_ib_dev *dev,
        return ret < 0 ? ret : npages;
 }
 
-static const u32 mlx5_ib_odp_opcode_cap[] = {
-       [MLX5_OPCODE_SEND]             = IB_ODP_SUPPORT_SEND,
-       [MLX5_OPCODE_SEND_IMM]         = IB_ODP_SUPPORT_SEND,
-       [MLX5_OPCODE_SEND_INVAL]       = IB_ODP_SUPPORT_SEND,
-       [MLX5_OPCODE_RDMA_WRITE]       = IB_ODP_SUPPORT_WRITE,
-       [MLX5_OPCODE_RDMA_WRITE_IMM]   = IB_ODP_SUPPORT_WRITE,
-       [MLX5_OPCODE_RDMA_READ]        = IB_ODP_SUPPORT_READ,
-       [MLX5_OPCODE_ATOMIC_CS]        = IB_ODP_SUPPORT_ATOMIC,
-       [MLX5_OPCODE_ATOMIC_FA]        = IB_ODP_SUPPORT_ATOMIC,
-};
-
 /*
  * Parse initiator WQE. Advances the wqe pointer to point at the
  * scatter-gather list, and set wqe_end to the end of the WQE.
@@ -1001,7 +990,6 @@ static int mlx5_ib_mr_initiator_pfault_handler(
 {
        struct mlx5_wqe_ctrl_seg *ctrl = *wqe;
        u16 wqe_index = pfault->wqe.wqe_index;
-       u32 transport_caps;
        struct mlx5_base_av *av;
        unsigned ds, opcode;
        u32 qpn = qp->trans_qp.base.mqp.qpn;
@@ -1025,31 +1013,11 @@ static int mlx5_ib_mr_initiator_pfault_handler(
        opcode = be32_to_cpu(ctrl->opmod_idx_opcode) &
                 MLX5_WQE_CTRL_OPCODE_MASK;
 
-       switch (qp->ibqp.qp_type) {
-       case IB_QPT_XRC_INI:
+       if (qp->ibqp.qp_type == IB_QPT_XRC_INI)
                *wqe += sizeof(struct mlx5_wqe_xrc_seg);
-               transport_caps = dev->odp_caps.per_transport_caps.xrc_odp_caps;
-               break;
-       case IB_QPT_RC:
-               transport_caps = dev->odp_caps.per_transport_caps.rc_odp_caps;
-               break;
-       case IB_QPT_UD:
-               transport_caps = dev->odp_caps.per_transport_caps.ud_odp_caps;
-               break;
-       default:
-               mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport 0x%x\n",
-                           qp->ibqp.qp_type);
-               return -EFAULT;
-       }
 
-       if (unlikely(opcode >= ARRAY_SIZE(mlx5_ib_odp_opcode_cap) ||
-                    !(transport_caps & mlx5_ib_odp_opcode_cap[opcode]))) {
-               mlx5_ib_err(dev, "ODP fault on QP of an unsupported opcode 0x%x\n",
-                           opcode);
-               return -EFAULT;
-       }
-
-       if (qp->ibqp.qp_type == IB_QPT_UD) {
+       if (qp->ibqp.qp_type == IB_QPT_UD ||
+           qp->qp_sub_type == MLX5_IB_QPT_DCI) {
                av = *wqe;
                if (av->dqp_dct & cpu_to_be32(MLX5_EXTENDED_UD_AV))
                        *wqe += sizeof(struct mlx5_av);
@@ -1112,19 +1080,6 @@ static int mlx5_ib_mr_responder_pfault_handler_rq(struct mlx5_ib_dev *dev,
                return -EFAULT;
        }
 
-       switch (qp->ibqp.qp_type) {
-       case IB_QPT_RC:
-               if (!(dev->odp_caps.per_transport_caps.rc_odp_caps &
-                     IB_ODP_SUPPORT_RECV))
-                       goto invalid_transport_or_opcode;
-               break;
-       default:
-invalid_transport_or_opcode:
-               mlx5_ib_err(dev, "ODP fault on QP of an unsupported transport. transport: 0x%x\n",
-                           qp->ibqp.qp_type);
-               return -EFAULT;
-       }
-
        *wqe_end = wqe + wqe_size;
 
        return 0;
index 8a4930c8bf6214118d7782e2607e5b1fc8a21a4b..2011eaf15cc5ef3ef33b38824046326d75456335 100644 (file)
@@ -546,7 +546,7 @@ static void mlx5_fw_tracer_save_trace(struct mlx5_fw_tracer *tracer,
        trace_data->timestamp = timestamp;
        trace_data->lost = lost;
        trace_data->event_id = event_id;
-       strncpy(trace_data->msg, msg, TRACE_STR_MSG);
+       strscpy_pad(trace_data->msg, msg, TRACE_STR_MSG);
 
        tracer->st_arr.saved_traces_index =
                (tracer->st_arr.saved_traces_index + 1) & (SAVED_TRACES_NUM - 1);
index fa0e991f19835e433133853589c991ae9436f2f0..7f70ecb1db6d42804c22d5f937984a363a0ba2b8 100644 (file)
@@ -495,6 +495,12 @@ static int handle_hca_cap_odp(struct mlx5_core_dev *dev)
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
+       ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);
 
        if (do_set)
                err = set_caps(dev, set_ctx, set_sz,
index 3a5c9965b7a24811d2f588fbfdf6555551c5e9f0..8ec5ab9a0c0898d98aa0081a39616e0692fe4f5c 100644 (file)
@@ -948,7 +948,9 @@ struct mlx5_ifc_odp_cap_bits {
 
        struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps;
 
-       u8         reserved_at_100[0x700];
+       struct mlx5_ifc_odp_per_transport_service_cap_bits dc_odp_caps;
+
+       u8         reserved_at_120[0x6E0];
 };
 
 struct mlx5_ifc_calc_op {