RDMA/mlx5: Add support to multi-plane device and port
authorMark Zhang <markzhang@nvidia.com>
Sun, 16 Jun 2024 16:08:35 +0000 (19:08 +0300)
committerLeon Romanovsky <leonro@nvidia.com>
Mon, 1 Jul 2024 12:10:15 +0000 (15:10 +0300)
When multi-plane is supported, a logical port, which is an aggregation of
multiple physical plane ports, is exposed for data transmission.
Compared with a normal mlx5 IB port, this logical port supports all
functionalities except Subnet Management.

Signed-off-by: Mark Zhang <markzhang@nvidia.com>
Link: https://lore.kernel.org/r/7e37c06c9cb243be9ac79930cd17053903785b95.1718553901.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/net/ethernet/mellanox/mlx5/core/vport.c
include/linux/mlx5/driver.h

index 4adcbbafb7774229c170f95984c2f393a2d48f56..4fed8d1ed819a731f0af0de4e56773f157e851da 100644 (file)
@@ -1357,7 +1357,13 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port,
        props->sm_sl            = rep->sm_sl;
        props->state            = rep->vport_state;
        props->phys_state       = rep->port_physical_state;
-       props->port_cap_flags   = rep->cap_mask1;
+
+       props->port_cap_flags = rep->cap_mask1;
+       if (dev->num_plane) {
+               props->port_cap_flags |= IB_PORT_SM_DISABLED;
+               props->port_cap_flags &= ~IB_PORT_SM;
+       }
+
        props->gid_tbl_len      = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
        props->max_msg_sz       = 1 << MLX5_CAP_GEN(mdev, log_max_msg);
        props->pkey_tbl_len     = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
@@ -2776,6 +2782,23 @@ static int mlx5_ib_event_slave_port(struct notifier_block *nb,
        return NOTIFY_OK;
 }
 
+static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane)
+{
+       struct mlx5_hca_vport_context vport_ctx;
+       int err;
+
+       *num_plane = 0;
+       if (!MLX5_CAP_GEN(mdev, ib_virt))
+               return 0;
+
+       err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx);
+       if (err)
+               return err;
+
+       *num_plane = vport_ctx.num_plane;
+       return 0;
+}
+
 static int set_has_smi_cap(struct mlx5_ib_dev *dev)
 {
        struct mlx5_hca_vport_context vport_ctx;
@@ -2786,10 +2809,14 @@ static int set_has_smi_cap(struct mlx5_ib_dev *dev)
                return 0;
 
        for (port = 1; port <= dev->num_ports; port++) {
-               if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
+               if (dev->num_plane) {
+                       dev->port_caps[port - 1].has_smi = false;
+                       continue;
+               } else if (!MLX5_CAP_GEN(dev->mdev, ib_virt)) {
                        dev->port_caps[port - 1].has_smi = true;
                        continue;
                }
+
                err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0,
                                                   &vport_ctx);
                if (err) {
@@ -2995,6 +3022,11 @@ static u32 get_core_cap_flags(struct ib_device *ibdev,
        if (rep->grh_required)
                ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED;
 
+       if (dev->num_plane)
+               return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD |
+                       RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA |
+                       RDMA_CORE_CAP_AF_IB;
+
        if (ll == IB_LINK_LAYER_INFINIBAND)
                return ret | RDMA_CORE_PORT_IBA_IB;
 
@@ -4477,11 +4509,18 @@ static int mlx5r_probe(struct auxiliary_device *adev,
        dev = ib_alloc_device(mlx5_ib_dev, ib_dev);
        if (!dev)
                return -ENOMEM;
+
+       if (ll == IB_LINK_LAYER_INFINIBAND) {
+               ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane);
+               if (ret)
+                       goto fail;
+       }
+
        dev->port = kcalloc(num_ports, sizeof(*dev->port),
                             GFP_KERNEL);
        if (!dev->port) {
-               ib_dealloc_device(&dev->ib_dev);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto fail;
        }
 
        dev->mdev = mdev;
@@ -4493,14 +4532,17 @@ static int mlx5r_probe(struct auxiliary_device *adev,
                profile = &pf_profile;
 
        ret = __mlx5_ib_add(dev, profile);
-       if (ret) {
-               kfree(dev->port);
-               ib_dealloc_device(&dev->ib_dev);
-               return ret;
-       }
+       if (ret)
+               goto fail_ib_add;
 
        auxiliary_set_drvdata(adev, dev);
        return 0;
+
+fail_ib_add:
+       kfree(dev->port);
+fail:
+       ib_dealloc_device(&dev->ib_dev);
+       return ret;
 }
 
 static void mlx5r_remove(struct auxiliary_device *adev)
index 8f4618dee86976bf589bed346512e63c79f3467c..49a5eebe69b8fa6bfa4928a1d2deaf4ecf216cc9 100644 (file)
@@ -1189,6 +1189,8 @@ struct mlx5_ib_dev {
 #ifdef CONFIG_MLX5_MACSEC
        struct mlx5_macsec macsec;
 #endif
+
+       u8 num_plane;
 };
 
 static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
index 1005bb6935b65c0d6bb2b68f71744c2857085eed..0d5f750faa4555c498eec32349b79589d7482b41 100644 (file)
@@ -737,6 +737,7 @@ int mlx5_query_hca_vport_context(struct mlx5_core_dev *dev,
        rep->grh_required = MLX5_GET_PR(hca_vport_context, ctx, grh_required);
        rep->sys_image_guid = MLX5_GET64_PR(hca_vport_context, ctx,
                                            system_image_guid);
+       rep->num_plane = MLX5_GET_PR(hca_vport_context, ctx, num_port_plane);
 
 ex:
        kvfree(out);
index 0d31f77396fcc5a116040eb18539ee7edc49365c..a96438ded15fe9b3ada935de2248f5ebf55b3a85 100644 (file)
@@ -917,6 +917,7 @@ struct mlx5_hca_vport_context {
        u16                     qkey_violation_counter;
        u16                     pkey_violation_counter;
        bool                    grh_required;
+       u8                      num_plane;
 };
 
 #define STRUCT_FIELD(header, field) \