// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#include <linux/debugfs.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/delay.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>
#include <rdma/ib_cache.h>
#include <linux/mlx5/port.h>
#include <linux/mlx5/vport.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/eswitch.h>
#include <linux/mlx5/driver.h>
#include <linux/list.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/lag.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include "mlx5_ib.h"
#include "ib_rep.h"
#include "cmd.h"
#include "devx.h"
#include "dm.h"
#include "fs.h"
#include "srq.h"
#include "qp.h"
#include "wr.h"
#include "restrack.h"
#include "counters.h"
#include "umr.h"
#include <rdma/uverbs_std_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include "macsec.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) IB driver");
MODULE_LICENSE("Dual BSD/GPL");

struct mlx5_ib_event_work {
	struct work_struct work;
	union {
		struct mlx5_ib_dev *dev;
		struct mlx5_ib_multiport_info *mpi;
	};
	bool is_slave;
	unsigned int event;
	void *param;
};

enum {
	MLX5_ATOMIC_SIZE_QP_8BYTES = 1 << 3,
};

static struct workqueue_struct *mlx5_ib_event_wq;
static LIST_HEAD(mlx5_ib_unaffiliated_port_list);
static LIST_HEAD(mlx5_ib_dev_list);
/* This mutex should be held when accessing either of the above lists */
static DEFINE_MUTEX(mlx5_ib_multiport_mutex);

struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi)
{
	struct mlx5_ib_dev *dev;

	mutex_lock(&mlx5_ib_multiport_mutex);
	dev = mpi->ibdev;
	mutex_unlock(&mlx5_ib_multiport_mutex);
	return dev;
}

static enum rdma_link_layer
mlx5_port_type_cap_to_rdma_ll(int port_type_cap)
{
	switch (port_type_cap) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}

static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device, u32 port_num)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	int port_type_cap = MLX5_CAP_GEN(dev->mdev, port_type);

	return mlx5_port_type_cap_to_rdma_ll(port_type_cap);
}

static int get_port_state(struct ib_device *ibdev,
			  u32 port_num,
			  enum ib_port_state *state)
{
	struct ib_port_attr attr;
	int ret;

	memset(&attr, 0, sizeof(attr));
	ret = ibdev->ops.query_port(ibdev, port_num, &attr);
	if (!ret)
		*state = attr.state;
	return ret;
}

static struct mlx5_roce *mlx5_get_rep_roce(struct mlx5_ib_dev *dev,
					   struct net_device *ndev,
					   struct net_device *upper,
					   u32 *port_num)
{
	struct net_device *rep_ndev;
	struct mlx5_ib_port *port;
	int i;

	for (i = 0; i < dev->num_ports; i++) {
		port = &dev->port[i];
		if (!port->rep)
			continue;

		if (upper == ndev && port->rep->vport == MLX5_VPORT_UPLINK) {
			*port_num = i + 1;
			return &port->roce;
		}

		if (upper && port->rep->vport == MLX5_VPORT_UPLINK)
			continue;

		read_lock(&port->roce.netdev_lock);
		rep_ndev = mlx5_ib_get_rep_netdev(port->rep->esw,
						  port->rep->vport);
		if (rep_ndev == ndev) {
			read_unlock(&port->roce.netdev_lock);
			*port_num = i + 1;
			return &port->roce;
		}
		read_unlock(&port->roce.netdev_lock);
	}

	return NULL;
}

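/*
 * Netdev notifier handler: keeps roce->netdev in sync with the
 * underlying net device (NETDEV_REGISTER/NETDEV_UNREGISTER) and
 * translates link/carrier changes into IB_EVENT_PORT_ACTIVE or
 * IB_EVENT_PORT_ERR events on the matching IB port.
 */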
static int mlx5_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct mlx5_roce *roce = container_of(this, struct mlx5_roce, nb);
	struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
	u32 port_num = roce->native_port_num;
	struct mlx5_core_dev *mdev;
	struct mlx5_ib_dev *ibdev;

	ibdev = roce->dev;
	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NOTIFY_DONE;

	switch (event) {
	case NETDEV_REGISTER:
		/* Should already be registered during the load */
		if (ibdev->is_rep)
			break;
		write_lock(&roce->netdev_lock);
		if (ndev->dev.parent == mdev->device)
			roce->netdev = ndev;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_UNREGISTER:
		/* In case of reps, ib device goes away before the netdevs */
		write_lock(&roce->netdev_lock);
		if (roce->netdev == ndev)
			roce->netdev = NULL;
		write_unlock(&roce->netdev_lock);
		break;

	case NETDEV_CHANGE:
	case NETDEV_UP:
	case NETDEV_DOWN: {
		struct net_device *lag_ndev = mlx5_lag_get_roce_netdev(mdev);
		struct net_device *upper = NULL;

		if (lag_ndev) {
			upper = netdev_master_upper_dev_get(lag_ndev);
			dev_put(lag_ndev);
		}

		if (ibdev->is_rep)
			roce = mlx5_get_rep_roce(ibdev, ndev, upper, &port_num);
		if (!roce)
			return NOTIFY_DONE;
		if ((upper == ndev ||
		     ((!upper || ibdev->is_rep) && ndev == roce->netdev)) &&
		    ibdev->ib_active) {
			struct ib_event ibev = { };
			enum ib_port_state port_state;

			if (get_port_state(&ibdev->ib_dev, port_num,
					   &port_state))
				goto done;

			if (roce->last_port_state == port_state)
				goto done;

			roce->last_port_state = port_state;
			ibev.device = &ibdev->ib_dev;
			if (port_state == IB_PORT_DOWN)
				ibev.event = IB_EVENT_PORT_ERR;
			else if (port_state == IB_PORT_ACTIVE)
				ibev.event = IB_EVENT_PORT_ACTIVE;
			else
				goto done;

			ibev.element.port_num = port_num;
			ib_dispatch_event(&ibev);
		}
		break;
	}

	default:
		break;
	}
done:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return NOTIFY_DONE;
}

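/*
 * Resolve the net device backing an IB port: prefer the LAG netdev
 * returned by mlx5_lag_get_roce_netdev(), otherwise take the per-port
 * cached netdev under roce.netdev_lock so it cannot be unregistered
 * before dev_hold() runs.
 */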
static struct net_device *mlx5_ib_get_netdev(struct ib_device *device,
					     u32 port_num)
{
	struct mlx5_ib_dev *ibdev = to_mdev(device);
	struct net_device *ndev;
	struct mlx5_core_dev *mdev;

	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
	if (!mdev)
		return NULL;

	ndev = mlx5_lag_get_roce_netdev(mdev);
	if (ndev)
		goto out;

	/* Ensure ndev does not disappear before we invoke dev_hold() */
	read_lock(&ibdev->port[port_num - 1].roce.netdev_lock);
	ndev = ibdev->port[port_num - 1].roce.netdev;
	dev_hold(ndev);
	read_unlock(&ibdev->port[port_num - 1].roce.netdev_lock);

out:
	mlx5_ib_put_native_port_mdev(ibdev, port_num);
	return ndev;
}

struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *ibdev,
						   u32 ib_port_num,
						   u32 *native_port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  ib_port_num);
	struct mlx5_core_dev *mdev = NULL;
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (ibdev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) {
		if (native_port_num)
			*native_port_num = smi_to_native_portnum(ibdev,
								 ib_port_num);
		return ibdev->mdev;
	}

	if (!mlx5_core_mp_enabled(ibdev->mdev) ||
	    ll != IB_LINK_LAYER_ETHERNET) {
		if (native_port_num)
			*native_port_num = ib_port_num;
		return ibdev->mdev;
	}

	if (native_port_num)
		*native_port_num = 1;

	port = &ibdev->port[ib_port_num - 1];
	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[ib_port_num - 1].mp.mpi;
	if (mpi && !mpi->unaffiliate) {
		mdev = mpi->mdev;
		/* If it's the master no need to refcount, it'll exist
		 * as long as the ib_dev exists.
		 */
		if (!mpi->is_master)
			mpi->mdev_refcnt++;
	}
	spin_unlock(&port->mp.mpi_lock);

	return mdev;
}

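/*
 * Drop the reference taken by mlx5_ib_get_native_port_mdev(). An
 * illustrative caller sketch (not a real caller from this file):
 *
 *	mdev = mlx5_ib_get_native_port_mdev(ibdev, port_num, NULL);
 *	if (!mdev)
 *		return;
 *	...use mdev...
 *	mlx5_ib_put_native_port_mdev(ibdev, port_num);
 */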
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *ibdev, u32 port_num)
{
	enum rdma_link_layer ll = mlx5_ib_port_link_layer(&ibdev->ib_dev,
							  port_num);
	struct mlx5_ib_multiport_info *mpi;
	struct mlx5_ib_port *port;

	if (!mlx5_core_mp_enabled(ibdev->mdev) || ll != IB_LINK_LAYER_ETHERNET)
		return;

	port = &ibdev->port[port_num - 1];

	spin_lock(&port->mp.mpi_lock);
	mpi = ibdev->port[port_num - 1].mp.mpi;
	if (mpi->is_master)
		goto out;

	mpi->mdev_refcnt--;
	if (mpi->unaffiliate)
		complete(&mpi->unref_comp);
out:
	spin_unlock(&port->mp.mpi_lock);
}

static int translate_eth_legacy_proto_oper(u32 eth_proto_oper,
					   u16 *active_speed, u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_1000BASE_CX_SGMII):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_KX):
	case MLX5E_PROT_MASK(MLX5E_100BASE_TX):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_T):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_T):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KX4):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_SR):
	case MLX5E_PROT_MASK(MLX5E_10GBASE_ER):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GBASE_CR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_KR):
	case MLX5E_PROT_MASK(MLX5E_25GBASE_SR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_40GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GBASE_CR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_KR2):
	case MLX5E_PROT_MASK(MLX5E_50GBASE_SR2):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_56GBASE_R4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_FDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GBASE_CR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_SR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_KR4):
	case MLX5E_PROT_MASK(MLX5E_100GBASE_LR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

static int translate_eth_ext_proto_oper(u32 eth_proto_oper, u16 *active_speed,
					u8 *active_width)
{
	switch (eth_proto_oper) {
	case MLX5E_PROT_MASK(MLX5E_SGMII_100M):
	case MLX5E_PROT_MASK(MLX5E_1000BASE_X_SGMII):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_SDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_5GBASE_R):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_DDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_10GBASE_XFI_XAUI_1):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_40GBASE_XLAUI_4_XLPPI_4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_QDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_25GAUI_1_25GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_CAUI_4_100GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_EDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_2_100GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_100GAUI_1_100GBASE_CR_KR):
		*active_width = IB_WIDTH_1X;
		*active_speed = IB_SPEED_NDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_4_200GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_200GAUI_2_200GBASE_CR2_KR2):
		*active_width = IB_WIDTH_2X;
		*active_speed = IB_SPEED_NDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_400GAUI_8_400GBASE_CR8):
		*active_width = IB_WIDTH_8X;
		*active_speed = IB_SPEED_HDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_400GAUI_4_400GBASE_CR4_KR4):
		*active_width = IB_WIDTH_4X;
		*active_speed = IB_SPEED_NDR;
		break;
	case MLX5E_PROT_MASK(MLX5E_800GAUI_8_800GBASE_CR8_KR8):
		*active_width = IB_WIDTH_8X;
		*active_speed = IB_SPEED_NDR;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

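/*
 * Translate the PTYS eth_proto_oper bitmask into an IB speed/width
 * pair, using the extended table when ext is set. For example,
 * MLX5E_100GAUI_2_100GBASE_CR2_KR2 reports IB_WIDTH_2X at
 * IB_SPEED_HDR (two 50 Gb/s lanes).
 */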
static int translate_eth_proto_oper(u32 eth_proto_oper, u16 *active_speed,
				    u8 *active_width, bool ext)
{
	return ext ?
		translate_eth_ext_proto_oper(eth_proto_oper, active_speed,
					     active_width) :
		translate_eth_legacy_proto_oper(eth_proto_oper, active_speed,
						active_width);
}

static int mlx5_query_port_roce(struct ib_device *device, u32 port_num,
				struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(device);
	u32 out[MLX5_ST_SZ_DW(ptys_reg)] = {0};
	struct mlx5_core_dev *mdev;
	struct net_device *ndev, *upper;
	enum ib_mtu ndev_ib_mtu;
	bool put_mdev = true;
	u32 eth_prot_oper;
	u32 mdev_port_num;
	bool ext;
	int err;

	mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num);
	if (!mdev) {
		/* This means the port isn't affiliated yet. Get the
		 * info for the master port instead.
		 */
		put_mdev = false;
		mdev = dev->mdev;
		mdev_port_num = 1;
		port_num = 1;
	}

	/* Possible bad flows are checked before filling out props, so in
	 * case of an error props will still be zeroed out.
	 * Use the native port in case of reps.
	 */
	if (dev->is_rep)
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   1, 0);
	else
		err = mlx5_query_port_ptys(mdev, out, sizeof(out), MLX5_PTYS_EN,
					   mdev_port_num, 0);
	if (err)
		goto out;
	ext = !!MLX5_GET_ETH_PROTO(ptys_reg, out, true, eth_proto_capability);
	eth_prot_oper = MLX5_GET_ETH_PROTO(ptys_reg, out, ext, eth_proto_oper);

	props->active_width = IB_WIDTH_4X;
	props->active_speed = IB_SPEED_QDR;

	translate_eth_proto_oper(eth_prot_oper, &props->active_speed,
				 &props->active_width, ext);

	if (!dev->is_rep && dev->mdev->roce.roce_en) {
		u16 qkey_viol_cntr;

		props->port_cap_flags |= IB_PORT_CM_SUP;
		props->ip_gids = true;
		props->gid_tbl_len = MLX5_CAP_ROCE(dev->mdev,
						   roce_address_table_size);
		mlx5_query_nic_vport_qkey_viol_cntr(mdev, &qkey_viol_cntr);
		props->qkey_viol_cntr = qkey_viol_cntr;
	}
	props->max_mtu = IB_MTU_4096;
	props->max_msg_sz = 1 << MLX5_CAP_GEN(dev->mdev, log_max_msg);
	props->pkey_tbl_len = 1;
	props->state = IB_PORT_DOWN;
	props->phys_state = IB_PORT_PHYS_STATE_DISABLED;

	/* If this is a stub query for an unaffiliated port stop here */
	if (!put_mdev)
		goto out;

	ndev = mlx5_ib_get_netdev(device, port_num);
	if (!ndev)
		goto out;

	if (dev->lag_active) {
		rcu_read_lock();
		upper = netdev_master_upper_dev_get_rcu(ndev);
		if (upper) {
			dev_put(ndev);
			ndev = upper;
			dev_hold(ndev);
		}
		rcu_read_unlock();
	}

	if (netif_running(ndev) && netif_carrier_ok(ndev)) {
		props->state = IB_PORT_ACTIVE;
		props->phys_state = IB_PORT_PHYS_STATE_LINK_UP;
	}

	ndev_ib_mtu = iboe_get_mtu(ndev->mtu);

	dev_put(ndev);

	props->active_mtu = min(props->max_mtu, ndev_ib_mtu);
out:
	if (put_mdev)
		mlx5_ib_put_native_port_mdev(dev, port_num);
	return err;
}

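/*
 * Program one entry of the HW RoCE GID table. A NULL gid leaves the
 * address fields cleared, which is how mlx5_ib_del_gid() below removes
 * an entry.
 */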
int set_roce_addr(struct mlx5_ib_dev *dev, u32 port_num,
		  unsigned int index, const union ib_gid *gid,
		  const struct ib_gid_attr *attr)
{
	enum ib_gid_type gid_type;
	u16 vlan_id = 0xffff;
	u8 roce_version = 0;
	u8 roce_l3_type = 0;
	u8 mac[ETH_ALEN];
	int ret;

	gid_type = attr->gid_type;
	if (gid) {
		ret = rdma_read_gid_l2_fields(attr, &vlan_id, &mac[0]);
		if (ret)
			return ret;
	}

	switch (gid_type) {
	case IB_GID_TYPE_ROCE:
		roce_version = MLX5_ROCE_VERSION_1;
		break;
	case IB_GID_TYPE_ROCE_UDP_ENCAP:
		roce_version = MLX5_ROCE_VERSION_2;
		if (gid && ipv6_addr_v4mapped((void *)gid))
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV4;
		else
			roce_l3_type = MLX5_ROCE_L3_TYPE_IPV6;
		break;

	default:
		mlx5_ib_warn(dev, "Unexpected GID type %u\n", gid_type);
	}

	return mlx5_core_roce_gid_set(dev->mdev, index, roce_version,
				      roce_l3_type, gid->raw, mac,
				      vlan_id < VLAN_CFI_MASK, vlan_id,
				      port_num);
}

static int mlx5_ib_add_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	int ret;

	ret = mlx5r_add_gid_macsec_operations(attr);
	if (ret)
		return ret;

	return set_roce_addr(to_mdev(attr->device), attr->port_num,
			     attr->index, &attr->gid, attr);
}

static int mlx5_ib_del_gid(const struct ib_gid_attr *attr,
			   __always_unused void **context)
{
	int ret;

	ret = set_roce_addr(to_mdev(attr->device), attr->port_num,
			    attr->index, NULL, attr);
	if (ret)
		return ret;

	mlx5r_del_gid_macsec_operations(attr);
	return 0;
}

__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr)
{
	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		return 0;

	return cpu_to_be16(MLX5_CAP_ROCE(dev->mdev, r_roce_min_src_udp_port));
}

static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	if (MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_IB)
		return !MLX5_CAP_GEN(dev->mdev, ib_virt);
	return 0;
}

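/*
 * Device and port attributes are queried through one of three paths:
 * MAD commands (IB link layer without ib_virt), the HCA vport context
 * (IB), or the NIC vport context (Ethernet/RoCE).
 * mlx5_get_vport_access_method() selects among them.
 */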
enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};

static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}

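/*
 * atomic_size_qp is a bitmask of supported atomic operand sizes;
 * bit 3 (MLX5_ATOMIC_SIZE_QP_8BYTES) covers the 8-byte operands used
 * by the IB verbs compare-and-swap and fetch-and-add operations.
 */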
static void get_atomic_caps(struct mlx5_ib_dev *dev,
			    u8 atomic_size_qp,
			    struct ib_device_attr *props)
{
	u8 tmp;
	u8 atomic_operations = MLX5_CAP_ATOMIC(dev->mdev, atomic_operations);
	u8 atomic_req_8B_endianness_mode =
		MLX5_CAP_ATOMIC(dev->mdev, atomic_req_8B_endianness_mode);

	/* Check if HW supports 8-byte standard atomic operations and is
	 * capable of responding in host endianness.
	 */
	tmp = MLX5_ATOMIC_OPS_CMP_SWAP | MLX5_ATOMIC_OPS_FETCH_ADD;
	if (((atomic_operations & tmp) == tmp) &&
	    (atomic_size_qp & MLX5_ATOMIC_SIZE_QP_8BYTES) &&
	    (atomic_req_8B_endianness_mode)) {
		props->atomic_cap = IB_ATOMIC_HCA;
	} else {
		props->atomic_cap = IB_ATOMIC_NONE;
	}
}

static void get_atomic_caps_qp(struct mlx5_ib_dev *dev,
			       struct ib_device_attr *props)
{
	u8 atomic_size_qp = MLX5_CAP_ATOMIC(dev->mdev, atomic_size_qp);

	get_atomic_caps(dev, atomic_size_qp, props);
}

static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_system_image_guid(mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*sys_image_guid = cpu_to_be64(tmp);

	return err;
}

static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}

static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}

static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		break;

	case MLX5_VPORT_ACCESS_METHOD_NIC:
		err = mlx5_query_nic_vport_node_guid(dev->mdev, &tmp);
		break;

	default:
		return -EINVAL;
	}

	if (!err)
		*node_guid = cpu_to_be64(tmp);

	return err;
}

struct mlx5_reg_node_desc {
	u8 desc[IB_DEVICE_NODE_DESC_MAX];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}

static void fill_esw_mgr_reg_c0(struct mlx5_core_dev *mdev,
				struct mlx5_ib_query_device_resp *resp)
{
	struct mlx5_eswitch *esw = mdev->priv.eswitch;
	u16 vport = mlx5_eswitch_manager_vport(mdev);

	resp->reg_c0.value = mlx5_eswitch_get_vport_metadata_for_match(esw,
								       vport);
	resp->reg_c0.mask = mlx5_eswitch_get_vport_metadata_mask();
}

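/*
 * Fill the mandatory ib_device_attr fields and, for userspace callers,
 * an extensible vendor response. Each optional response field is
 * reported only when the caller's output buffer can hold it, following
 * this pattern (illustrative; some_caps stands for any optional field):
 *
 *	if (offsetofend(typeof(resp), some_caps) <= uhw_outlen) {
 *		resp.some_caps = ...;
 *		resp.response_length += sizeof(resp.some_caps);
 *	}
 *
 * so older userspace libraries that know a shorter struct keep working.
 */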
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	size_t uhw_outlen = (uhw) ? uhw->outlen : 0;
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_sq_desc;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);
	bool raw_support = !mlx5_core_mp_enabled(mdev);
	struct mlx5_ib_query_device_resp resp = {};
	size_t resp_len;
	u64 max_tso;

	resp_len = sizeof(resp.comp_mask) + sizeof(resp.response_length);
	if (uhw_outlen && uhw_outlen < resp_len)
		return -EINVAL;

	resp.response_length = resp_len;

	if (uhw && uhw->inlen && !ib_is_udata_cleared(uhw, 0, uhw->inlen))
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	props->max_pkeys = dev->pkey_table_len;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	if (MLX5_CAP_GEN(mdev, imaicl)) {
		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW |
					   IB_DEVICE_MEM_WINDOW_TYPE_2B;
		props->max_mw = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
		/* We support 'Gappy' memory registration too */
		props->kernel_cap_flags |= IBK_SG_GAPS_REG;
	}
	/* IB_WR_REG_MR always requires changing the entity size with UMR */
	if (!MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->kernel_cap_flags |= IBK_INTEGRITY_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->kernel_cap_flags |= IBK_BLOCK_MULTICAST_LOOPBACK;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) && raw_support) {
		if (MLX5_CAP_ETH(mdev, csum_cap)) {
			/* Legacy bit to support old userspace libraries */
			props->device_cap_flags |= IB_DEVICE_RAW_IP_CSUM;
			props->raw_packet_caps |= IB_RAW_PACKET_CAP_IP_CSUM;
		}

		if (MLX5_CAP_ETH(dev->mdev, vlan_cap))
			props->raw_packet_caps |=
				IB_RAW_PACKET_CAP_CVLAN_STRIPPING;

		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen) {
			max_tso = MLX5_CAP_ETH(mdev, max_lso_cap);
			if (max_tso) {
				resp.tso_caps.max_tso = 1 << max_tso;
				resp.tso_caps.supported_qpts |=
					1 << IB_QPT_RAW_PACKET;
				resp.response_length += sizeof(resp.tso_caps);
			}
		}

		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen) {
			resp.rss_caps.rx_hash_function =
				MLX5_RX_HASH_FUNC_TOEPLITZ;
			resp.rss_caps.rx_hash_fields_mask =
				MLX5_RX_HASH_SRC_IPV4 |
				MLX5_RX_HASH_DST_IPV4 |
				MLX5_RX_HASH_SRC_IPV6 |
				MLX5_RX_HASH_DST_IPV6 |
				MLX5_RX_HASH_SRC_PORT_TCP |
				MLX5_RX_HASH_DST_PORT_TCP |
				MLX5_RX_HASH_SRC_PORT_UDP |
				MLX5_RX_HASH_DST_PORT_UDP |
				MLX5_RX_HASH_INNER;
			resp.response_length += sizeof(resp.rss_caps);
		}
	} else {
		if (offsetofend(typeof(resp), tso_caps) <= uhw_outlen)
			resp.response_length += sizeof(resp.tso_caps);
		if (offsetofend(typeof(resp), rss_caps) <= uhw_outlen)
			resp.response_length += sizeof(resp.rss_caps);
	}

	if (MLX5_CAP_GEN(mdev, ipoib_basic_offloads)) {
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
		props->kernel_cap_flags |= IBK_UD_TSO;
	}

	if (MLX5_CAP_GEN(dev->mdev, rq_delay_drop) &&
	    MLX5_CAP_GEN(dev->mdev, general_notification_event) &&
	    raw_support)
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_DELAY_DROP;

	if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) &&
	    MLX5_CAP_IPOIB_ENHANCED(mdev, csum_cap))
		props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;

	if (MLX5_CAP_GEN(dev->mdev, eth_net_offloads) &&
	    MLX5_CAP_ETH(dev->mdev, scatter_fcs) &&
	    raw_support) {
		/* Legacy bit to support old userspace libraries */
		props->device_cap_flags |= IB_DEVICE_RAW_SCATTER_FCS;
		props->raw_packet_caps |= IB_RAW_PACKET_CAP_SCATTER_FCS;
	}

	if (MLX5_CAP_DEV_MEM(mdev, memic)) {
		props->max_dm_size =
			MLX5_CAP_DEV_MEM(mdev, max_memic_size);
	}

	if (mlx5_get_flow_namespace(dev->mdev, MLX5_FLOW_NAMESPACE_BYPASS))
		props->device_cap_flags |= IB_DEVICE_MANAGED_FLOW_STEERING;

	if (MLX5_CAP_GEN(mdev, end_pad))
		props->device_cap_flags |= IB_DEVICE_PCI_WRITE_END_PADDING;

	props->vendor_part_id = mdev->pdev->device;
	props->hw_ver = mdev->pdev->revision;

	props->max_mr_size = ~0ull;
	props->page_size_cap = ~(min_page_size - 1);
	props->max_qp = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg = MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		sizeof(struct mlx5_wqe_data_seg);
	max_sq_desc = min_t(int, MLX5_CAP_GEN(mdev, max_wqe_sz_sq), 512);
	max_sq_sg = (max_sq_desc - sizeof(struct mlx5_wqe_ctrl_seg) -
		     sizeof(struct mlx5_wqe_raddr_seg)) /
		sizeof(struct mlx5_wqe_data_seg);
	props->max_send_sge = max_sq_sg;
	props->max_recv_sge = max_rq_sg;
	props->max_sge_rd = MLX5_MAX_SGE_RD;
	props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge = max_rq_sg - 1;
	props->max_fast_reg_page_list_len =
		1 << MLX5_CAP_GEN(mdev, log_max_klm_list_size);
	props->max_pi_fast_reg_page_list_len =
		props->max_fast_reg_page_list_len / 2;
	props->max_sgl_rd =
		MLX5_CAP_GEN(mdev, max_sgl_for_optimized_performance);
	get_atomic_caps_qp(dev, props);
	props->masked_atomic_cap = IB_ATOMIC_NONE;
	props->max_mcast_grp = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_ah = INT_MAX;
	props->hca_core_clock = MLX5_CAP_GEN(mdev, device_frequency_khz);
	props->timestamp_mask = 0x7FFFFFFFFFFFFFFFULL;

	if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING)) {
		if (dev->odp_caps.general_caps & IB_ODP_SUPPORT)
			props->kernel_cap_flags |= IBK_ON_DEMAND_PAGING;
		props->odp_caps = dev->odp_caps;
		if (!uhw) {
			/* ODP for kernel QPs is not implemented for receive
			 * WQEs and SRQ WQEs
			 */
			props->odp_caps.per_transport_caps.rc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.uc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.ud_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
			props->odp_caps.per_transport_caps.xrc_odp_caps &=
				~(IB_ODP_SUPPORT_READ |
				  IB_ODP_SUPPORT_SRQ_RECV);
		}
	}

	if (mlx5_core_is_vf(mdev))
		props->kernel_cap_flags |= IBK_VIRTUAL_FUNCTION;

	if (mlx5_ib_port_link_layer(ibdev, 1) ==
	    IB_LINK_LAYER_ETHERNET && raw_support) {
		props->rss_caps.max_rwq_indirection_tables =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt);
		props->rss_caps.max_rwq_indirection_table_size =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rqt_size);
		props->rss_caps.supported_qpts = 1 << IB_QPT_RAW_PACKET;
		props->max_wq_type_rq =
			1 << MLX5_CAP_GEN(dev->mdev, log_max_rq);
	}

	if (MLX5_CAP_GEN(mdev, tag_matching)) {
		props->tm_caps.max_num_tags =
			(1 << MLX5_CAP_GEN(mdev, log_tag_matching_list_sz)) - 1;
		props->tm_caps.max_ops =
			1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
		props->tm_caps.max_sge = MLX5_TM_MAX_SGE;
	}

	if (MLX5_CAP_GEN(mdev, tag_matching) &&
	    MLX5_CAP_GEN(mdev, rndv_offload_rc)) {
		props->tm_caps.flags = IB_TM_CAP_RNDV_RC;
		props->tm_caps.max_rndv_hdr_size = MLX5_TM_MAX_RNDV_MSG_SIZE;
	}

	if (MLX5_CAP_GEN(dev->mdev, cq_moderation)) {
		props->cq_caps.max_cq_moderation_count =
			MLX5_MAX_CQ_COUNT;
		props->cq_caps.max_cq_moderation_period =
			MLX5_MAX_CQ_PERIOD;
	}

	if (offsetofend(typeof(resp), cqe_comp_caps) <= uhw_outlen) {
		resp.response_length += sizeof(resp.cqe_comp_caps);

		if (MLX5_CAP_GEN(dev->mdev, cqe_compression)) {
			resp.cqe_comp_caps.max_num =
				MLX5_CAP_GEN(dev->mdev,
					     cqe_compression_max_num);

			resp.cqe_comp_caps.supported_format =
				MLX5_IB_CQE_RES_FORMAT_HASH |
				MLX5_IB_CQE_RES_FORMAT_CSUM;

			if (MLX5_CAP_GEN(dev->mdev, mini_cqe_resp_stride_index))
				resp.cqe_comp_caps.supported_format |=
					MLX5_IB_CQE_RES_FORMAT_CSUM_STRIDX;
		}
	}

	if (offsetofend(typeof(resp), packet_pacing_caps) <= uhw_outlen &&
	    raw_support) {
		if (MLX5_CAP_QOS(mdev, packet_pacing) &&
		    MLX5_CAP_GEN(mdev, qos)) {
			resp.packet_pacing_caps.qp_rate_limit_max =
				MLX5_CAP_QOS(mdev, packet_pacing_max_rate);
			resp.packet_pacing_caps.qp_rate_limit_min =
				MLX5_CAP_QOS(mdev, packet_pacing_min_rate);
			resp.packet_pacing_caps.supported_qpts |=
				1 << IB_QPT_RAW_PACKET;
			if (MLX5_CAP_QOS(mdev, packet_pacing_burst_bound) &&
			    MLX5_CAP_QOS(mdev, packet_pacing_typical_size))
				resp.packet_pacing_caps.cap_flags |=
					MLX5_IB_PP_SUPPORT_BURST;
		}
		resp.response_length += sizeof(resp.packet_pacing_caps);
	}

	if (offsetofend(typeof(resp), mlx5_ib_support_multi_pkt_send_wqes) <=
	    uhw_outlen) {
		if (MLX5_CAP_ETH(mdev, multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes =
				MLX5_IB_ALLOW_MPW;

		if (MLX5_CAP_ETH(mdev, enhanced_multi_pkt_send_wqe))
			resp.mlx5_ib_support_multi_pkt_send_wqes |=
				MLX5_IB_SUPPORT_EMPW;

		resp.response_length +=
			sizeof(resp.mlx5_ib_support_multi_pkt_send_wqes);
	}

	if (offsetofend(typeof(resp), flags) <= uhw_outlen) {
		resp.response_length += sizeof(resp.flags);

		if (MLX5_CAP_GEN(mdev, cqe_compression_128))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_COMP;

		if (MLX5_CAP_GEN(mdev, cqe_128_always))
			resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_CQE_128B_PAD;
		if (MLX5_CAP_GEN(mdev, qp_packet_based))
			resp.flags |=
				MLX5_IB_QUERY_DEV_RESP_PACKET_BASED_CREDIT_MODE;

		resp.flags |= MLX5_IB_QUERY_DEV_RESP_FLAGS_SCAT2CQE_DCT;
	}

	if (offsetofend(typeof(resp), sw_parsing_caps) <= uhw_outlen) {
		resp.response_length += sizeof(resp.sw_parsing_caps);
		if (MLX5_CAP_ETH(mdev, swp)) {
			resp.sw_parsing_caps.sw_parsing_offloads |=
				MLX5_IB_SW_PARSING;

			if (MLX5_CAP_ETH(mdev, swp_csum))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_CSUM;

			if (MLX5_CAP_ETH(mdev, swp_lso))
				resp.sw_parsing_caps.sw_parsing_offloads |=
					MLX5_IB_SW_PARSING_LSO;

			if (resp.sw_parsing_caps.sw_parsing_offloads)
				resp.sw_parsing_caps.supported_qpts =
					BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (offsetofend(typeof(resp), striding_rq_caps) <= uhw_outlen &&
	    raw_support) {
		resp.response_length += sizeof(resp.striding_rq_caps);
		if (MLX5_CAP_GEN(mdev, striding_rq)) {
			resp.striding_rq_caps.min_single_stride_log_num_of_bytes =
				MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES;
			resp.striding_rq_caps.max_single_stride_log_num_of_bytes =
				MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES;
			if (MLX5_CAP_GEN(dev->mdev, ext_stride_num_range))
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			else
				resp.striding_rq_caps
					.min_single_wqe_log_num_of_strides =
					MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.max_single_wqe_log_num_of_strides =
				MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES;
			resp.striding_rq_caps.supported_qpts =
				BIT(IB_QPT_RAW_PACKET);
		}
	}

	if (offsetofend(typeof(resp), tunnel_offloads_caps) <= uhw_outlen) {
		resp.response_length += sizeof(resp.tunnel_offloads_caps);
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_vxlan))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_VXLAN;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_geneve_rx))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GENEVE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_GRE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_gre))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_GRE;
		if (MLX5_CAP_ETH(mdev, tunnel_stateless_mpls_over_udp))
			resp.tunnel_offloads_caps |=
				MLX5_IB_TUNNELED_OFFLOADS_MPLS_UDP;
	}

	if (offsetofend(typeof(resp), dci_streams_caps) <= uhw_outlen) {
		resp.response_length += sizeof(resp.dci_streams_caps);

		resp.dci_streams_caps.max_log_num_concurent =
			MLX5_CAP_GEN(mdev, log_max_dci_stream_channels);

		resp.dci_streams_caps.max_log_num_errored =
			MLX5_CAP_GEN(mdev, log_max_dci_errored_streams);
	}

	if (offsetofend(typeof(resp), reserved) <= uhw_outlen)
		resp.response_length += sizeof(resp.reserved);

	if (offsetofend(typeof(resp), reg_c0) <= uhw_outlen) {
		struct mlx5_eswitch *esw = mdev->priv.eswitch;

		resp.response_length += sizeof(resp.reg_c0);

		if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS &&
		    mlx5_eswitch_vport_match_metadata_enabled(esw))
			fill_esw_mgr_reg_c0(mdev, &resp);
	}

	if (uhw_outlen) {
		err = ib_copy_to_udata(uhw, &resp, resp.response_length);

		if (err)
			return err;
	}

	return 0;
}

static void translate_active_width(struct ib_device *ibdev, u16 active_width,
				   u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	if (active_width & MLX5_PTYS_WIDTH_1X)
		*ib_width = IB_WIDTH_1X;
	else if (active_width & MLX5_PTYS_WIDTH_2X)
		*ib_width = IB_WIDTH_2X;
	else if (active_width & MLX5_PTYS_WIDTH_4X)
		*ib_width = IB_WIDTH_4X;
	else if (active_width & MLX5_PTYS_WIDTH_8X)
		*ib_width = IB_WIDTH_8X;
	else if (active_width & MLX5_PTYS_WIDTH_12X)
		*ib_width = IB_WIDTH_12X;
	else {
		mlx5_ib_dbg(dev, "Invalid active_width %d, setting width to default value: 4x\n",
			    active_width);
		*ib_width = IB_WIDTH_4X;
	}
}

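/*
 * Map a byte MTU to the IB-spec enum ib_mtu encoding:
 * 256 -> IB_MTU_256 (1), 512 -> IB_MTU_512 (2), ...,
 * 4096 -> IB_MTU_4096 (5).
 */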
static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}

enum ib_max_vl_num {
	__IB_MAX_VL_0 = 1,
	__IB_MAX_VL_0_1 = 2,
	__IB_MAX_VL_0_3 = 3,
	__IB_MAX_VL_0_7 = 4,
	__IB_MAX_VL_0_14 = 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0 = 1,
	MLX5_VL_HW_0_1 = 2,
	MLX5_VL_HW_0_2 = 3,
	MLX5_VL_HW_0_3 = 4,
	MLX5_VL_HW_0_4 = 5,
	MLX5_VL_HW_0_5 = 6,
	MLX5_VL_HW_0_6 = 7,
	MLX5_VL_HW_0_7 = 8,
	MLX5_VL_HW_0_14 = 15
};

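/*
 * Map the HW VL capability to the IB encoding; e.g. MLX5_VL_HW_0_3
 * becomes __IB_MAX_VL_0_3 (VLs 0-3). HW values with no IB equivalent
 * (such as MLX5_VL_HW_0_2) are rejected with -EINVAL.
 */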
| 1311 | static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap, |
| 1312 | u8 *max_vl_num) |
| 1313 | { |
| 1314 | switch (vl_hw_cap) { |
| 1315 | case MLX5_VL_HW_0: |
| 1316 | *max_vl_num = __IB_MAX_VL_0; |
| 1317 | break; |
| 1318 | case MLX5_VL_HW_0_1: |
| 1319 | *max_vl_num = __IB_MAX_VL_0_1; |
| 1320 | break; |
| 1321 | case MLX5_VL_HW_0_3: |
| 1322 | *max_vl_num = __IB_MAX_VL_0_3; |
| 1323 | break; |
| 1324 | case MLX5_VL_HW_0_7: |
| 1325 | *max_vl_num = __IB_MAX_VL_0_7; |
| 1326 | break; |
| 1327 | case MLX5_VL_HW_0_14: |
| 1328 | *max_vl_num = __IB_MAX_VL_0_14; |
| 1329 | break; |
| 1330 | |
| 1331 | default: |
| 1332 | return -EINVAL; |
| 1333 | } |
| 1334 | |
| 1335 | return 0; |
| 1336 | } |
| 1337 | |
| 1338 | static int mlx5_query_hca_port(struct ib_device *ibdev, u32 port, |
| 1339 | struct ib_port_attr *props) |
| 1340 | { |
| 1341 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1342 | struct mlx5_core_dev *mdev = dev->mdev; |
| 1343 | struct mlx5_hca_vport_context *rep; |
| 1344 | u8 vl_hw_cap, plane_index = 0; |
| 1345 | u16 max_mtu; |
| 1346 | u16 oper_mtu; |
| 1347 | int err; |
| 1348 | u16 ib_link_width_oper; |
| 1349 | |
| 1350 | rep = kzalloc(sizeof(*rep), GFP_KERNEL); |
| 1351 | if (!rep) { |
| 1352 | err = -ENOMEM; |
| 1353 | goto out; |
| 1354 | } |
| 1355 | |
| 1356 | /* props being zeroed by the caller, avoid zeroing it here */ |
| 1357 | |
| 1358 | if (ibdev->type == RDMA_DEVICE_TYPE_SMI) { |
| 1359 | plane_index = port; |
| 1360 | port = smi_to_native_portnum(dev, port); |
| 1361 | } |
| 1362 | |
| 1363 | err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep); |
| 1364 | if (err) |
| 1365 | goto out; |
| 1366 | |
| 1367 | props->lid = rep->lid; |
| 1368 | props->lmc = rep->lmc; |
| 1369 | props->sm_lid = rep->sm_lid; |
| 1370 | props->sm_sl = rep->sm_sl; |
| 1371 | props->state = rep->vport_state; |
| 1372 | props->phys_state = rep->port_physical_state; |
| 1373 | |
| 1374 | props->port_cap_flags = rep->cap_mask1; |
| 1375 | if (dev->num_plane) { |
| 1376 | props->port_cap_flags |= IB_PORT_SM_DISABLED; |
| 1377 | props->port_cap_flags &= ~IB_PORT_SM; |
| 1378 | } else if (ibdev->type == RDMA_DEVICE_TYPE_SMI) |
| 1379 | props->port_cap_flags &= ~IB_PORT_CM_SUP; |
| 1380 | |
| 1381 | props->gid_tbl_len = mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size)); |
| 1382 | props->max_msg_sz = 1 << MLX5_CAP_GEN(mdev, log_max_msg); |
| 1383 | props->pkey_tbl_len = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size)); |
| 1384 | props->bad_pkey_cntr = rep->pkey_violation_counter; |
| 1385 | props->qkey_viol_cntr = rep->qkey_violation_counter; |
| 1386 | props->subnet_timeout = rep->subnet_timeout; |
| 1387 | props->init_type_reply = rep->init_type_reply; |
| 1388 | |
| 1389 | if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP) |
| 1390 | props->port_cap_flags2 = rep->cap_mask2; |
| 1391 | |
| 1392 | err = mlx5_query_ib_port_oper(mdev, &ib_link_width_oper, |
| 1393 | &props->active_speed, port, plane_index); |
| 1394 | if (err) |
| 1395 | goto out; |
| 1396 | |
| 1397 | translate_active_width(ibdev, ib_link_width_oper, &props->active_width); |
| 1398 | |
| 1399 | mlx5_query_port_max_mtu(mdev, &max_mtu, port); |
| 1400 | |
| 1401 | props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu); |
| 1402 | |
| 1403 | mlx5_query_port_oper_mtu(mdev, &oper_mtu, port); |
| 1404 | |
| 1405 | props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu); |
| 1406 | |
| 1407 | err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port); |
| 1408 | if (err) |
| 1409 | goto out; |
| 1410 | |
| 1411 | err = translate_max_vl_num(ibdev, vl_hw_cap, |
| 1412 | &props->max_vl_num); |
| 1413 | out: |
| 1414 | kfree(rep); |
| 1415 | return err; |
| 1416 | } |
| 1417 | |
| 1418 | int mlx5_ib_query_port(struct ib_device *ibdev, u32 port, |
| 1419 | struct ib_port_attr *props) |
| 1420 | { |
| 1421 | unsigned int count; |
| 1422 | int ret; |
| 1423 | |
| 1424 | switch (mlx5_get_vport_access_method(ibdev)) { |
| 1425 | case MLX5_VPORT_ACCESS_METHOD_MAD: |
| 1426 | ret = mlx5_query_mad_ifc_port(ibdev, port, props); |
| 1427 | break; |
| 1428 | |
| 1429 | case MLX5_VPORT_ACCESS_METHOD_HCA: |
| 1430 | ret = mlx5_query_hca_port(ibdev, port, props); |
| 1431 | break; |
| 1432 | |
| 1433 | case MLX5_VPORT_ACCESS_METHOD_NIC: |
| 1434 | ret = mlx5_query_port_roce(ibdev, port, props); |
| 1435 | break; |
| 1436 | |
| 1437 | default: |
| 1438 | ret = -EINVAL; |
| 1439 | } |
| 1440 | |
| 1441 | if (!ret && props) { |
| 1442 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1443 | struct mlx5_core_dev *mdev; |
| 1444 | bool put_mdev = true; |
| 1445 | |
| 1446 | mdev = mlx5_ib_get_native_port_mdev(dev, port, NULL); |
| 1447 | if (!mdev) { |
| 1448 | /* If the port isn't affiliated yet query the master. |
| 1449 | * The master and slave will have the same values. |
| 1450 | */ |
| 1451 | mdev = dev->mdev; |
| 1452 | port = 1; |
| 1453 | put_mdev = false; |
| 1454 | } |
| 1455 | count = mlx5_core_reserved_gids_count(mdev); |
| 1456 | if (put_mdev) |
| 1457 | mlx5_ib_put_native_port_mdev(dev, port); |
| 1458 | props->gid_tbl_len -= count; |
| 1459 | } |
| 1460 | return ret; |
| 1461 | } |
| 1462 | |
| 1463 | static int mlx5_ib_rep_query_port(struct ib_device *ibdev, u32 port, |
| 1464 | struct ib_port_attr *props) |
| 1465 | { |
| 1466 | return mlx5_query_port_roce(ibdev, port, props); |
| 1467 | } |
| 1468 | |
| 1469 | static int mlx5_ib_rep_query_pkey(struct ib_device *ibdev, u32 port, u16 index, |
| 1470 | u16 *pkey) |
| 1471 | { |
| 1472 | /* Default special Pkey for representor device port as per the |
| 1473 | * IB specification 1.3 section 10.9.1.2. |
| 1474 | */ |
| 1475 | *pkey = 0xffff; |
| 1476 | return 0; |
| 1477 | } |
| 1478 | |
| 1479 | static int mlx5_ib_query_gid(struct ib_device *ibdev, u32 port, int index, |
| 1480 | union ib_gid *gid) |
| 1481 | { |
| 1482 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1483 | struct mlx5_core_dev *mdev = dev->mdev; |
| 1484 | |
| 1485 | switch (mlx5_get_vport_access_method(ibdev)) { |
| 1486 | case MLX5_VPORT_ACCESS_METHOD_MAD: |
| 1487 | return mlx5_query_mad_ifc_gids(ibdev, port, index, gid); |
| 1488 | |
| 1489 | case MLX5_VPORT_ACCESS_METHOD_HCA: |
| 1490 | return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid); |
| 1491 | |
| 1492 | default: |
| 1493 | return -EINVAL; |
| 1494 | } |
| 1495 | |
| 1496 | } |
| 1497 | |
| 1498 | static int mlx5_query_hca_nic_pkey(struct ib_device *ibdev, u32 port, |
| 1499 | u16 index, u16 *pkey) |
| 1500 | { |
| 1501 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1502 | struct mlx5_core_dev *mdev; |
| 1503 | bool put_mdev = true; |
| 1504 | u32 mdev_port_num; |
| 1505 | int err; |
| 1506 | |
| 1507 | mdev = mlx5_ib_get_native_port_mdev(dev, port, &mdev_port_num); |
| 1508 | if (!mdev) { |
| 1509 | /* The port isn't affiliated yet, get the PKey from the master |
| 1510 | * port. For RoCE the PKey tables will be the same. |
| 1511 | */ |
| 1512 | put_mdev = false; |
| 1513 | mdev = dev->mdev; |
| 1514 | mdev_port_num = 1; |
| 1515 | } |
| 1516 | |
| 1517 | err = mlx5_query_hca_vport_pkey(mdev, 0, mdev_port_num, 0, |
| 1518 | index, pkey); |
| 1519 | if (put_mdev) |
| 1520 | mlx5_ib_put_native_port_mdev(dev, port); |
| 1521 | |
| 1522 | return err; |
| 1523 | } |
| 1524 | |
| 1525 | static int mlx5_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index, |
| 1526 | u16 *pkey) |
| 1527 | { |
| 1528 | switch (mlx5_get_vport_access_method(ibdev)) { |
| 1529 | case MLX5_VPORT_ACCESS_METHOD_MAD: |
| 1530 | return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey); |
| 1531 | |
| 1532 | case MLX5_VPORT_ACCESS_METHOD_HCA: |
| 1533 | case MLX5_VPORT_ACCESS_METHOD_NIC: |
| 1534 | return mlx5_query_hca_nic_pkey(ibdev, port, index, pkey); |
| 1535 | default: |
| 1536 | return -EINVAL; |
| 1537 | } |
| 1538 | } |
| 1539 | |
| 1540 | static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask, |
| 1541 | struct ib_device_modify *props) |
| 1542 | { |
| 1543 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1544 | struct mlx5_reg_node_desc in; |
| 1545 | struct mlx5_reg_node_desc out; |
| 1546 | int err; |
| 1547 | |
| 1548 | if (mask & ~IB_DEVICE_MODIFY_NODE_DESC) |
| 1549 | return -EOPNOTSUPP; |
| 1550 | |
| 1551 | if (!(mask & IB_DEVICE_MODIFY_NODE_DESC)) |
| 1552 | return 0; |
| 1553 | |
| 1554 | /* |
| 1555 | * If possible, pass node desc to FW, so it can generate |
| 1556 | * a 144 trap. If cmd fails, just ignore. |
| 1557 | */ |
| 1558 | memcpy(&in, props->node_desc, IB_DEVICE_NODE_DESC_MAX); |
| 1559 | err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out, |
| 1560 | sizeof(out), MLX5_REG_NODE_DESC, 0, 1); |
| 1561 | if (err) |
| 1562 | return err; |
| 1563 | |
| 1564 | memcpy(ibdev->node_desc, props->node_desc, IB_DEVICE_NODE_DESC_MAX); |
| 1565 | |
| 1566 | return err; |
| 1567 | } |
| 1568 | |
| 1569 | static int set_port_caps_atomic(struct mlx5_ib_dev *dev, u32 port_num, u32 mask, |
| 1570 | u32 value) |
| 1571 | { |
| 1572 | struct mlx5_hca_vport_context ctx = {}; |
| 1573 | struct mlx5_core_dev *mdev; |
| 1574 | u32 mdev_port_num; |
| 1575 | int err; |
| 1576 | |
| 1577 | mdev = mlx5_ib_get_native_port_mdev(dev, port_num, &mdev_port_num); |
| 1578 | if (!mdev) |
| 1579 | return -ENODEV; |
| 1580 | |
| 1581 | err = mlx5_query_hca_vport_context(mdev, 0, mdev_port_num, 0, &ctx); |
| 1582 | if (err) |
| 1583 | goto out; |
| 1584 | |
| 1585 | if (~ctx.cap_mask1_perm & mask) { |
| 1586 | mlx5_ib_warn(dev, "trying to change bitmask 0x%X but change supported 0x%X\n", |
| 1587 | mask, ctx.cap_mask1_perm); |
| 1588 | err = -EINVAL; |
| 1589 | goto out; |
| 1590 | } |
| 1591 | |
| 1592 | ctx.cap_mask1 = value; |
| 1593 | ctx.cap_mask1_perm = mask; |
| 1594 | err = mlx5_core_modify_hca_vport_context(mdev, 0, mdev_port_num, |
| 1595 | 0, &ctx); |
| 1596 | |
| 1597 | out: |
| 1598 | mlx5_ib_put_native_port_mdev(dev, port_num); |
| 1599 | |
| 1600 | return err; |
| 1601 | } |
| 1602 | |
| 1603 | static int mlx5_ib_modify_port(struct ib_device *ibdev, u32 port, int mask, |
| 1604 | struct ib_port_modify *props) |
| 1605 | { |
| 1606 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1607 | struct ib_port_attr attr; |
| 1608 | u32 tmp; |
| 1609 | int err; |
| 1610 | u32 change_mask; |
| 1611 | u32 value; |
| 1612 | bool is_ib = (mlx5_ib_port_link_layer(ibdev, port) == |
| 1613 | IB_LINK_LAYER_INFINIBAND); |
| 1614 | |
	/* The CM layer calls ib_modify_port() regardless of the link layer. For
	 * Ethernet ports, qkey violations and port capabilities are meaningless.
	 */
| 1618 | if (!is_ib) |
| 1619 | return 0; |
| 1620 | |
| 1621 | if (MLX5_CAP_GEN(dev->mdev, ib_virt) && is_ib) { |
| 1622 | change_mask = props->clr_port_cap_mask | props->set_port_cap_mask; |
| 1623 | value = ~props->clr_port_cap_mask | props->set_port_cap_mask; |
| 1624 | return set_port_caps_atomic(dev, port, change_mask, value); |
| 1625 | } |
| 1626 | |
| 1627 | mutex_lock(&dev->cap_mask_mutex); |
| 1628 | |
| 1629 | err = ib_query_port(ibdev, port, &attr); |
| 1630 | if (err) |
| 1631 | goto out; |
| 1632 | |
| 1633 | tmp = (attr.port_cap_flags | props->set_port_cap_mask) & |
| 1634 | ~props->clr_port_cap_mask; |
| 1635 | |
| 1636 | err = mlx5_set_port_caps(dev->mdev, port, tmp); |
| 1637 | |
| 1638 | out: |
| 1639 | mutex_unlock(&dev->cap_mask_mutex); |
| 1640 | return err; |
| 1641 | } |
| 1642 | |
| 1643 | static void print_lib_caps(struct mlx5_ib_dev *dev, u64 caps) |
| 1644 | { |
| 1645 | mlx5_ib_dbg(dev, "MLX5_LIB_CAP_4K_UAR = %s\n", |
| 1646 | caps & MLX5_LIB_CAP_4K_UAR ? "y" : "n"); |
| 1647 | } |
| 1648 | |
| 1649 | static u16 calc_dynamic_bfregs(int uars_per_sys_page) |
| 1650 | { |
	/* A large system page without 4k UAR support might limit the dynamic size */
| 1652 | if (uars_per_sys_page == 1 && PAGE_SIZE > 4096) |
| 1653 | return MLX5_MIN_DYN_BFREGS; |
| 1654 | |
| 1655 | return MLX5_MAX_DYN_BFREGS; |
| 1656 | } |
| 1657 | |
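/*
 * Compute the bfreg layout for a new ucontext: round the user's request
 * up to whole system pages, reserve room for dynamically allocated
 * bfregs, and record the resulting counts in @bfregi.
 */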
| 1658 | static int calc_total_bfregs(struct mlx5_ib_dev *dev, bool lib_uar_4k, |
| 1659 | struct mlx5_ib_alloc_ucontext_req_v2 *req, |
| 1660 | struct mlx5_bfreg_info *bfregi) |
| 1661 | { |
| 1662 | int uars_per_sys_page; |
| 1663 | int bfregs_per_sys_page; |
| 1664 | int ref_bfregs = req->total_num_bfregs; |
| 1665 | |
| 1666 | if (req->total_num_bfregs == 0) |
| 1667 | return -EINVAL; |
| 1668 | |
| 1669 | BUILD_BUG_ON(MLX5_MAX_BFREGS % MLX5_NON_FP_BFREGS_IN_PAGE); |
| 1670 | BUILD_BUG_ON(MLX5_MAX_BFREGS < MLX5_NON_FP_BFREGS_IN_PAGE); |
| 1671 | |
| 1672 | if (req->total_num_bfregs > MLX5_MAX_BFREGS) |
| 1673 | return -ENOMEM; |
| 1674 | |
| 1675 | uars_per_sys_page = get_uars_per_sys_page(dev, lib_uar_4k); |
| 1676 | bfregs_per_sys_page = uars_per_sys_page * MLX5_NON_FP_BFREGS_PER_UAR; |
	/* After rounding, this holds the static allocation requested by the user */
| 1678 | req->total_num_bfregs = ALIGN(req->total_num_bfregs, bfregs_per_sys_page); |
| 1679 | if (req->num_low_latency_bfregs > req->total_num_bfregs - 1) |
| 1680 | return -EINVAL; |
| 1681 | |
| 1682 | bfregi->num_static_sys_pages = req->total_num_bfregs / bfregs_per_sys_page; |
| 1683 | bfregi->num_dyn_bfregs = ALIGN(calc_dynamic_bfregs(uars_per_sys_page), bfregs_per_sys_page); |
| 1684 | bfregi->total_num_bfregs = req->total_num_bfregs + bfregi->num_dyn_bfregs; |
| 1685 | bfregi->num_sys_pages = bfregi->total_num_bfregs / bfregs_per_sys_page; |
| 1686 | |
| 1687 | mlx5_ib_dbg(dev, "uar_4k: fw support %s, lib support %s, user requested %d bfregs, allocated %d, total bfregs %d, using %d sys pages\n", |
| 1688 | MLX5_CAP_GEN(dev->mdev, uar_4k) ? "yes" : "no", |
| 1689 | lib_uar_4k ? "yes" : "no", ref_bfregs, |
| 1690 | req->total_num_bfregs, bfregi->total_num_bfregs, |
| 1691 | bfregi->num_sys_pages); |
| 1692 | |
| 1693 | return 0; |
| 1694 | } |
| 1695 | |
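/*
 * Allocate one UAR per static system page and mark the dynamic slots
 * invalid; dynamic UARs are allocated on demand at mmap time.
 */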
| 1696 | static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *context) |
| 1697 | { |
| 1698 | struct mlx5_bfreg_info *bfregi; |
| 1699 | int err; |
| 1700 | int i; |
| 1701 | |
| 1702 | bfregi = &context->bfregi; |
| 1703 | for (i = 0; i < bfregi->num_static_sys_pages; i++) { |
| 1704 | err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i], |
| 1705 | context->devx_uid); |
| 1706 | if (err) |
| 1707 | goto error; |
| 1708 | |
| 1709 | mlx5_ib_dbg(dev, "allocated uar %d\n", bfregi->sys_pages[i]); |
| 1710 | } |
| 1711 | |
| 1712 | for (i = bfregi->num_static_sys_pages; i < bfregi->num_sys_pages; i++) |
| 1713 | bfregi->sys_pages[i] = MLX5_IB_INVALID_UAR_INDEX; |
| 1714 | |
| 1715 | return 0; |
| 1716 | |
| 1717 | error: |
| 1718 | for (--i; i >= 0; i--) |
| 1719 | if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], |
| 1720 | context->devx_uid)) |
| 1721 | mlx5_ib_warn(dev, "failed to free uar %d\n", i); |
| 1722 | |
| 1723 | return err; |
| 1724 | } |
| 1725 | |
| 1726 | static void deallocate_uars(struct mlx5_ib_dev *dev, |
| 1727 | struct mlx5_ib_ucontext *context) |
| 1728 | { |
| 1729 | struct mlx5_bfreg_info *bfregi; |
| 1730 | int i; |
| 1731 | |
| 1732 | bfregi = &context->bfregi; |
| 1733 | for (i = 0; i < bfregi->num_sys_pages; i++) |
| 1734 | if (i < bfregi->num_static_sys_pages || |
| 1735 | bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) |
| 1736 | mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], |
| 1737 | context->devx_uid); |
| 1738 | } |
| 1739 | |
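/*
 * Local loopback is enabled once a second transport domain or the first
 * loopback QP appears; the counters below let mlx5_ib_disable_lb() undo
 * this when the last user goes away.
 */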
| 1740 | int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) |
| 1741 | { |
| 1742 | int err = 0; |
| 1743 | |
| 1744 | mutex_lock(&dev->lb.mutex); |
| 1745 | if (td) |
| 1746 | dev->lb.user_td++; |
| 1747 | if (qp) |
| 1748 | dev->lb.qps++; |
| 1749 | |
| 1750 | if (dev->lb.user_td == 2 || |
| 1751 | dev->lb.qps == 1) { |
| 1752 | if (!dev->lb.enabled) { |
| 1753 | err = mlx5_nic_vport_update_local_lb(dev->mdev, true); |
| 1754 | dev->lb.enabled = true; |
| 1755 | } |
| 1756 | } |
| 1757 | |
| 1758 | mutex_unlock(&dev->lb.mutex); |
| 1759 | |
| 1760 | return err; |
| 1761 | } |
| 1762 | |
| 1763 | void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) |
| 1764 | { |
| 1765 | mutex_lock(&dev->lb.mutex); |
| 1766 | if (td) |
| 1767 | dev->lb.user_td--; |
| 1768 | if (qp) |
| 1769 | dev->lb.qps--; |
| 1770 | |
| 1771 | if (dev->lb.user_td == 1 && |
| 1772 | dev->lb.qps == 0) { |
| 1773 | if (dev->lb.enabled) { |
| 1774 | mlx5_nic_vport_update_local_lb(dev->mdev, false); |
| 1775 | dev->lb.enabled = false; |
| 1776 | } |
| 1777 | } |
| 1778 | |
| 1779 | mutex_unlock(&dev->lb.mutex); |
| 1780 | } |
| 1781 | |
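/*
 * Allocate a transport domain. On Ethernet ports that can disable local
 * loopback, each transport domain also takes a loopback reference (see
 * the TD counting in mlx5_ib_enable_lb()).
 */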
| 1782 | static int mlx5_ib_alloc_transport_domain(struct mlx5_ib_dev *dev, u32 *tdn, |
| 1783 | u16 uid) |
| 1784 | { |
| 1785 | int err; |
| 1786 | |
| 1787 | if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) |
| 1788 | return 0; |
| 1789 | |
| 1790 | err = mlx5_cmd_alloc_transport_domain(dev->mdev, tdn, uid); |
| 1791 | if (err) |
| 1792 | return err; |
| 1793 | |
| 1794 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || |
| 1795 | (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && |
| 1796 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) |
| 1797 | return err; |
| 1798 | |
| 1799 | return mlx5_ib_enable_lb(dev, true, false); |
| 1800 | } |
| 1801 | |
| 1802 | static void mlx5_ib_dealloc_transport_domain(struct mlx5_ib_dev *dev, u32 tdn, |
| 1803 | u16 uid) |
| 1804 | { |
| 1805 | if (!MLX5_CAP_GEN(dev->mdev, log_max_transport_domain)) |
| 1806 | return; |
| 1807 | |
| 1808 | mlx5_cmd_dealloc_transport_domain(dev->mdev, tdn, uid); |
| 1809 | |
| 1810 | if ((MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH) || |
| 1811 | (!MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) && |
| 1812 | !MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) |
| 1813 | return; |
| 1814 | |
| 1815 | mlx5_ib_disable_lb(dev, true, false); |
| 1816 | } |
| 1817 | |
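/* Fill the alloc_ucontext response from device caps and context state. */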
| 1818 | static int set_ucontext_resp(struct ib_ucontext *uctx, |
| 1819 | struct mlx5_ib_alloc_ucontext_resp *resp) |
| 1820 | { |
| 1821 | struct ib_device *ibdev = uctx->device; |
| 1822 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1823 | struct mlx5_ib_ucontext *context = to_mucontext(uctx); |
| 1824 | struct mlx5_bfreg_info *bfregi = &context->bfregi; |
| 1825 | |
| 1826 | if (MLX5_CAP_GEN(dev->mdev, dump_fill_mkey)) { |
| 1827 | resp->dump_fill_mkey = dev->mkeys.dump_fill_mkey; |
| 1828 | resp->comp_mask |= |
| 1829 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY; |
| 1830 | } |
| 1831 | |
| 1832 | resp->qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp); |
| 1833 | if (mlx5_wc_support_get(dev->mdev)) |
| 1834 | resp->bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, |
| 1835 | log_bf_reg_size); |
| 1836 | resp->cache_line_size = cache_line_size(); |
| 1837 | resp->max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq); |
| 1838 | resp->max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq); |
| 1839 | resp->max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); |
| 1840 | resp->max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz); |
| 1841 | resp->max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz); |
| 1842 | resp->cqe_version = context->cqe_version; |
| 1843 | resp->log_uar_size = MLX5_CAP_GEN(dev->mdev, uar_4k) ? |
| 1844 | MLX5_ADAPTER_PAGE_SHIFT : PAGE_SHIFT; |
| 1845 | resp->num_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? |
| 1846 | MLX5_CAP_GEN(dev->mdev, |
| 1847 | num_of_uars_per_page) : 1; |
| 1848 | resp->tot_bfregs = bfregi->lib_uar_dyn ? 0 : |
| 1849 | bfregi->total_num_bfregs - bfregi->num_dyn_bfregs; |
| 1850 | resp->num_ports = dev->num_ports; |
| 1851 | resp->cmds_supp_uhw |= MLX5_USER_CMDS_SUPP_UHW_QUERY_DEVICE | |
| 1852 | MLX5_USER_CMDS_SUPP_UHW_CREATE_AH; |
| 1853 | |
| 1854 | if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) { |
| 1855 | mlx5_query_min_inline(dev->mdev, &resp->eth_min_inline); |
| 1856 | resp->eth_min_inline++; |
| 1857 | } |
| 1858 | |
| 1859 | if (dev->mdev->clock_info) |
| 1860 | resp->clock_info_versions = BIT(MLX5_IB_CLOCK_INFO_V1); |
| 1861 | |
| 1862 | /* |
| 1863 | * We don't want to expose information from the PCI bar that is located |
| 1864 | * after 4096 bytes, so if the arch only supports larger pages, let's |
| 1865 | * pretend we don't support reading the HCA's core clock. This is also |
| 1866 | * forced by mmap function. |
| 1867 | */ |
| 1868 | if (PAGE_SIZE <= 4096) { |
| 1869 | resp->comp_mask |= |
| 1870 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET; |
| 1871 | resp->hca_core_clock_offset = |
| 1872 | offsetof(struct mlx5_init_seg, |
| 1873 | internal_timer_h) % PAGE_SIZE; |
| 1874 | } |
| 1875 | |
| 1876 | if (MLX5_CAP_GEN(dev->mdev, ece_support)) |
| 1877 | resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_ECE; |
| 1878 | |
| 1879 | if (rt_supported(MLX5_CAP_GEN(dev->mdev, sq_ts_format)) && |
| 1880 | rt_supported(MLX5_CAP_GEN(dev->mdev, rq_ts_format)) && |
| 1881 | rt_supported(MLX5_CAP_ROCE(dev->mdev, qp_ts_format))) |
| 1882 | resp->comp_mask |= |
| 1883 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_REAL_TIME_TS; |
| 1884 | |
| 1885 | resp->num_dyn_bfregs = bfregi->num_dyn_bfregs; |
| 1886 | |
| 1887 | if (MLX5_CAP_GEN(dev->mdev, drain_sigerr)) |
| 1888 | resp->comp_mask |= MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_SQD2RTS; |
| 1889 | |
| 1890 | resp->comp_mask |= |
| 1891 | MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_MKEY_UPDATE_TAG; |
| 1892 | |
| 1893 | return 0; |
| 1894 | } |
| 1895 | |
| 1896 | static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, |
| 1897 | struct ib_udata *udata) |
| 1898 | { |
| 1899 | struct ib_device *ibdev = uctx->device; |
| 1900 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 1901 | struct mlx5_ib_alloc_ucontext_req_v2 req = {}; |
| 1902 | struct mlx5_ib_alloc_ucontext_resp resp = {}; |
| 1903 | struct mlx5_ib_ucontext *context = to_mucontext(uctx); |
| 1904 | struct mlx5_bfreg_info *bfregi; |
| 1905 | int ver; |
| 1906 | int err; |
| 1907 | size_t min_req_v2 = offsetof(struct mlx5_ib_alloc_ucontext_req_v2, |
| 1908 | max_cqe_version); |
| 1909 | bool lib_uar_4k; |
| 1910 | bool lib_uar_dyn; |
| 1911 | |
| 1912 | if (!dev->ib_active) |
| 1913 | return -EAGAIN; |
| 1914 | |
| 1915 | if (udata->inlen == sizeof(struct mlx5_ib_alloc_ucontext_req)) |
| 1916 | ver = 0; |
| 1917 | else if (udata->inlen >= min_req_v2) |
| 1918 | ver = 2; |
| 1919 | else |
| 1920 | return -EINVAL; |
| 1921 | |
| 1922 | err = ib_copy_from_udata(&req, udata, min(udata->inlen, sizeof(req))); |
| 1923 | if (err) |
| 1924 | return err; |
| 1925 | |
| 1926 | if (req.flags & ~MLX5_IB_ALLOC_UCTX_DEVX) |
| 1927 | return -EOPNOTSUPP; |
| 1928 | |
| 1929 | if (req.comp_mask || req.reserved0 || req.reserved1 || req.reserved2) |
| 1930 | return -EOPNOTSUPP; |
| 1931 | |
| 1932 | req.total_num_bfregs = ALIGN(req.total_num_bfregs, |
| 1933 | MLX5_NON_FP_BFREGS_PER_UAR); |
| 1934 | if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) |
| 1935 | return -EINVAL; |
| 1936 | |
| 1937 | if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { |
| 1938 | err = mlx5_ib_devx_create(dev, true); |
| 1939 | if (err < 0) |
| 1940 | goto out_ctx; |
| 1941 | context->devx_uid = err; |
| 1942 | } |
| 1943 | |
| 1944 | lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; |
| 1945 | lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; |
| 1946 | bfregi = &context->bfregi; |
| 1947 | |
| 1948 | if (lib_uar_dyn) { |
| 1949 | bfregi->lib_uar_dyn = lib_uar_dyn; |
| 1950 | goto uar_done; |
| 1951 | } |
| 1952 | |
| 1953 | /* updates req->total_num_bfregs */ |
| 1954 | err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); |
| 1955 | if (err) |
| 1956 | goto out_devx; |
| 1957 | |
| 1958 | mutex_init(&bfregi->lock); |
| 1959 | bfregi->lib_uar_4k = lib_uar_4k; |
| 1960 | bfregi->count = kcalloc(bfregi->total_num_bfregs, sizeof(*bfregi->count), |
| 1961 | GFP_KERNEL); |
| 1962 | if (!bfregi->count) { |
| 1963 | err = -ENOMEM; |
| 1964 | goto out_devx; |
| 1965 | } |
| 1966 | |
| 1967 | bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, |
| 1968 | sizeof(*bfregi->sys_pages), |
| 1969 | GFP_KERNEL); |
| 1970 | if (!bfregi->sys_pages) { |
| 1971 | err = -ENOMEM; |
| 1972 | goto out_count; |
| 1973 | } |
| 1974 | |
| 1975 | err = allocate_uars(dev, context); |
| 1976 | if (err) |
| 1977 | goto out_sys_pages; |
| 1978 | |
| 1979 | uar_done: |
| 1980 | err = mlx5_ib_alloc_transport_domain(dev, &context->tdn, |
| 1981 | context->devx_uid); |
| 1982 | if (err) |
| 1983 | goto out_uars; |
| 1984 | |
| 1985 | INIT_LIST_HEAD(&context->db_page_list); |
| 1986 | mutex_init(&context->db_page_mutex); |
| 1987 | |
| 1988 | context->cqe_version = min_t(__u8, |
| 1989 | (__u8)MLX5_CAP_GEN(dev->mdev, cqe_version), |
| 1990 | req.max_cqe_version); |
| 1991 | |
| 1992 | err = set_ucontext_resp(uctx, &resp); |
| 1993 | if (err) |
| 1994 | goto out_mdev; |
| 1995 | |
| 1996 | resp.response_length = min(udata->outlen, sizeof(resp)); |
| 1997 | err = ib_copy_to_udata(udata, &resp, resp.response_length); |
| 1998 | if (err) |
| 1999 | goto out_mdev; |
| 2000 | |
| 2001 | bfregi->ver = ver; |
| 2002 | bfregi->num_low_latency_bfregs = req.num_low_latency_bfregs; |
| 2003 | context->lib_caps = req.lib_caps; |
| 2004 | print_lib_caps(dev, context->lib_caps); |
| 2005 | |
| 2006 | if (mlx5_ib_lag_should_assign_affinity(dev)) { |
| 2007 | u32 port = mlx5_core_native_port_num(dev->mdev) - 1; |
| 2008 | |
| 2009 | atomic_set(&context->tx_port_affinity, |
| 2010 | atomic_add_return( |
| 2011 | 1, &dev->port[port].roce.tx_port_affinity)); |
| 2012 | } |
| 2013 | |
| 2014 | return 0; |
| 2015 | |
| 2016 | out_mdev: |
| 2017 | mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); |
| 2018 | |
| 2019 | out_uars: |
| 2020 | deallocate_uars(dev, context); |
| 2021 | |
| 2022 | out_sys_pages: |
| 2023 | kfree(bfregi->sys_pages); |
| 2024 | |
| 2025 | out_count: |
| 2026 | kfree(bfregi->count); |
| 2027 | |
| 2028 | out_devx: |
| 2029 | if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) |
| 2030 | mlx5_ib_devx_destroy(dev, context->devx_uid); |
| 2031 | |
| 2032 | out_ctx: |
| 2033 | return err; |
| 2034 | } |
| 2035 | |
| 2036 | static int mlx5_ib_query_ucontext(struct ib_ucontext *ibcontext, |
| 2037 | struct uverbs_attr_bundle *attrs) |
| 2038 | { |
| 2039 | struct mlx5_ib_alloc_ucontext_resp uctx_resp = {}; |
| 2040 | int ret; |
| 2041 | |
| 2042 | ret = set_ucontext_resp(ibcontext, &uctx_resp); |
| 2043 | if (ret) |
| 2044 | return ret; |
| 2045 | |
| 2046 | uctx_resp.response_length = |
| 2047 | min_t(size_t, |
| 2048 | uverbs_attr_get_len(attrs, |
| 2049 | MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX), |
| 2050 | sizeof(uctx_resp)); |
| 2051 | |
| 2052 | ret = uverbs_copy_to_struct_or_zero(attrs, |
| 2053 | MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, |
| 2054 | &uctx_resp, |
| 2055 | sizeof(uctx_resp)); |
| 2056 | return ret; |
| 2057 | } |
| 2058 | |
| 2059 | static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) |
| 2060 | { |
| 2061 | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); |
| 2062 | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); |
| 2063 | struct mlx5_bfreg_info *bfregi; |
| 2064 | |
| 2065 | bfregi = &context->bfregi; |
| 2066 | mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); |
| 2067 | |
| 2068 | deallocate_uars(dev, context); |
| 2069 | kfree(bfregi->sys_pages); |
| 2070 | kfree(bfregi->count); |
| 2071 | |
| 2072 | if (context->devx_uid) |
| 2073 | mlx5_ib_devx_destroy(dev, context->devx_uid); |
| 2074 | } |
| 2075 | |
| 2076 | static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, |
| 2077 | int uar_idx) |
| 2078 | { |
| 2079 | int fw_uars_per_page; |
| 2080 | |
| 2081 | fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? MLX5_UARS_IN_PAGE : 1; |
| 2082 | |
| 2083 | return (dev->mdev->bar_addr >> PAGE_SHIFT) + uar_idx / fw_uars_per_page; |
| 2084 | } |
| 2085 | |
| 2086 | static u64 uar_index2paddress(struct mlx5_ib_dev *dev, |
| 2087 | int uar_idx) |
| 2088 | { |
| 2089 | unsigned int fw_uars_per_page; |
| 2090 | |
| 2091 | fw_uars_per_page = MLX5_CAP_GEN(dev->mdev, uar_4k) ? |
| 2092 | MLX5_UARS_IN_PAGE : 1; |
| 2093 | |
| 2094 | return (dev->mdev->bar_addr + (uar_idx / fw_uars_per_page) * PAGE_SIZE); |
| 2095 | } |
| 2096 | |
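/*
 * The mmap offset encodes a command in the bits above
 * MLX5_IB_MMAP_CMD_SHIFT and a command-specific argument below them.
 */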
| 2097 | static int get_command(unsigned long offset) |
| 2098 | { |
| 2099 | return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK; |
| 2100 | } |
| 2101 | |
| 2102 | static int get_arg(unsigned long offset) |
| 2103 | { |
| 2104 | return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1); |
| 2105 | } |
| 2106 | |
| 2107 | static int get_index(unsigned long offset) |
| 2108 | { |
| 2109 | return get_arg(offset); |
| 2110 | } |
| 2111 | |
/* The index resides in an extra byte to enable values larger than 255 */
| 2113 | static int get_extended_index(unsigned long offset) |
| 2114 | { |
| 2115 | return get_arg(offset) | ((offset >> 16) & 0xff) << 8; |
| 2116 | } |
| 2118 | |
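/*
 * Nothing to do here: device-memory mmaps are registered through
 * rdma_user_mmap_io(), which the RDMA core zaps on disassociate, and the
 * clock-info page is an ordinary refcounted kernel page.
 */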
| 2119 | static void mlx5_ib_disassociate_ucontext(struct ib_ucontext *ibcontext) |
| 2120 | { |
| 2121 | } |
| 2122 | |
| 2123 | static inline char *mmap_cmd2str(enum mlx5_ib_mmap_cmd cmd) |
| 2124 | { |
| 2125 | switch (cmd) { |
| 2126 | case MLX5_IB_MMAP_WC_PAGE: |
| 2127 | return "WC"; |
| 2128 | case MLX5_IB_MMAP_REGULAR_PAGE: |
| 2129 | return "best effort WC"; |
| 2130 | case MLX5_IB_MMAP_NC_PAGE: |
| 2131 | return "NC"; |
| 2132 | case MLX5_IB_MMAP_DEVICE_MEM: |
| 2133 | return "Device Memory"; |
| 2134 | default: |
| 2135 | return "Unknown"; |
| 2136 | } |
| 2137 | } |
| 2138 | |
| 2139 | static int mlx5_ib_mmap_clock_info_page(struct mlx5_ib_dev *dev, |
| 2140 | struct vm_area_struct *vma, |
| 2141 | struct mlx5_ib_ucontext *context) |
| 2142 | { |
| 2143 | if ((vma->vm_end - vma->vm_start != PAGE_SIZE) || |
| 2144 | !(vma->vm_flags & VM_SHARED)) |
| 2145 | return -EINVAL; |
| 2146 | |
| 2147 | if (get_index(vma->vm_pgoff) != MLX5_IB_CLOCK_INFO_V1) |
| 2148 | return -EOPNOTSUPP; |
| 2149 | |
| 2150 | if (vma->vm_flags & (VM_WRITE | VM_EXEC)) |
| 2151 | return -EPERM; |
| 2152 | vm_flags_clear(vma, VM_MAYWRITE); |
| 2153 | |
| 2154 | if (!dev->mdev->clock_info) |
| 2155 | return -EOPNOTSUPP; |
| 2156 | |
| 2157 | return vm_insert_page(vma, vma->vm_start, |
| 2158 | virt_to_page(dev->mdev->clock_info)); |
| 2159 | } |
| 2160 | |
| 2161 | static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) |
| 2162 | { |
| 2163 | struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); |
| 2164 | struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); |
| 2165 | struct mlx5_var_table *var_table = &dev->var_table; |
| 2166 | struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext); |
| 2167 | |
| 2168 | switch (mentry->mmap_flag) { |
| 2169 | case MLX5_IB_MMAP_TYPE_MEMIC: |
| 2170 | case MLX5_IB_MMAP_TYPE_MEMIC_OP: |
| 2171 | mlx5_ib_dm_mmap_free(dev, mentry); |
| 2172 | break; |
| 2173 | case MLX5_IB_MMAP_TYPE_VAR: |
| 2174 | mutex_lock(&var_table->bitmap_lock); |
| 2175 | clear_bit(mentry->page_idx, var_table->bitmap); |
| 2176 | mutex_unlock(&var_table->bitmap_lock); |
| 2177 | kfree(mentry); |
| 2178 | break; |
| 2179 | case MLX5_IB_MMAP_TYPE_UAR_WC: |
| 2180 | case MLX5_IB_MMAP_TYPE_UAR_NC: |
| 2181 | mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx, |
| 2182 | context->devx_uid); |
| 2183 | kfree(mentry); |
| 2184 | break; |
| 2185 | default: |
| 2186 | WARN_ON(true); |
| 2187 | } |
| 2188 | } |
| 2189 | |
| 2190 | static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, |
| 2191 | struct vm_area_struct *vma, |
| 2192 | struct mlx5_ib_ucontext *context) |
| 2193 | { |
| 2194 | struct mlx5_bfreg_info *bfregi = &context->bfregi; |
| 2195 | int err; |
| 2196 | unsigned long idx; |
| 2197 | phys_addr_t pfn; |
| 2198 | pgprot_t prot; |
| 2199 | u32 bfreg_dyn_idx = 0; |
| 2200 | u32 uar_index; |
| 2201 | int dyn_uar = (cmd == MLX5_IB_MMAP_ALLOC_WC); |
| 2202 | int max_valid_idx = dyn_uar ? bfregi->num_sys_pages : |
| 2203 | bfregi->num_static_sys_pages; |
| 2204 | |
| 2205 | if (bfregi->lib_uar_dyn) |
| 2206 | return -EINVAL; |
| 2207 | |
| 2208 | if (vma->vm_end - vma->vm_start != PAGE_SIZE) |
| 2209 | return -EINVAL; |
| 2210 | |
| 2211 | if (dyn_uar) |
| 2212 | idx = get_extended_index(vma->vm_pgoff) + bfregi->num_static_sys_pages; |
| 2213 | else |
| 2214 | idx = get_index(vma->vm_pgoff); |
| 2215 | |
| 2216 | if (idx >= max_valid_idx) { |
| 2217 | mlx5_ib_warn(dev, "invalid uar index %lu, max=%d\n", |
| 2218 | idx, max_valid_idx); |
| 2219 | return -EINVAL; |
| 2220 | } |
| 2221 | |
| 2222 | switch (cmd) { |
| 2223 | case MLX5_IB_MMAP_WC_PAGE: |
| 2224 | case MLX5_IB_MMAP_ALLOC_WC: |
| 2225 | case MLX5_IB_MMAP_REGULAR_PAGE: |
| 2226 | /* For MLX5_IB_MMAP_REGULAR_PAGE do the best effort to get WC */ |
| 2227 | prot = pgprot_writecombine(vma->vm_page_prot); |
| 2228 | break; |
| 2229 | case MLX5_IB_MMAP_NC_PAGE: |
| 2230 | prot = pgprot_noncached(vma->vm_page_prot); |
| 2231 | break; |
| 2232 | default: |
| 2233 | return -EINVAL; |
| 2234 | } |
| 2235 | |
| 2236 | if (dyn_uar) { |
| 2237 | int uars_per_page; |
| 2238 | |
| 2239 | uars_per_page = get_uars_per_sys_page(dev, bfregi->lib_uar_4k); |
| 2240 | bfreg_dyn_idx = idx * (uars_per_page * MLX5_NON_FP_BFREGS_PER_UAR); |
| 2241 | if (bfreg_dyn_idx >= bfregi->total_num_bfregs) { |
| 2242 | mlx5_ib_warn(dev, "invalid bfreg_dyn_idx %u, max=%u\n", |
| 2243 | bfreg_dyn_idx, bfregi->total_num_bfregs); |
| 2244 | return -EINVAL; |
| 2245 | } |
| 2246 | |
| 2247 | mutex_lock(&bfregi->lock); |
		/* Fail if the UAR is already allocated; the first bfreg index of
		 * each page holds the count for its page.
		 */
| 2251 | if (bfregi->count[bfreg_dyn_idx]) { |
| 2252 | mlx5_ib_warn(dev, "wrong offset, idx %lu is busy, bfregn=%u\n", idx, bfreg_dyn_idx); |
| 2253 | mutex_unlock(&bfregi->lock); |
| 2254 | return -EINVAL; |
| 2255 | } |
| 2256 | |
| 2257 | bfregi->count[bfreg_dyn_idx]++; |
| 2258 | mutex_unlock(&bfregi->lock); |
| 2259 | |
| 2260 | err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, |
| 2261 | context->devx_uid); |
| 2262 | if (err) { |
| 2263 | mlx5_ib_warn(dev, "UAR alloc failed\n"); |
| 2264 | goto free_bfreg; |
| 2265 | } |
| 2266 | } else { |
| 2267 | uar_index = bfregi->sys_pages[idx]; |
| 2268 | } |
| 2269 | |
| 2270 | pfn = uar_index2pfn(dev, uar_index); |
| 2271 | mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn %pa\n", idx, &pfn); |
| 2272 | |
| 2273 | err = rdma_user_mmap_io(&context->ibucontext, vma, pfn, PAGE_SIZE, |
| 2274 | prot, NULL); |
| 2275 | if (err) { |
| 2276 | mlx5_ib_err(dev, |
| 2277 | "rdma_user_mmap_io failed with error=%d, mmap_cmd=%s\n", |
| 2278 | err, mmap_cmd2str(cmd)); |
| 2279 | goto err; |
| 2280 | } |
| 2281 | |
| 2282 | if (dyn_uar) |
| 2283 | bfregi->sys_pages[idx] = uar_index; |
| 2284 | return 0; |
| 2285 | |
| 2286 | err: |
| 2287 | if (!dyn_uar) |
| 2288 | return err; |
| 2289 | |
| 2290 | mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid); |
| 2291 | |
| 2292 | free_bfreg: |
| 2293 | mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); |
| 2294 | |
| 2295 | return err; |
| 2296 | } |
| 2297 | |
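/* Rebuild the rdma_user_mmap_entry pgoff key: command in the high bits,
 * extended index in the low 16 bits.
 */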
| 2298 | static unsigned long mlx5_vma_to_pgoff(struct vm_area_struct *vma) |
| 2299 | { |
| 2300 | unsigned long idx; |
| 2301 | u8 command; |
| 2302 | |
| 2303 | command = get_command(vma->vm_pgoff); |
| 2304 | idx = get_extended_index(vma->vm_pgoff); |
| 2305 | |
| 2306 | return (command << 16 | idx); |
| 2307 | } |
| 2308 | |
| 2309 | static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev, |
| 2310 | struct vm_area_struct *vma, |
| 2311 | struct ib_ucontext *ucontext) |
| 2312 | { |
| 2313 | struct mlx5_user_mmap_entry *mentry; |
| 2314 | struct rdma_user_mmap_entry *entry; |
| 2315 | unsigned long pgoff; |
| 2316 | pgprot_t prot; |
| 2317 | phys_addr_t pfn; |
| 2318 | int ret; |
| 2319 | |
| 2320 | pgoff = mlx5_vma_to_pgoff(vma); |
| 2321 | entry = rdma_user_mmap_entry_get_pgoff(ucontext, pgoff); |
| 2322 | if (!entry) |
| 2323 | return -EINVAL; |
| 2324 | |
| 2325 | mentry = to_mmmap(entry); |
| 2326 | pfn = (mentry->address >> PAGE_SHIFT); |
| 2327 | if (mentry->mmap_flag == MLX5_IB_MMAP_TYPE_VAR || |
| 2328 | mentry->mmap_flag == MLX5_IB_MMAP_TYPE_UAR_NC) |
| 2329 | prot = pgprot_noncached(vma->vm_page_prot); |
| 2330 | else |
| 2331 | prot = pgprot_writecombine(vma->vm_page_prot); |
| 2332 | ret = rdma_user_mmap_io(ucontext, vma, pfn, |
| 2333 | entry->npages * PAGE_SIZE, |
| 2334 | prot, |
| 2335 | entry); |
| 2336 | rdma_user_mmap_entry_put(&mentry->rdma_entry); |
| 2337 | return ret; |
| 2338 | } |
| 2339 | |
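/*
 * Translate an rdma mmap entry back into the offset user space passes to
 * mmap(): the inverse of get_command()/get_extended_index().
 */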
| 2340 | static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry) |
| 2341 | { |
| 2342 | u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF; |
| 2343 | u64 index = entry->rdma_entry.start_pgoff & 0xFFFF; |
| 2344 | |
| 2345 | return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) | |
| 2346 | (index & 0xFF)) << PAGE_SHIFT; |
| 2347 | } |
| 2348 | |
| 2349 | static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma) |
| 2350 | { |
| 2351 | struct mlx5_ib_ucontext *context = to_mucontext(ibcontext); |
| 2352 | struct mlx5_ib_dev *dev = to_mdev(ibcontext->device); |
| 2353 | unsigned long command; |
| 2354 | phys_addr_t pfn; |
| 2355 | |
| 2356 | command = get_command(vma->vm_pgoff); |
| 2357 | switch (command) { |
| 2358 | case MLX5_IB_MMAP_WC_PAGE: |
| 2359 | case MLX5_IB_MMAP_ALLOC_WC: |
| 2360 | if (!mlx5_wc_support_get(dev->mdev)) |
| 2361 | return -EPERM; |
| 2362 | fallthrough; |
| 2363 | case MLX5_IB_MMAP_NC_PAGE: |
| 2364 | case MLX5_IB_MMAP_REGULAR_PAGE: |
| 2365 | return uar_mmap(dev, command, vma, context); |
| 2366 | |
| 2367 | case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES: |
| 2368 | return -ENOSYS; |
| 2369 | |
| 2370 | case MLX5_IB_MMAP_CORE_CLOCK: |
| 2371 | if (vma->vm_end - vma->vm_start != PAGE_SIZE) |
| 2372 | return -EINVAL; |
| 2373 | |
| 2374 | if (vma->vm_flags & VM_WRITE) |
| 2375 | return -EPERM; |
| 2376 | vm_flags_clear(vma, VM_MAYWRITE); |
| 2377 | |
| 2378 | /* Don't expose to user-space information it shouldn't have */ |
| 2379 | if (PAGE_SIZE > 4096) |
| 2380 | return -EOPNOTSUPP; |
| 2381 | |
| 2382 | pfn = (dev->mdev->iseg_base + |
| 2383 | offsetof(struct mlx5_init_seg, internal_timer_h)) >> |
| 2384 | PAGE_SHIFT; |
| 2385 | return rdma_user_mmap_io(&context->ibucontext, vma, pfn, |
| 2386 | PAGE_SIZE, |
| 2387 | pgprot_noncached(vma->vm_page_prot), |
| 2388 | NULL); |
| 2389 | case MLX5_IB_MMAP_CLOCK_INFO: |
| 2390 | return mlx5_ib_mmap_clock_info_page(dev, vma, context); |
| 2391 | |
| 2392 | default: |
| 2393 | return mlx5_ib_mmap_offset(dev, vma, ibcontext); |
| 2394 | } |
| 2395 | |
| 2396 | return 0; |
| 2397 | } |
| 2398 | |
| 2399 | static int mlx5_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) |
| 2400 | { |
| 2401 | struct mlx5_ib_pd *pd = to_mpd(ibpd); |
| 2402 | struct ib_device *ibdev = ibpd->device; |
| 2403 | struct mlx5_ib_alloc_pd_resp resp; |
| 2404 | int err; |
| 2405 | u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {}; |
| 2406 | u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {}; |
| 2407 | u16 uid = 0; |
| 2408 | struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context( |
| 2409 | udata, struct mlx5_ib_ucontext, ibucontext); |
| 2410 | |
| 2411 | uid = context ? context->devx_uid : 0; |
| 2412 | MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD); |
| 2413 | MLX5_SET(alloc_pd_in, in, uid, uid); |
| 2414 | err = mlx5_cmd_exec_inout(to_mdev(ibdev)->mdev, alloc_pd, in, out); |
| 2415 | if (err) |
| 2416 | return err; |
| 2417 | |
| 2418 | pd->pdn = MLX5_GET(alloc_pd_out, out, pd); |
| 2419 | pd->uid = uid; |
| 2420 | if (udata) { |
| 2421 | resp.pdn = pd->pdn; |
| 2422 | if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { |
| 2423 | mlx5_cmd_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn, uid); |
| 2424 | return -EFAULT; |
| 2425 | } |
| 2426 | } |
| 2427 | |
| 2428 | return 0; |
| 2429 | } |
| 2430 | |
| 2431 | static int mlx5_ib_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata) |
| 2432 | { |
| 2433 | struct mlx5_ib_dev *mdev = to_mdev(pd->device); |
| 2434 | struct mlx5_ib_pd *mpd = to_mpd(pd); |
| 2435 | |
| 2436 | return mlx5_cmd_dealloc_pd(mdev->mdev, mpd->pdn, mpd->uid); |
| 2437 | } |
| 2438 | |
| 2439 | static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 2440 | { |
| 2441 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); |
| 2442 | struct mlx5_ib_qp *mqp = to_mqp(ibqp); |
| 2443 | int err; |
| 2444 | u16 uid; |
| 2445 | |
| 2446 | uid = ibqp->pd ? |
| 2447 | to_mpd(ibqp->pd)->uid : 0; |
| 2448 | |
| 2449 | if (mqp->flags & IB_QP_CREATE_SOURCE_QPN) { |
| 2450 | mlx5_ib_dbg(dev, "Attaching a multi cast group to underlay QP is not supported\n"); |
| 2451 | return -EOPNOTSUPP; |
| 2452 | } |
| 2453 | |
| 2454 | err = mlx5_cmd_attach_mcg(dev->mdev, gid, ibqp->qp_num, uid); |
| 2455 | if (err) |
| 2456 | mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n", |
| 2457 | ibqp->qp_num, gid->raw); |
| 2458 | |
| 2459 | return err; |
| 2460 | } |
| 2461 | |
| 2462 | static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) |
| 2463 | { |
| 2464 | struct mlx5_ib_dev *dev = to_mdev(ibqp->device); |
| 2465 | int err; |
| 2466 | u16 uid; |
| 2467 | |
| 2468 | uid = ibqp->pd ? |
| 2469 | to_mpd(ibqp->pd)->uid : 0; |
| 2470 | err = mlx5_cmd_detach_mcg(dev->mdev, gid, ibqp->qp_num, uid); |
| 2471 | if (err) |
| 2472 | mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n", |
| 2473 | ibqp->qp_num, gid->raw); |
| 2474 | |
| 2475 | return err; |
| 2476 | } |
| 2477 | |
| 2478 | static int init_node_data(struct mlx5_ib_dev *dev) |
| 2479 | { |
| 2480 | int err; |
| 2481 | |
| 2482 | err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc); |
| 2483 | if (err) |
| 2484 | return err; |
| 2485 | |
| 2486 | dev->mdev->rev_id = dev->mdev->pdev->revision; |
| 2487 | |
| 2488 | return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid); |
| 2489 | } |
| 2490 | |
| 2491 | static ssize_t fw_pages_show(struct device *device, |
| 2492 | struct device_attribute *attr, char *buf) |
| 2493 | { |
| 2494 | struct mlx5_ib_dev *dev = |
| 2495 | rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); |
| 2496 | |
| 2497 | return sysfs_emit(buf, "%d\n", dev->mdev->priv.fw_pages); |
| 2498 | } |
| 2499 | static DEVICE_ATTR_RO(fw_pages); |
| 2500 | |
| 2501 | static ssize_t reg_pages_show(struct device *device, |
| 2502 | struct device_attribute *attr, char *buf) |
| 2503 | { |
| 2504 | struct mlx5_ib_dev *dev = |
| 2505 | rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); |
| 2506 | |
| 2507 | return sysfs_emit(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages)); |
| 2508 | } |
| 2509 | static DEVICE_ATTR_RO(reg_pages); |
| 2510 | |
| 2511 | static ssize_t hca_type_show(struct device *device, |
| 2512 | struct device_attribute *attr, char *buf) |
| 2513 | { |
| 2514 | struct mlx5_ib_dev *dev = |
| 2515 | rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); |
| 2516 | |
| 2517 | return sysfs_emit(buf, "MT%d\n", dev->mdev->pdev->device); |
| 2518 | } |
| 2519 | static DEVICE_ATTR_RO(hca_type); |
| 2520 | |
| 2521 | static ssize_t hw_rev_show(struct device *device, |
| 2522 | struct device_attribute *attr, char *buf) |
| 2523 | { |
| 2524 | struct mlx5_ib_dev *dev = |
| 2525 | rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); |
| 2526 | |
| 2527 | return sysfs_emit(buf, "%x\n", dev->mdev->rev_id); |
| 2528 | } |
| 2529 | static DEVICE_ATTR_RO(hw_rev); |
| 2530 | |
| 2531 | static ssize_t board_id_show(struct device *device, |
| 2532 | struct device_attribute *attr, char *buf) |
| 2533 | { |
| 2534 | struct mlx5_ib_dev *dev = |
| 2535 | rdma_device_to_drv_device(device, struct mlx5_ib_dev, ib_dev); |
| 2536 | |
| 2537 | return sysfs_emit(buf, "%.*s\n", MLX5_BOARD_ID_LEN, |
| 2538 | dev->mdev->board_id); |
| 2539 | } |
| 2540 | static DEVICE_ATTR_RO(board_id); |
| 2541 | |
| 2542 | static struct attribute *mlx5_class_attributes[] = { |
| 2543 | &dev_attr_hw_rev.attr, |
| 2544 | &dev_attr_hca_type.attr, |
| 2545 | &dev_attr_board_id.attr, |
| 2546 | &dev_attr_fw_pages.attr, |
| 2547 | &dev_attr_reg_pages.attr, |
| 2548 | NULL, |
| 2549 | }; |
| 2550 | |
| 2551 | static const struct attribute_group mlx5_attr_group = { |
| 2552 | .attrs = mlx5_class_attributes, |
| 2553 | }; |
| 2554 | |
| 2555 | static void pkey_change_handler(struct work_struct *work) |
| 2556 | { |
| 2557 | struct mlx5_ib_port_resources *ports = |
| 2558 | container_of(work, struct mlx5_ib_port_resources, |
| 2559 | pkey_change_work); |
| 2560 | |
| 2561 | if (!ports->gsi) |
| 2562 | /* |
| 2563 | * We got this event before device was fully configured |
| 2564 | * and MAD registration code wasn't called/finished yet. |
| 2565 | */ |
| 2566 | return; |
| 2567 | |
| 2568 | mlx5_ib_gsi_pkey_change(ports->gsi); |
| 2569 | } |
| 2570 | |
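/*
 * On a fatal device error, run the completion handler of every CQ that
 * still has outstanding work so user space wakes up and observes the
 * failure instead of waiting for completions that will never arrive.
 */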
| 2571 | static void mlx5_ib_handle_internal_error(struct mlx5_ib_dev *ibdev) |
| 2572 | { |
| 2573 | struct mlx5_ib_qp *mqp; |
| 2574 | struct mlx5_ib_cq *send_mcq, *recv_mcq; |
| 2575 | struct mlx5_core_cq *mcq; |
| 2576 | struct list_head cq_armed_list; |
| 2577 | unsigned long flags_qp; |
| 2578 | unsigned long flags_cq; |
| 2579 | unsigned long flags; |
| 2580 | |
| 2581 | INIT_LIST_HEAD(&cq_armed_list); |
| 2582 | |
	/* Go over the QP list of this ibdev, synchronizing with create/destroy QP. */
| 2584 | spin_lock_irqsave(&ibdev->reset_flow_resource_lock, flags); |
| 2585 | list_for_each_entry(mqp, &ibdev->qp_list, qps_list) { |
| 2586 | spin_lock_irqsave(&mqp->sq.lock, flags_qp); |
| 2587 | if (mqp->sq.tail != mqp->sq.head) { |
| 2588 | send_mcq = to_mcq(mqp->ibqp.send_cq); |
| 2589 | spin_lock_irqsave(&send_mcq->lock, flags_cq); |
| 2590 | if (send_mcq->mcq.comp && |
| 2591 | mqp->ibqp.send_cq->comp_handler) { |
| 2592 | if (!send_mcq->mcq.reset_notify_added) { |
| 2593 | send_mcq->mcq.reset_notify_added = 1; |
| 2594 | list_add_tail(&send_mcq->mcq.reset_notify, |
| 2595 | &cq_armed_list); |
| 2596 | } |
| 2597 | } |
| 2598 | spin_unlock_irqrestore(&send_mcq->lock, flags_cq); |
| 2599 | } |
| 2600 | spin_unlock_irqrestore(&mqp->sq.lock, flags_qp); |
| 2601 | spin_lock_irqsave(&mqp->rq.lock, flags_qp); |
| 2602 | /* no handling is needed for SRQ */ |
| 2603 | if (!mqp->ibqp.srq) { |
| 2604 | if (mqp->rq.tail != mqp->rq.head) { |
| 2605 | recv_mcq = to_mcq(mqp->ibqp.recv_cq); |
| 2606 | spin_lock_irqsave(&recv_mcq->lock, flags_cq); |
| 2607 | if (recv_mcq->mcq.comp && |
| 2608 | mqp->ibqp.recv_cq->comp_handler) { |
| 2609 | if (!recv_mcq->mcq.reset_notify_added) { |
| 2610 | recv_mcq->mcq.reset_notify_added = 1; |
| 2611 | list_add_tail(&recv_mcq->mcq.reset_notify, |
| 2612 | &cq_armed_list); |
| 2613 | } |
| 2614 | } |
| 2615 | spin_unlock_irqrestore(&recv_mcq->lock, |
| 2616 | flags_cq); |
| 2617 | } |
| 2618 | } |
| 2619 | spin_unlock_irqrestore(&mqp->rq.lock, flags_qp); |
| 2620 | } |
	/* At this point, all in-flight post-send work has been flushed by the
	 * lock/unlock of the locks above. Now trigger completion on all the
	 * CQs collected in cq_armed_list.
	 */
| 2624 | list_for_each_entry(mcq, &cq_armed_list, reset_notify) { |
| 2625 | mcq->comp(mcq, NULL); |
| 2626 | } |
| 2627 | spin_unlock_irqrestore(&ibdev->reset_flow_resource_lock, flags); |
| 2628 | } |
| 2629 | |
| 2630 | static void delay_drop_handler(struct work_struct *work) |
| 2631 | { |
| 2632 | int err; |
| 2633 | struct mlx5_ib_delay_drop *delay_drop = |
| 2634 | container_of(work, struct mlx5_ib_delay_drop, |
| 2635 | delay_drop_work); |
| 2636 | |
| 2637 | atomic_inc(&delay_drop->events_cnt); |
| 2638 | |
| 2639 | mutex_lock(&delay_drop->lock); |
| 2640 | err = mlx5_core_set_delay_drop(delay_drop->dev, delay_drop->timeout); |
| 2641 | if (err) { |
| 2642 | mlx5_ib_warn(delay_drop->dev, "Failed to set delay drop, timeout=%u\n", |
| 2643 | delay_drop->timeout); |
| 2644 | delay_drop->activate = false; |
| 2645 | } |
| 2646 | mutex_unlock(&delay_drop->lock); |
| 2647 | } |
| 2648 | |
| 2649 | static void handle_general_event(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, |
| 2650 | struct ib_event *ibev) |
| 2651 | { |
| 2652 | u32 port = (eqe->data.port.port >> 4) & 0xf; |
| 2653 | |
| 2654 | switch (eqe->sub_type) { |
| 2655 | case MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT: |
| 2656 | if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == |
| 2657 | IB_LINK_LAYER_ETHERNET) |
| 2658 | schedule_work(&ibdev->delay_drop.delay_drop_work); |
| 2659 | break; |
| 2660 | default: /* do nothing */ |
| 2661 | return; |
| 2662 | } |
| 2663 | } |
| 2664 | |
| 2665 | static int handle_port_change(struct mlx5_ib_dev *ibdev, struct mlx5_eqe *eqe, |
| 2666 | struct ib_event *ibev) |
| 2667 | { |
| 2668 | u32 port = (eqe->data.port.port >> 4) & 0xf; |
| 2669 | |
| 2670 | ibev->element.port_num = port; |
| 2671 | |
| 2672 | switch (eqe->sub_type) { |
| 2673 | case MLX5_PORT_CHANGE_SUBTYPE_ACTIVE: |
| 2674 | case MLX5_PORT_CHANGE_SUBTYPE_DOWN: |
| 2675 | case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED: |
| 2676 | /* In RoCE, port up/down events are handled in |
| 2677 | * mlx5_netdev_event(). |
| 2678 | */ |
| 2679 | if (mlx5_ib_port_link_layer(&ibdev->ib_dev, port) == |
| 2680 | IB_LINK_LAYER_ETHERNET) |
| 2681 | return -EINVAL; |
| 2682 | |
| 2683 | ibev->event = (eqe->sub_type == MLX5_PORT_CHANGE_SUBTYPE_ACTIVE) ? |
| 2684 | IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; |
| 2685 | break; |
| 2686 | |
| 2687 | case MLX5_PORT_CHANGE_SUBTYPE_LID: |
| 2688 | ibev->event = IB_EVENT_LID_CHANGE; |
| 2689 | break; |
| 2690 | |
| 2691 | case MLX5_PORT_CHANGE_SUBTYPE_PKEY: |
| 2692 | ibev->event = IB_EVENT_PKEY_CHANGE; |
| 2693 | schedule_work(&ibdev->devr.ports[port - 1].pkey_change_work); |
| 2694 | break; |
| 2695 | |
| 2696 | case MLX5_PORT_CHANGE_SUBTYPE_GUID: |
| 2697 | ibev->event = IB_EVENT_GID_CHANGE; |
| 2698 | break; |
| 2699 | |
| 2700 | case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG: |
| 2701 | ibev->event = IB_EVENT_CLIENT_REREGISTER; |
| 2702 | break; |
| 2703 | default: |
| 2704 | return -EINVAL; |
| 2705 | } |
| 2706 | |
| 2707 | return 0; |
| 2708 | } |
| 2709 | |
| 2710 | static void mlx5_ib_handle_event(struct work_struct *_work) |
| 2711 | { |
| 2712 | struct mlx5_ib_event_work *work = |
| 2713 | container_of(_work, struct mlx5_ib_event_work, work); |
| 2714 | struct mlx5_ib_dev *ibdev; |
| 2715 | struct ib_event ibev; |
| 2716 | bool fatal = false; |
| 2717 | |
| 2718 | if (work->is_slave) { |
| 2719 | ibdev = mlx5_ib_get_ibdev_from_mpi(work->mpi); |
| 2720 | if (!ibdev) |
| 2721 | goto out; |
| 2722 | } else { |
| 2723 | ibdev = work->dev; |
| 2724 | } |
| 2725 | |
| 2726 | switch (work->event) { |
| 2727 | case MLX5_DEV_EVENT_SYS_ERROR: |
| 2728 | ibev.event = IB_EVENT_DEVICE_FATAL; |
| 2729 | mlx5_ib_handle_internal_error(ibdev); |
| 2730 | ibev.element.port_num = (u8)(unsigned long)work->param; |
| 2731 | fatal = true; |
| 2732 | break; |
| 2733 | case MLX5_EVENT_TYPE_PORT_CHANGE: |
| 2734 | if (handle_port_change(ibdev, work->param, &ibev)) |
| 2735 | goto out; |
| 2736 | break; |
| 2737 | case MLX5_EVENT_TYPE_GENERAL_EVENT: |
| 2738 | handle_general_event(ibdev, work->param, &ibev); |
| 2739 | fallthrough; |
| 2740 | default: |
| 2741 | goto out; |
| 2742 | } |
| 2743 | |
| 2744 | ibev.device = &ibdev->ib_dev; |
| 2745 | |
| 2746 | if (!rdma_is_port_valid(&ibdev->ib_dev, ibev.element.port_num)) { |
| 2747 | mlx5_ib_warn(ibdev, "warning: event on port %d\n", ibev.element.port_num); |
| 2748 | goto out; |
| 2749 | } |
| 2750 | |
| 2751 | if (ibdev->ib_active) |
| 2752 | ib_dispatch_event(&ibev); |
| 2753 | |
| 2754 | if (fatal) |
| 2755 | ibdev->ib_active = false; |
| 2756 | out: |
| 2757 | kfree(work); |
| 2758 | } |
| 2759 | |
| 2760 | static int mlx5_ib_event(struct notifier_block *nb, |
| 2761 | unsigned long event, void *param) |
| 2762 | { |
| 2763 | struct mlx5_ib_event_work *work; |
| 2764 | |
| 2765 | work = kmalloc(sizeof(*work), GFP_ATOMIC); |
| 2766 | if (!work) |
| 2767 | return NOTIFY_DONE; |
| 2768 | |
| 2769 | INIT_WORK(&work->work, mlx5_ib_handle_event); |
| 2770 | work->dev = container_of(nb, struct mlx5_ib_dev, mdev_events); |
| 2771 | work->is_slave = false; |
| 2772 | work->param = param; |
| 2773 | work->event = event; |
| 2774 | |
| 2775 | queue_work(mlx5_ib_event_wq, &work->work); |
| 2776 | |
| 2777 | return NOTIFY_OK; |
| 2778 | } |
| 2779 | |
| 2780 | static int mlx5_ib_event_slave_port(struct notifier_block *nb, |
| 2781 | unsigned long event, void *param) |
| 2782 | { |
| 2783 | struct mlx5_ib_event_work *work; |
| 2784 | |
| 2785 | work = kmalloc(sizeof(*work), GFP_ATOMIC); |
| 2786 | if (!work) |
| 2787 | return NOTIFY_DONE; |
| 2788 | |
| 2789 | INIT_WORK(&work->work, mlx5_ib_handle_event); |
| 2790 | work->mpi = container_of(nb, struct mlx5_ib_multiport_info, mdev_events); |
| 2791 | work->is_slave = true; |
| 2792 | work->param = param; |
| 2793 | work->event = event; |
| 2794 | queue_work(mlx5_ib_event_wq, &work->work); |
| 2795 | |
| 2796 | return NOTIFY_OK; |
| 2797 | } |
| 2798 | |
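/* Read the number of planes of a multi-plane port from the vport context
 * (reported as 0 when IB virtualization is not supported).
 */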
| 2799 | static int mlx5_ib_get_plane_num(struct mlx5_core_dev *mdev, u8 *num_plane) |
| 2800 | { |
| 2801 | struct mlx5_hca_vport_context vport_ctx; |
| 2802 | int err; |
| 2803 | |
| 2804 | *num_plane = 0; |
| 2805 | if (!MLX5_CAP_GEN(mdev, ib_virt)) |
| 2806 | return 0; |
| 2807 | |
| 2808 | err = mlx5_query_hca_vport_context(mdev, 0, 1, 0, &vport_ctx); |
| 2809 | if (err) |
| 2810 | return err; |
| 2811 | |
| 2812 | *num_plane = vport_ctx.num_plane; |
| 2813 | return 0; |
| 2814 | } |
| 2815 | |
| 2816 | static int set_has_smi_cap(struct mlx5_ib_dev *dev) |
| 2817 | { |
| 2818 | struct mlx5_hca_vport_context vport_ctx; |
| 2819 | int err; |
| 2820 | int port; |
| 2821 | |
| 2822 | if (MLX5_CAP_GEN(dev->mdev, port_type) != MLX5_CAP_PORT_TYPE_IB) |
| 2823 | return 0; |
| 2824 | |
| 2825 | for (port = 1; port <= dev->num_ports; port++) { |
| 2826 | if (dev->num_plane) { |
| 2827 | dev->port_caps[port - 1].has_smi = false; |
| 2828 | continue; |
| 2829 | } else if (!MLX5_CAP_GEN(dev->mdev, ib_virt) || |
| 2830 | dev->ib_dev.type == RDMA_DEVICE_TYPE_SMI) { |
| 2831 | dev->port_caps[port - 1].has_smi = true; |
| 2832 | continue; |
| 2833 | } |
| 2834 | |
| 2835 | err = mlx5_query_hca_vport_context(dev->mdev, 0, port, 0, |
| 2836 | &vport_ctx); |
| 2837 | if (err) { |
| 2838 | mlx5_ib_err(dev, "query_hca_vport_context for port=%d failed %d\n", |
| 2839 | port, err); |
| 2840 | return err; |
| 2841 | } |
| 2842 | dev->port_caps[port - 1].has_smi = vport_ctx.has_smi; |
| 2843 | } |
| 2844 | |
| 2845 | return 0; |
| 2846 | } |
| 2847 | |
| 2848 | static void get_ext_port_caps(struct mlx5_ib_dev *dev) |
| 2849 | { |
| 2850 | unsigned int port; |
| 2851 | |
| 2852 | rdma_for_each_port (&dev->ib_dev, port) |
| 2853 | mlx5_query_ext_port_caps(dev, port); |
| 2854 | } |
| 2855 | |
| 2856 | static u8 mlx5_get_umr_fence(u8 umr_fence_cap) |
| 2857 | { |
| 2858 | switch (umr_fence_cap) { |
| 2859 | case MLX5_CAP_UMR_FENCE_NONE: |
| 2860 | return MLX5_FENCE_MODE_NONE; |
| 2861 | case MLX5_CAP_UMR_FENCE_SMALL: |
| 2862 | return MLX5_FENCE_MODE_INITIATOR_SMALL; |
| 2863 | default: |
| 2864 | return MLX5_FENCE_MODE_STRONG_ORDERING; |
| 2865 | } |
| 2866 | } |
| 2867 | |
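/*
 * Lazily create the shared PD (devr->p0) and CQ (devr->c0); the
 * double-checked locking keeps the common path mutex-free.
 */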
| 2868 | int mlx5_ib_dev_res_cq_init(struct mlx5_ib_dev *dev) |
| 2869 | { |
| 2870 | struct mlx5_ib_resources *devr = &dev->devr; |
| 2871 | struct ib_cq_init_attr cq_attr = {.cqe = 1}; |
| 2872 | struct ib_device *ibdev; |
| 2873 | struct ib_pd *pd; |
| 2874 | struct ib_cq *cq; |
| 2875 | int ret = 0; |
| 2876 | |
| 2878 | /* |
| 2879 | * devr->c0 is set once, never changed until device unload. |
| 2880 | * Avoid taking the mutex if initialization is already done. |
| 2881 | */ |
| 2882 | if (devr->c0) |
| 2883 | return 0; |
| 2884 | |
| 2885 | mutex_lock(&devr->cq_lock); |
| 2886 | if (devr->c0) |
| 2887 | goto unlock; |
| 2888 | |
| 2889 | ibdev = &dev->ib_dev; |
| 2890 | pd = ib_alloc_pd(ibdev, 0); |
| 2891 | if (IS_ERR(pd)) { |
| 2892 | ret = PTR_ERR(pd); |
| 2893 | mlx5_ib_err(dev, "Couldn't allocate PD for res init, err=%d\n", ret); |
| 2894 | goto unlock; |
| 2895 | } |
| 2896 | |
| 2897 | cq = ib_create_cq(ibdev, NULL, NULL, NULL, &cq_attr); |
| 2898 | if (IS_ERR(cq)) { |
| 2899 | ret = PTR_ERR(cq); |
| 2900 | mlx5_ib_err(dev, "Couldn't create CQ for res init, err=%d\n", ret); |
| 2901 | ib_dealloc_pd(pd); |
| 2902 | goto unlock; |
| 2903 | } |
| 2904 | |
| 2905 | devr->p0 = pd; |
| 2906 | devr->c0 = cq; |
| 2907 | |
| 2908 | unlock: |
| 2909 | mutex_unlock(&devr->cq_lock); |
| 2910 | return ret; |
| 2911 | } |
| 2912 | |
| 2913 | int mlx5_ib_dev_res_srq_init(struct mlx5_ib_dev *dev) |
| 2914 | { |
| 2915 | struct mlx5_ib_resources *devr = &dev->devr; |
| 2916 | struct ib_srq_init_attr attr; |
| 2917 | struct ib_srq *s0, *s1; |
| 2918 | int ret = 0; |
| 2919 | |
| 2920 | /* |
| 2921 | * devr->s1 is set once, never changed until device unload. |
| 2922 | * Avoid taking the mutex if initialization is already done. |
| 2923 | */ |
| 2924 | if (devr->s1) |
| 2925 | return 0; |
| 2926 | |
| 2927 | mutex_lock(&devr->srq_lock); |
| 2928 | if (devr->s1) |
| 2929 | goto unlock; |
| 2930 | |
| 2931 | ret = mlx5_ib_dev_res_cq_init(dev); |
| 2932 | if (ret) |
| 2933 | goto unlock; |
| 2934 | |
| 2935 | memset(&attr, 0, sizeof(attr)); |
| 2936 | attr.attr.max_sge = 1; |
| 2937 | attr.attr.max_wr = 1; |
| 2938 | attr.srq_type = IB_SRQT_XRC; |
| 2939 | attr.ext.cq = devr->c0; |
| 2940 | |
| 2941 | s0 = ib_create_srq(devr->p0, &attr); |
| 2942 | if (IS_ERR(s0)) { |
| 2943 | ret = PTR_ERR(s0); |
| 2944 | mlx5_ib_err(dev, "Couldn't create SRQ 0 for res init, err=%d\n", ret); |
| 2945 | goto unlock; |
| 2946 | } |
| 2947 | |
| 2948 | memset(&attr, 0, sizeof(attr)); |
| 2949 | attr.attr.max_sge = 1; |
| 2950 | attr.attr.max_wr = 1; |
| 2951 | attr.srq_type = IB_SRQT_BASIC; |
| 2952 | |
| 2953 | s1 = ib_create_srq(devr->p0, &attr); |
	if (IS_ERR(s1)) {
		ret = PTR_ERR(s1);
		mlx5_ib_err(dev, "Couldn't create SRQ 1 for res init, err=%d\n", ret);
		ib_destroy_srq(s0);
		goto unlock;
	}
| 2959 | |
| 2960 | devr->s0 = s0; |
| 2961 | devr->s1 = s1; |
| 2962 | |
| 2963 | unlock: |
| 2964 | mutex_unlock(&devr->srq_lock); |
| 2965 | return ret; |
| 2966 | } |
| 2967 | |
| 2968 | static int mlx5_ib_dev_res_init(struct mlx5_ib_dev *dev) |
| 2969 | { |
| 2970 | struct mlx5_ib_resources *devr = &dev->devr; |
| 2971 | int port; |
| 2972 | int ret; |
| 2973 | |
| 2974 | if (!MLX5_CAP_GEN(dev->mdev, xrc)) |
| 2975 | return -EOPNOTSUPP; |
| 2976 | |
| 2977 | ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn0, 0); |
| 2978 | if (ret) |
| 2979 | return ret; |
| 2980 | |
| 2981 | ret = mlx5_cmd_xrcd_alloc(dev->mdev, &devr->xrcdn1, 0); |
| 2982 | if (ret) { |
| 2983 | mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0); |
| 2984 | return ret; |
| 2985 | } |
| 2986 | |
| 2987 | for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) |
| 2988 | INIT_WORK(&devr->ports[port].pkey_change_work, |
| 2989 | pkey_change_handler); |
| 2990 | |
| 2991 | mutex_init(&devr->cq_lock); |
| 2992 | mutex_init(&devr->srq_lock); |
| 2993 | |
| 2994 | return 0; |
| 2995 | } |
| 2996 | |
| 2997 | static void mlx5_ib_dev_res_cleanup(struct mlx5_ib_dev *dev) |
| 2998 | { |
| 2999 | struct mlx5_ib_resources *devr = &dev->devr; |
| 3000 | int port; |
| 3001 | |
| 3002 | /* |
| 3003 | * Make sure no change P_Key work items are still executing. |
| 3004 | * |
| 3005 | * At this stage, the mlx5_ib_event should be unregistered |
| 3006 | * and it ensures that no new works are added. |
| 3007 | */ |
| 3008 | for (port = 0; port < ARRAY_SIZE(devr->ports); ++port) |
| 3009 | cancel_work_sync(&devr->ports[port].pkey_change_work); |
| 3010 | |
| 3011 | /* After s0/s1 init, they are not unset during the device lifetime. */ |
| 3012 | if (devr->s1) { |
| 3013 | ib_destroy_srq(devr->s1); |
| 3014 | ib_destroy_srq(devr->s0); |
| 3015 | } |
| 3016 | mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn1, 0); |
| 3017 | mlx5_cmd_xrcd_dealloc(dev->mdev, devr->xrcdn0, 0); |
| 3018 | /* After p0/c0 init, they are not unset during the device lifetime. */ |
| 3019 | if (devr->c0) { |
| 3020 | ib_destroy_cq(devr->c0); |
| 3021 | ib_dealloc_pd(devr->p0); |
| 3022 | } |
| 3023 | mutex_destroy(&devr->cq_lock); |
| 3024 | mutex_destroy(&devr->srq_lock); |
| 3025 | } |
| 3026 | |
| 3027 | static u32 get_core_cap_flags(struct ib_device *ibdev, |
| 3028 | struct mlx5_hca_vport_context *rep) |
| 3029 | { |
| 3030 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 3031 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, 1); |
| 3032 | u8 l3_type_cap = MLX5_CAP_ROCE(dev->mdev, l3_type); |
| 3033 | u8 roce_version_cap = MLX5_CAP_ROCE(dev->mdev, roce_version); |
| 3034 | bool raw_support = !mlx5_core_mp_enabled(dev->mdev); |
| 3035 | u32 ret = 0; |
| 3036 | |
| 3037 | if (rep->grh_required) |
| 3038 | ret |= RDMA_CORE_CAP_IB_GRH_REQUIRED; |
| 3039 | |
| 3040 | if (dev->num_plane) |
| 3041 | return ret | RDMA_CORE_CAP_PROT_IB | RDMA_CORE_CAP_IB_MAD | |
| 3042 | RDMA_CORE_CAP_IB_CM | RDMA_CORE_CAP_IB_SA | |
| 3043 | RDMA_CORE_CAP_AF_IB; |
| 3044 | else if (ibdev->type == RDMA_DEVICE_TYPE_SMI) |
| 3045 | return ret | RDMA_CORE_CAP_IB_MAD | RDMA_CORE_CAP_IB_SMI; |
| 3046 | |
| 3047 | if (ll == IB_LINK_LAYER_INFINIBAND) |
| 3048 | return ret | RDMA_CORE_PORT_IBA_IB; |
| 3049 | |
| 3050 | if (raw_support) |
| 3051 | ret |= RDMA_CORE_PORT_RAW_PACKET; |
| 3052 | |
| 3053 | if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV4_CAP)) |
| 3054 | return ret; |
| 3055 | |
| 3056 | if (!(l3_type_cap & MLX5_ROCE_L3_TYPE_IPV6_CAP)) |
| 3057 | return ret; |
| 3058 | |
| 3059 | if (roce_version_cap & MLX5_ROCE_VERSION_1_CAP) |
| 3060 | ret |= RDMA_CORE_PORT_IBA_ROCE; |
| 3061 | |
| 3062 | if (roce_version_cap & MLX5_ROCE_VERSION_2_CAP) |
| 3063 | ret |= RDMA_CORE_PORT_IBA_ROCE_UDP_ENCAP; |
| 3064 | |
| 3065 | return ret; |
| 3066 | } |
| 3067 | |
| 3068 | static int mlx5_port_immutable(struct ib_device *ibdev, u32 port_num, |
| 3069 | struct ib_port_immutable *immutable) |
| 3070 | { |
| 3071 | struct ib_port_attr attr; |
| 3072 | struct mlx5_ib_dev *dev = to_mdev(ibdev); |
| 3073 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(ibdev, port_num); |
| 3074 | struct mlx5_hca_vport_context rep = {0}; |
| 3075 | int err; |
| 3076 | |
| 3077 | err = ib_query_port(ibdev, port_num, &attr); |
| 3078 | if (err) |
| 3079 | return err; |
| 3080 | |
| 3081 | if (ll == IB_LINK_LAYER_INFINIBAND) { |
| 3082 | if (ibdev->type == RDMA_DEVICE_TYPE_SMI) |
| 3083 | port_num = smi_to_native_portnum(dev, port_num); |
| 3084 | |
| 3085 | err = mlx5_query_hca_vport_context(dev->mdev, 0, port_num, 0, |
| 3086 | &rep); |
| 3087 | if (err) |
| 3088 | return err; |
| 3089 | } |
| 3090 | |
| 3091 | immutable->pkey_tbl_len = attr.pkey_tbl_len; |
| 3092 | immutable->gid_tbl_len = attr.gid_tbl_len; |
| 3093 | immutable->core_cap_flags = get_core_cap_flags(ibdev, &rep); |
| 3094 | immutable->max_mad_size = IB_MGMT_MAD_SIZE; |
| 3095 | |
| 3096 | return 0; |
| 3097 | } |
| 3098 | |
| 3099 | static int mlx5_port_rep_immutable(struct ib_device *ibdev, u32 port_num, |
| 3100 | struct ib_port_immutable *immutable) |
| 3101 | { |
| 3102 | struct ib_port_attr attr; |
| 3103 | int err; |
| 3104 | |
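	/* Set before ib_query_port(); the core consults core_cap_flags while
	 * filling in the port attributes.
	 */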
| 3105 | immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; |
| 3106 | |
| 3107 | err = ib_query_port(ibdev, port_num, &attr); |
| 3108 | if (err) |
| 3109 | return err; |
| 3110 | |
| 3111 | immutable->pkey_tbl_len = attr.pkey_tbl_len; |
| 3112 | immutable->gid_tbl_len = attr.gid_tbl_len; |
| 3113 | immutable->core_cap_flags = RDMA_CORE_PORT_RAW_PACKET; |
| 3114 | |
| 3115 | return 0; |
| 3116 | } |
| 3117 | |
| 3118 | static void get_dev_fw_str(struct ib_device *ibdev, char *str) |
| 3119 | { |
| 3120 | struct mlx5_ib_dev *dev = |
| 3121 | container_of(ibdev, struct mlx5_ib_dev, ib_dev); |
| 3122 | snprintf(str, IB_FW_VERSION_NAME_MAX, "%d.%d.%04d", |
| 3123 | fw_rev_maj(dev->mdev), fw_rev_min(dev->mdev), |
| 3124 | fw_rev_sub(dev->mdev)); |
| 3125 | } |
| 3126 | |
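/*
 * When the Ethernet ports are bonded, create the vport LAG and a demux
 * flow table so traffic arriving on either physical port can be steered
 * back to the right vport.
 */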
| 3127 | static int mlx5_eth_lag_init(struct mlx5_ib_dev *dev) |
| 3128 | { |
| 3129 | struct mlx5_core_dev *mdev = dev->mdev; |
| 3130 | struct mlx5_flow_namespace *ns = mlx5_get_flow_namespace(mdev, |
| 3131 | MLX5_FLOW_NAMESPACE_LAG); |
| 3132 | struct mlx5_flow_table *ft; |
| 3133 | int err; |
| 3134 | |
| 3135 | if (!ns || !mlx5_lag_is_active(mdev)) |
| 3136 | return 0; |
| 3137 | |
| 3138 | err = mlx5_cmd_create_vport_lag(mdev); |
| 3139 | if (err) |
| 3140 | return err; |
| 3141 | |
| 3142 | ft = mlx5_create_lag_demux_flow_table(ns, 0, 0); |
| 3143 | if (IS_ERR(ft)) { |
| 3144 | err = PTR_ERR(ft); |
| 3145 | goto err_destroy_vport_lag; |
| 3146 | } |
| 3147 | |
| 3148 | dev->flow_db->lag_demux_ft = ft; |
| 3149 | dev->lag_ports = mlx5_lag_get_num_ports(mdev); |
| 3150 | dev->lag_active = true; |
| 3151 | return 0; |
| 3152 | |
| 3153 | err_destroy_vport_lag: |
| 3154 | mlx5_cmd_destroy_vport_lag(mdev); |
| 3155 | return err; |
| 3156 | } |
| 3157 | |
| 3158 | static void mlx5_eth_lag_cleanup(struct mlx5_ib_dev *dev) |
| 3159 | { |
| 3160 | struct mlx5_core_dev *mdev = dev->mdev; |
| 3161 | |
| 3162 | if (dev->lag_active) { |
| 3163 | dev->lag_active = false; |
| 3164 | |
| 3165 | mlx5_destroy_flow_table(dev->flow_db->lag_demux_ft); |
| 3166 | dev->flow_db->lag_demux_ft = NULL; |
| 3167 | |
| 3168 | mlx5_cmd_destroy_vport_lag(mdev); |
| 3169 | } |
| 3170 | } |
| 3171 | |
| 3172 | static void mlx5_netdev_notifier_register(struct mlx5_roce *roce, |
| 3173 | struct net_device *netdev) |
| 3174 | { |
| 3175 | int err; |
| 3176 | |
| 3177 | if (roce->tracking_netdev) |
| 3178 | return; |
| 3179 | roce->tracking_netdev = netdev; |
| 3180 | roce->nb.notifier_call = mlx5_netdev_event; |
| 3181 | err = register_netdevice_notifier_dev_net(netdev, &roce->nb, &roce->nn); |
| 3182 | WARN_ON(err); |
| 3183 | } |
| 3184 | |
| 3185 | static void mlx5_netdev_notifier_unregister(struct mlx5_roce *roce) |
| 3186 | { |
| 3187 | if (!roce->tracking_netdev) |
| 3188 | return; |
| 3189 | unregister_netdevice_notifier_dev_net(roce->tracking_netdev, &roce->nb, |
| 3190 | &roce->nn); |
| 3191 | roce->tracking_netdev = NULL; |
| 3192 | } |
| 3193 | |
| 3194 | static int mlx5e_mdev_notifier_event(struct notifier_block *nb, |
| 3195 | unsigned long event, void *data) |
| 3196 | { |
| 3197 | struct mlx5_roce *roce = container_of(nb, struct mlx5_roce, mdev_nb); |
| 3198 | struct net_device *netdev = data; |
| 3199 | |
| 3200 | switch (event) { |
| 3201 | case MLX5_DRIVER_EVENT_UPLINK_NETDEV: |
| 3202 | if (netdev) |
| 3203 | mlx5_netdev_notifier_register(roce, netdev); |
| 3204 | else |
| 3205 | mlx5_netdev_notifier_unregister(roce); |
| 3206 | break; |
| 3207 | default: |
| 3208 | return NOTIFY_DONE; |
| 3209 | } |
| 3210 | |
| 3211 | return NOTIFY_OK; |
| 3212 | } |
| 3213 | |
| 3214 | static void mlx5_mdev_netdev_track(struct mlx5_ib_dev *dev, u32 port_num) |
| 3215 | { |
| 3216 | struct mlx5_roce *roce = &dev->port[port_num].roce; |
| 3217 | |
| 3218 | roce->mdev_nb.notifier_call = mlx5e_mdev_notifier_event; |
| 3219 | mlx5_blocking_notifier_register(dev->mdev, &roce->mdev_nb); |
| 3220 | mlx5_core_uplink_netdev_event_replay(dev->mdev); |
| 3221 | } |
| 3222 | |
| 3223 | static void mlx5_mdev_netdev_untrack(struct mlx5_ib_dev *dev, u32 port_num) |
| 3224 | { |
| 3225 | struct mlx5_roce *roce = &dev->port[port_num].roce; |
| 3226 | |
| 3227 | mlx5_blocking_notifier_unregister(dev->mdev, &roce->mdev_nb); |
| 3228 | mlx5_netdev_notifier_unregister(roce); |
| 3229 | } |
| 3230 | |
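/*
 * Enable Ethernet operation. RoCE is enabled on the NIC vport only for
 * non-representor devices that do not use the raw Ethernet profile; the
 * LAG setup below runs in either case.
 */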
| 3231 | static int mlx5_enable_eth(struct mlx5_ib_dev *dev) |
| 3232 | { |
| 3233 | int err; |
| 3234 | |
| 3235 | if (!dev->is_rep && dev->profile != &raw_eth_profile) { |
| 3236 | err = mlx5_nic_vport_enable_roce(dev->mdev); |
| 3237 | if (err) |
| 3238 | return err; |
| 3239 | } |
| 3240 | |
| 3241 | err = mlx5_eth_lag_init(dev); |
| 3242 | if (err) |
| 3243 | goto err_disable_roce; |
| 3244 | |
| 3245 | return 0; |
| 3246 | |
| 3247 | err_disable_roce: |
| 3248 | if (!dev->is_rep && dev->profile != &raw_eth_profile) |
| 3249 | mlx5_nic_vport_disable_roce(dev->mdev); |
| 3250 | |
| 3251 | return err; |
| 3252 | } |
| 3253 | |
| 3254 | static void mlx5_disable_eth(struct mlx5_ib_dev *dev) |
| 3255 | { |
| 3256 | mlx5_eth_lag_cleanup(dev); |
| 3257 | if (!dev->is_rep && dev->profile != &raw_eth_profile) |
| 3258 | mlx5_nic_vport_disable_roce(dev->mdev); |
| 3259 | } |
| 3260 | |
| 3261 | static int mlx5_ib_rn_get_params(struct ib_device *device, u32 port_num, |
| 3262 | enum rdma_netdev_t type, |
| 3263 | struct rdma_netdev_alloc_params *params) |
| 3264 | { |
| 3265 | if (type != RDMA_NETDEV_IPOIB) |
| 3266 | return -EOPNOTSUPP; |
| 3267 | |
| 3268 | return mlx5_rdma_rn_get_params(to_mdev(device)->mdev, device, params); |
| 3269 | } |
| 3270 | |
| 3271 | static ssize_t delay_drop_timeout_read(struct file *filp, char __user *buf, |
| 3272 | size_t count, loff_t *pos) |
| 3273 | { |
| 3274 | struct mlx5_ib_delay_drop *delay_drop = filp->private_data; |
| 3275 | char lbuf[20]; |
| 3276 | int len; |
| 3277 | |
| 3278 | len = snprintf(lbuf, sizeof(lbuf), "%u\n", delay_drop->timeout); |
| 3279 | return simple_read_from_buffer(buf, count, pos, lbuf, len); |
| 3280 | } |
| 3281 | |
| 3282 | static ssize_t delay_drop_timeout_write(struct file *filp, const char __user *buf, |
| 3283 | size_t count, loff_t *pos) |
| 3284 | { |
| 3285 | struct mlx5_ib_delay_drop *delay_drop = filp->private_data; |
| 3286 | u32 timeout; |
| 3287 | u32 var; |
| 3288 | |
| 3289 | if (kstrtouint_from_user(buf, count, 0, &var)) |
| 3290 | return -EFAULT; |
| 3291 | |
	timeout = min_t(u32, roundup(var, 100),
			MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000);
| 3294 | if (timeout != var) |
| 3295 | mlx5_ib_dbg(delay_drop->dev, "Round delay drop timeout to %u usec\n", |
| 3296 | timeout); |
| 3297 | |
| 3298 | delay_drop->timeout = timeout; |
| 3299 | |
| 3300 | return count; |
| 3301 | } |
| 3302 | |
| 3303 | static const struct file_operations fops_delay_drop_timeout = { |
| 3304 | .owner = THIS_MODULE, |
| 3305 | .open = simple_open, |
| 3306 | .write = delay_drop_timeout_write, |
| 3307 | .read = delay_drop_timeout_read, |
| 3308 | }; |
| 3309 | |
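/*
 * Detach a slave port from its master IB device. The mpi_lock only
 * protects the mpi pointer itself; in-flight users counted in
 * mdev_refcnt are drained by waiting on unref_comp once per reference
 * before the firmware unaffiliation command is issued. Caller must hold
 * mlx5_ib_multiport_mutex.
 */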
| 3310 | static void mlx5_ib_unbind_slave_port(struct mlx5_ib_dev *ibdev, |
| 3311 | struct mlx5_ib_multiport_info *mpi) |
| 3312 | { |
| 3313 | u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; |
| 3314 | struct mlx5_ib_port *port = &ibdev->port[port_num]; |
| 3315 | int comps; |
| 3316 | int err; |
| 3317 | int i; |
| 3318 | |
| 3319 | lockdep_assert_held(&mlx5_ib_multiport_mutex); |
| 3320 | |
| 3321 | mlx5_core_mp_event_replay(ibdev->mdev, |
| 3322 | MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, |
| 3323 | NULL); |
| 3324 | mlx5_core_mp_event_replay(mpi->mdev, |
| 3325 | MLX5_DRIVER_EVENT_AFFILIATION_REMOVED, |
| 3326 | NULL); |
| 3327 | |
| 3328 | mlx5_ib_cleanup_cong_debugfs(ibdev, port_num); |
| 3329 | |
| 3330 | spin_lock(&port->mp.mpi_lock); |
| 3331 | if (!mpi->ibdev) { |
| 3332 | spin_unlock(&port->mp.mpi_lock); |
| 3333 | return; |
| 3334 | } |
| 3335 | |
| 3336 | mpi->ibdev = NULL; |
| 3337 | |
| 3338 | spin_unlock(&port->mp.mpi_lock); |
| 3339 | if (mpi->mdev_events.notifier_call) |
| 3340 | mlx5_notifier_unregister(mpi->mdev, &mpi->mdev_events); |
| 3341 | mpi->mdev_events.notifier_call = NULL; |
| 3342 | mlx5_mdev_netdev_untrack(ibdev, port_num); |
| 3343 | spin_lock(&port->mp.mpi_lock); |
| 3344 | |
| 3345 | comps = mpi->mdev_refcnt; |
| 3346 | if (comps) { |
| 3347 | mpi->unaffiliate = true; |
| 3348 | init_completion(&mpi->unref_comp); |
| 3349 | spin_unlock(&port->mp.mpi_lock); |
| 3350 | |
| 3351 | for (i = 0; i < comps; i++) |
| 3352 | wait_for_completion(&mpi->unref_comp); |
| 3353 | |
| 3354 | spin_lock(&port->mp.mpi_lock); |
| 3355 | mpi->unaffiliate = false; |
| 3356 | } |
| 3357 | |
| 3358 | port->mp.mpi = NULL; |
| 3359 | |
| 3360 | spin_unlock(&port->mp.mpi_lock); |
| 3361 | |
| 3362 | err = mlx5_nic_vport_unaffiliate_multiport(mpi->mdev); |
| 3363 | |
| 3364 | mlx5_ib_dbg(ibdev, "unaffiliated port %u\n", port_num + 1); |
	/* Only log the failure: the pointers were already cleaned up above
	 * and the caller still needs to add the mpi back to the
	 * unaffiliated list.
	 */
| 3368 | if (err) |
| 3369 | mlx5_ib_err(ibdev, "Failed to unaffiliate port %u\n", |
| 3370 | port_num + 1); |
| 3371 | |
| 3372 | ibdev->port[port_num].roce.last_port_state = IB_PORT_DOWN; |
| 3373 | } |
| 3374 | |
| 3375 | static bool mlx5_ib_bind_slave_port(struct mlx5_ib_dev *ibdev, |
| 3376 | struct mlx5_ib_multiport_info *mpi) |
| 3377 | { |
| 3378 | u32 port_num = mlx5_core_native_port_num(mpi->mdev) - 1; |
| 3379 | u64 key; |
| 3380 | int err; |
| 3381 | |
| 3382 | lockdep_assert_held(&mlx5_ib_multiport_mutex); |
| 3383 | |
| 3384 | spin_lock(&ibdev->port[port_num].mp.mpi_lock); |
| 3385 | if (ibdev->port[port_num].mp.mpi) { |
| 3386 | mlx5_ib_dbg(ibdev, "port %u already affiliated.\n", |
| 3387 | port_num + 1); |
| 3388 | spin_unlock(&ibdev->port[port_num].mp.mpi_lock); |
| 3389 | return false; |
| 3390 | } |
| 3391 | |
| 3392 | ibdev->port[port_num].mp.mpi = mpi; |
| 3393 | mpi->ibdev = ibdev; |
| 3394 | mpi->mdev_events.notifier_call = NULL; |
| 3395 | spin_unlock(&ibdev->port[port_num].mp.mpi_lock); |
| 3396 | |
| 3397 | err = mlx5_nic_vport_affiliate_multiport(ibdev->mdev, mpi->mdev); |
| 3398 | if (err) |
| 3399 | goto unbind; |
| 3400 | |
| 3401 | mlx5_mdev_netdev_track(ibdev, port_num); |
| 3402 | |
| 3403 | mpi->mdev_events.notifier_call = mlx5_ib_event_slave_port; |
| 3404 | mlx5_notifier_register(mpi->mdev, &mpi->mdev_events); |
| 3405 | |
| 3406 | mlx5_ib_init_cong_debugfs(ibdev, port_num); |
| 3407 | |
| 3408 | key = mpi->mdev->priv.adev_idx; |
| 3409 | mlx5_core_mp_event_replay(mpi->mdev, |
| 3410 | MLX5_DRIVER_EVENT_AFFILIATION_DONE, |
| 3411 | &key); |
| 3412 | mlx5_core_mp_event_replay(ibdev->mdev, |
| 3413 | MLX5_DRIVER_EVENT_AFFILIATION_DONE, |
| 3414 | &key); |
| 3415 | |
| 3416 | return true; |
| 3417 | |
| 3418 | unbind: |
| 3419 | mlx5_ib_unbind_slave_port(ibdev, mpi); |
| 3420 | return false; |
| 3421 | } |
| 3422 | |
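/*
 * On a multiport Ethernet master, populate the port array: the native
 * port gets a stub mpi entry, and every other port is bound to a
 * matching device (same system image GUID) from the unaffiliated list,
 * if one was probed earlier.
 */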
| 3423 | static int mlx5_ib_init_multiport_master(struct mlx5_ib_dev *dev) |
| 3424 | { |
| 3425 | u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1; |
| 3426 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, |
| 3427 | port_num + 1); |
| 3428 | struct mlx5_ib_multiport_info *mpi; |
| 3429 | int err; |
| 3430 | u32 i; |
| 3431 | |
| 3432 | if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) |
| 3433 | return 0; |
| 3434 | |
| 3435 | err = mlx5_query_nic_vport_system_image_guid(dev->mdev, |
| 3436 | &dev->sys_image_guid); |
| 3437 | if (err) |
| 3438 | return err; |
| 3439 | |
| 3440 | err = mlx5_nic_vport_enable_roce(dev->mdev); |
| 3441 | if (err) |
| 3442 | return err; |
| 3443 | |
| 3444 | mutex_lock(&mlx5_ib_multiport_mutex); |
| 3445 | for (i = 0; i < dev->num_ports; i++) { |
| 3446 | bool bound = false; |
| 3447 | |
		/* Build a stub multiport info struct for the native port. */
| 3449 | if (i == port_num) { |
| 3450 | mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); |
| 3451 | if (!mpi) { |
| 3452 | mutex_unlock(&mlx5_ib_multiport_mutex); |
| 3453 | mlx5_nic_vport_disable_roce(dev->mdev); |
| 3454 | return -ENOMEM; |
| 3455 | } |
| 3456 | |
| 3457 | mpi->is_master = true; |
| 3458 | mpi->mdev = dev->mdev; |
| 3459 | mpi->sys_image_guid = dev->sys_image_guid; |
| 3460 | dev->port[i].mp.mpi = mpi; |
| 3461 | mpi->ibdev = dev; |
| 3462 | mpi = NULL; |
| 3463 | continue; |
| 3464 | } |
| 3465 | |
| 3466 | list_for_each_entry(mpi, &mlx5_ib_unaffiliated_port_list, |
| 3467 | list) { |
| 3468 | if (dev->sys_image_guid == mpi->sys_image_guid && |
| 3469 | (mlx5_core_native_port_num(mpi->mdev) - 1) == i) { |
| 3470 | bound = mlx5_ib_bind_slave_port(dev, mpi); |
| 3471 | } |
| 3472 | |
| 3473 | if (bound) { |
| 3474 | dev_dbg(mpi->mdev->device, |
| 3475 | "removing port from unaffiliated list.\n"); |
| 3476 | mlx5_ib_dbg(dev, "port %d bound\n", i + 1); |
| 3477 | list_del(&mpi->list); |
| 3478 | break; |
| 3479 | } |
| 3480 | } |
| 3481 | if (!bound) |
| 3482 | mlx5_ib_dbg(dev, "no free port found for port %d\n", |
| 3483 | i + 1); |
| 3484 | } |
| 3485 | |
| 3486 | list_add_tail(&dev->ib_dev_list, &mlx5_ib_dev_list); |
| 3487 | mutex_unlock(&mlx5_ib_multiport_mutex); |
| 3488 | return err; |
| 3489 | } |
| 3490 | |
| 3491 | static void mlx5_ib_cleanup_multiport_master(struct mlx5_ib_dev *dev) |
| 3492 | { |
| 3493 | u32 port_num = mlx5_core_native_port_num(dev->mdev) - 1; |
| 3494 | enum rdma_link_layer ll = mlx5_ib_port_link_layer(&dev->ib_dev, |
| 3495 | port_num + 1); |
| 3496 | u32 i; |
| 3497 | |
| 3498 | if (!mlx5_core_is_mp_master(dev->mdev) || ll != IB_LINK_LAYER_ETHERNET) |
| 3499 | return; |
| 3500 | |
| 3501 | mutex_lock(&mlx5_ib_multiport_mutex); |
| 3502 | for (i = 0; i < dev->num_ports; i++) { |
| 3503 | if (dev->port[i].mp.mpi) { |
| 3504 | /* Destroy the native port stub */ |
| 3505 | if (i == port_num) { |
| 3506 | kfree(dev->port[i].mp.mpi); |
| 3507 | dev->port[i].mp.mpi = NULL; |
| 3508 | } else { |
| 3509 | mlx5_ib_dbg(dev, "unbinding port_num: %u\n", |
| 3510 | i + 1); |
| 3511 | list_add_tail(&dev->port[i].mp.mpi->list, |
| 3512 | &mlx5_ib_unaffiliated_port_list); |
| 3513 | mlx5_ib_unbind_slave_port(dev, |
| 3514 | dev->port[i].mp.mpi); |
| 3515 | } |
| 3516 | } |
| 3517 | } |
| 3518 | |
| 3519 | mlx5_ib_dbg(dev, "removing from devlist\n"); |
| 3520 | list_del(&dev->ib_dev_list); |
| 3521 | mutex_unlock(&mlx5_ib_multiport_mutex); |
| 3522 | |
| 3523 | mlx5_nic_vport_disable_roce(dev->mdev); |
| 3524 | } |
| 3525 | |
| 3526 | static int mmap_obj_cleanup(struct ib_uobject *uobject, |
| 3527 | enum rdma_remove_reason why, |
| 3528 | struct uverbs_attr_bundle *attrs) |
| 3529 | { |
| 3530 | struct mlx5_user_mmap_entry *obj = uobject->object; |
| 3531 | |
| 3532 | rdma_user_mmap_entry_remove(&obj->rdma_entry); |
| 3533 | return 0; |
| 3534 | } |
| 3535 | |
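/*
 * Driver-private mmap entries live in the offset window
 * [MLX5_IB_MMAP_OFFSET_START << 16,
 *  (MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1]: the mmap command
 * occupies the bits above the low 16, leaving 64K of page-granularity
 * offsets per command value.
 */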
| 3536 | static int mlx5_rdma_user_mmap_entry_insert(struct mlx5_ib_ucontext *c, |
| 3537 | struct mlx5_user_mmap_entry *entry, |
| 3538 | size_t length) |
| 3539 | { |
| 3540 | return rdma_user_mmap_entry_insert_range( |
| 3541 | &c->ibucontext, &entry->rdma_entry, length, |
| 3542 | (MLX5_IB_MMAP_OFFSET_START << 16), |
| 3543 | ((MLX5_IB_MMAP_OFFSET_END << 16) + (1UL << 16) - 1)); |
| 3544 | } |
| 3545 | |
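/*
 * Allocate a VAR (doorbell) page for userspace: claim the first free
 * slot in the per-device bitmap, derive its hardware address as
 * hw_start_addr + page_idx * stride_size, and expose it through an
 * rdma_user_mmap entry one stride long.
 */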
| 3546 | static struct mlx5_user_mmap_entry * |
| 3547 | alloc_var_entry(struct mlx5_ib_ucontext *c) |
| 3548 | { |
| 3549 | struct mlx5_user_mmap_entry *entry; |
| 3550 | struct mlx5_var_table *var_table; |
| 3551 | u32 page_idx; |
| 3552 | int err; |
| 3553 | |
| 3554 | var_table = &to_mdev(c->ibucontext.device)->var_table; |
| 3555 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
| 3556 | if (!entry) |
| 3557 | return ERR_PTR(-ENOMEM); |
| 3558 | |
| 3559 | mutex_lock(&var_table->bitmap_lock); |
| 3560 | page_idx = find_first_zero_bit(var_table->bitmap, |
| 3561 | var_table->num_var_hw_entries); |
| 3562 | if (page_idx >= var_table->num_var_hw_entries) { |
| 3563 | err = -ENOSPC; |
| 3564 | mutex_unlock(&var_table->bitmap_lock); |
| 3565 | goto end; |
| 3566 | } |
| 3567 | |
| 3568 | set_bit(page_idx, var_table->bitmap); |
| 3569 | mutex_unlock(&var_table->bitmap_lock); |
| 3570 | |
| 3571 | entry->address = var_table->hw_start_addr + |
| 3572 | (page_idx * var_table->stride_size); |
| 3573 | entry->page_idx = page_idx; |
| 3574 | entry->mmap_flag = MLX5_IB_MMAP_TYPE_VAR; |
| 3575 | |
| 3576 | err = mlx5_rdma_user_mmap_entry_insert(c, entry, |
| 3577 | var_table->stride_size); |
| 3578 | if (err) |
| 3579 | goto err_insert; |
| 3580 | |
| 3581 | return entry; |
| 3582 | |
| 3583 | err_insert: |
| 3584 | mutex_lock(&var_table->bitmap_lock); |
| 3585 | clear_bit(page_idx, var_table->bitmap); |
| 3586 | mutex_unlock(&var_table->bitmap_lock); |
| 3587 | end: |
| 3588 | kfree(entry); |
| 3589 | return ERR_PTR(err); |
| 3590 | } |
| 3591 | |
| 3592 | static int UVERBS_HANDLER(MLX5_IB_METHOD_VAR_OBJ_ALLOC)( |
| 3593 | struct uverbs_attr_bundle *attrs) |
| 3594 | { |
| 3595 | struct ib_uobject *uobj = uverbs_attr_get_uobject( |
| 3596 | attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE); |
| 3597 | struct mlx5_ib_ucontext *c; |
| 3598 | struct mlx5_user_mmap_entry *entry; |
| 3599 | u64 mmap_offset; |
| 3600 | u32 length; |
| 3601 | int err; |
| 3602 | |
| 3603 | c = to_mucontext(ib_uverbs_get_ucontext(attrs)); |
| 3604 | if (IS_ERR(c)) |
| 3605 | return PTR_ERR(c); |
| 3606 | |
| 3607 | entry = alloc_var_entry(c); |
| 3608 | if (IS_ERR(entry)) |
| 3609 | return PTR_ERR(entry); |
| 3610 | |
| 3611 | mmap_offset = mlx5_entry_to_mmap_offset(entry); |
| 3612 | length = entry->rdma_entry.npages * PAGE_SIZE; |
| 3613 | uobj->object = entry; |
| 3614 | uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE); |
| 3615 | |
| 3616 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET, |
| 3617 | &mmap_offset, sizeof(mmap_offset)); |
| 3618 | if (err) |
| 3619 | return err; |
| 3620 | |
| 3621 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID, |
| 3622 | &entry->page_idx, sizeof(entry->page_idx)); |
| 3623 | if (err) |
| 3624 | return err; |
| 3625 | |
| 3626 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH, |
| 3627 | &length, sizeof(length)); |
| 3628 | return err; |
| 3629 | } |
| 3630 | |
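/*
 * uverbs ioctl metadata for the VAR object: ALLOC creates a new IDR
 * handle and returns the page id, mmap length and mmap offset that
 * userspace later passes to mmap(); DESTROY releases the handle via
 * mmap_obj_cleanup().
 */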
| 3631 | DECLARE_UVERBS_NAMED_METHOD( |
| 3632 | MLX5_IB_METHOD_VAR_OBJ_ALLOC, |
| 3633 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE, |
| 3634 | MLX5_IB_OBJECT_VAR, |
| 3635 | UVERBS_ACCESS_NEW, |
| 3636 | UA_MANDATORY), |
| 3637 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID, |
| 3638 | UVERBS_ATTR_TYPE(u32), |
| 3639 | UA_MANDATORY), |
| 3640 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH, |
| 3641 | UVERBS_ATTR_TYPE(u32), |
| 3642 | UA_MANDATORY), |
| 3643 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET, |
| 3644 | UVERBS_ATTR_TYPE(u64), |
| 3645 | UA_MANDATORY)); |
| 3646 | |
| 3647 | DECLARE_UVERBS_NAMED_METHOD_DESTROY( |
| 3648 | MLX5_IB_METHOD_VAR_OBJ_DESTROY, |
| 3649 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE, |
| 3650 | MLX5_IB_OBJECT_VAR, |
| 3651 | UVERBS_ACCESS_DESTROY, |
| 3652 | UA_MANDATORY)); |
| 3653 | |
| 3654 | DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_VAR, |
| 3655 | UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup), |
| 3656 | &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_ALLOC), |
| 3657 | &UVERBS_METHOD(MLX5_IB_METHOD_VAR_OBJ_DESTROY)); |
| 3658 | |
| 3659 | static bool var_is_supported(struct ib_device *device) |
| 3660 | { |
| 3661 | struct mlx5_ib_dev *dev = to_mdev(device); |
| 3662 | |
| 3663 | return (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & |
| 3664 | MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q); |
| 3665 | } |
| 3666 | |
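/*
 * Allocate a UAR page on behalf of a user context. BF allocations are
 * mapped write-combining, NC allocations non-cached. The UAR index is
 * reused as the mmap entry's page_idx so userspace can identify the
 * page it received.
 */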
| 3667 | static struct mlx5_user_mmap_entry * |
| 3668 | alloc_uar_entry(struct mlx5_ib_ucontext *c, |
| 3669 | enum mlx5_ib_uapi_uar_alloc_type alloc_type) |
| 3670 | { |
| 3671 | struct mlx5_user_mmap_entry *entry; |
| 3672 | struct mlx5_ib_dev *dev; |
| 3673 | u32 uar_index; |
| 3674 | int err; |
| 3675 | |
| 3676 | entry = kzalloc(sizeof(*entry), GFP_KERNEL); |
| 3677 | if (!entry) |
| 3678 | return ERR_PTR(-ENOMEM); |
| 3679 | |
| 3680 | dev = to_mdev(c->ibucontext.device); |
| 3681 | err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid); |
| 3682 | if (err) |
| 3683 | goto end; |
| 3684 | |
| 3685 | entry->page_idx = uar_index; |
| 3686 | entry->address = uar_index2paddress(dev, uar_index); |
| 3687 | if (alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF) |
| 3688 | entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_WC; |
| 3689 | else |
| 3690 | entry->mmap_flag = MLX5_IB_MMAP_TYPE_UAR_NC; |
| 3691 | |
| 3692 | err = mlx5_rdma_user_mmap_entry_insert(c, entry, PAGE_SIZE); |
| 3693 | if (err) |
| 3694 | goto err_insert; |
| 3695 | |
| 3696 | return entry; |
| 3697 | |
| 3698 | err_insert: |
| 3699 | mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid); |
| 3700 | end: |
| 3701 | kfree(entry); |
| 3702 | return ERR_PTR(err); |
| 3703 | } |
| 3704 | |
| 3705 | static int UVERBS_HANDLER(MLX5_IB_METHOD_UAR_OBJ_ALLOC)( |
| 3706 | struct uverbs_attr_bundle *attrs) |
| 3707 | { |
| 3708 | struct ib_uobject *uobj = uverbs_attr_get_uobject( |
| 3709 | attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE); |
| 3710 | enum mlx5_ib_uapi_uar_alloc_type alloc_type; |
| 3711 | struct mlx5_ib_ucontext *c; |
| 3712 | struct mlx5_user_mmap_entry *entry; |
| 3713 | u64 mmap_offset; |
| 3714 | u32 length; |
| 3715 | int err; |
| 3716 | |
| 3717 | c = to_mucontext(ib_uverbs_get_ucontext(attrs)); |
| 3718 | if (IS_ERR(c)) |
| 3719 | return PTR_ERR(c); |
| 3720 | |
| 3721 | err = uverbs_get_const(&alloc_type, attrs, |
| 3722 | MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE); |
| 3723 | if (err) |
| 3724 | return err; |
| 3725 | |
| 3726 | if (alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF && |
| 3727 | alloc_type != MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC) |
| 3728 | return -EOPNOTSUPP; |
| 3729 | |
| 3730 | if (!mlx5_wc_support_get(to_mdev(c->ibucontext.device)->mdev) && |
| 3731 | alloc_type == MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF) |
| 3732 | return -EOPNOTSUPP; |
| 3733 | |
| 3734 | entry = alloc_uar_entry(c, alloc_type); |
| 3735 | if (IS_ERR(entry)) |
| 3736 | return PTR_ERR(entry); |
| 3737 | |
| 3738 | mmap_offset = mlx5_entry_to_mmap_offset(entry); |
| 3739 | length = entry->rdma_entry.npages * PAGE_SIZE; |
| 3740 | uobj->object = entry; |
| 3741 | uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE); |
| 3742 | |
| 3743 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, |
| 3744 | &mmap_offset, sizeof(mmap_offset)); |
| 3745 | if (err) |
| 3746 | return err; |
| 3747 | |
| 3748 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, |
| 3749 | &entry->page_idx, sizeof(entry->page_idx)); |
| 3750 | if (err) |
| 3751 | return err; |
| 3752 | |
| 3753 | err = uverbs_copy_to(attrs, MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, |
| 3754 | &length, sizeof(length)); |
| 3755 | return err; |
| 3756 | } |
| 3757 | |
| 3758 | DECLARE_UVERBS_NAMED_METHOD( |
| 3759 | MLX5_IB_METHOD_UAR_OBJ_ALLOC, |
| 3760 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_ALLOC_HANDLE, |
| 3761 | MLX5_IB_OBJECT_UAR, |
| 3762 | UVERBS_ACCESS_NEW, |
| 3763 | UA_MANDATORY), |
| 3764 | UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_UAR_OBJ_ALLOC_TYPE, |
| 3765 | enum mlx5_ib_uapi_uar_alloc_type, |
| 3766 | UA_MANDATORY), |
| 3767 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_PAGE_ID, |
| 3768 | UVERBS_ATTR_TYPE(u32), |
| 3769 | UA_MANDATORY), |
| 3770 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_LENGTH, |
| 3771 | UVERBS_ATTR_TYPE(u32), |
| 3772 | UA_MANDATORY), |
| 3773 | UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_UAR_OBJ_ALLOC_MMAP_OFFSET, |
| 3774 | UVERBS_ATTR_TYPE(u64), |
| 3775 | UA_MANDATORY)); |
| 3776 | |
| 3777 | DECLARE_UVERBS_NAMED_METHOD_DESTROY( |
| 3778 | MLX5_IB_METHOD_UAR_OBJ_DESTROY, |
| 3779 | UVERBS_ATTR_IDR(MLX5_IB_ATTR_UAR_OBJ_DESTROY_HANDLE, |
| 3780 | MLX5_IB_OBJECT_UAR, |
| 3781 | UVERBS_ACCESS_DESTROY, |
| 3782 | UA_MANDATORY)); |
| 3783 | |
| 3784 | DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_UAR, |
| 3785 | UVERBS_TYPE_ALLOC_IDR(mmap_obj_cleanup), |
| 3786 | &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_ALLOC), |
| 3787 | &UVERBS_METHOD(MLX5_IB_METHOD_UAR_OBJ_DESTROY)); |
| 3788 | |
| 3789 | ADD_UVERBS_ATTRIBUTES_SIMPLE( |
| 3790 | mlx5_ib_query_context, |
| 3791 | UVERBS_OBJECT_DEVICE, |
| 3792 | UVERBS_METHOD_QUERY_CONTEXT, |
| 3793 | UVERBS_ATTR_PTR_OUT( |
| 3794 | MLX5_IB_ATTR_QUERY_CONTEXT_RESP_UCTX, |
| 3795 | UVERBS_ATTR_STRUCT(struct mlx5_ib_alloc_ucontext_resp, |
| 3796 | dump_fill_mkey), |
| 3797 | UA_MANDATORY)); |
| 3798 | |
| 3799 | static const struct uapi_definition mlx5_ib_defs[] = { |
| 3800 | UAPI_DEF_CHAIN(mlx5_ib_devx_defs), |
| 3801 | UAPI_DEF_CHAIN(mlx5_ib_flow_defs), |
| 3802 | UAPI_DEF_CHAIN(mlx5_ib_qos_defs), |
| 3803 | UAPI_DEF_CHAIN(mlx5_ib_std_types_defs), |
| 3804 | UAPI_DEF_CHAIN(mlx5_ib_dm_defs), |
| 3805 | UAPI_DEF_CHAIN(mlx5_ib_create_cq_defs), |
| 3806 | |
| 3807 | UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DEVICE, &mlx5_ib_query_context), |
| 3808 | UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_VAR, |
| 3809 | UAPI_DEF_IS_OBJ_SUPPORTED(var_is_supported)), |
| 3810 | UAPI_DEF_CHAIN_OBJ_TREE_NAMED(MLX5_IB_OBJECT_UAR), |
| 3811 | {} |
| 3812 | }; |
| 3813 | |
| 3814 | static void mlx5_ib_stage_init_cleanup(struct mlx5_ib_dev *dev) |
| 3815 | { |
| 3816 | mlx5_ib_cleanup_multiport_master(dev); |
| 3817 | WARN_ON(!xa_empty(&dev->odp_mkeys)); |
| 3818 | mutex_destroy(&dev->cap_mask_mutex); |
| 3819 | WARN_ON(!xa_empty(&dev->sig_mrs)); |
| 3820 | WARN_ON(!bitmap_empty(dev->dm.memic_alloc_pages, MLX5_MAX_MEMIC_PAGES)); |
| 3821 | mlx5r_macsec_dealloc_gids(dev); |
| 3822 | } |
| 3823 | |
| 3824 | static int mlx5_ib_stage_init_init(struct mlx5_ib_dev *dev) |
| 3825 | { |
| 3826 | struct mlx5_core_dev *mdev = dev->mdev; |
| 3827 | int err, i; |
| 3828 | |
| 3829 | dev->ib_dev.node_type = RDMA_NODE_IB_CA; |
| 3830 | dev->ib_dev.local_dma_lkey = 0 /* not supported for now */; |
| 3831 | dev->ib_dev.phys_port_cnt = dev->num_ports; |
| 3832 | dev->ib_dev.dev.parent = mdev->device; |
| 3833 | dev->ib_dev.lag_flags = RDMA_LAG_FLAGS_HASH_ALL_SLAVES; |
| 3834 | |
| 3835 | for (i = 0; i < dev->num_ports; i++) { |
| 3836 | spin_lock_init(&dev->port[i].mp.mpi_lock); |
| 3837 | rwlock_init(&dev->port[i].roce.netdev_lock); |
| 3838 | dev->port[i].roce.dev = dev; |
| 3839 | dev->port[i].roce.native_port_num = i + 1; |
| 3840 | dev->port[i].roce.last_port_state = IB_PORT_DOWN; |
| 3841 | } |
| 3842 | |
| 3843 | err = mlx5r_cmd_query_special_mkeys(dev); |
| 3844 | if (err) |
| 3845 | return err; |
| 3846 | |
| 3847 | err = mlx5r_macsec_init_gids_and_devlist(dev); |
| 3848 | if (err) |
| 3849 | return err; |
| 3850 | |
| 3851 | err = mlx5_ib_init_multiport_master(dev); |
| 3852 | if (err) |
| 3853 | goto err; |
| 3854 | |
| 3855 | err = set_has_smi_cap(dev); |
| 3856 | if (err) |
| 3857 | goto err_mp; |
| 3858 | |
| 3859 | err = mlx5_query_max_pkeys(&dev->ib_dev, &dev->pkey_table_len); |
| 3860 | if (err) |
| 3861 | goto err_mp; |
| 3862 | |
| 3863 | if (mlx5_use_mad_ifc(dev)) |
| 3864 | get_ext_port_caps(dev); |
| 3865 | |
| 3866 | dev->ib_dev.num_comp_vectors = mlx5_comp_vectors_max(mdev); |
| 3867 | |
| 3868 | mutex_init(&dev->cap_mask_mutex); |
| 3869 | INIT_LIST_HEAD(&dev->qp_list); |
| 3870 | spin_lock_init(&dev->reset_flow_resource_lock); |
| 3871 | xa_init(&dev->odp_mkeys); |
| 3872 | xa_init(&dev->sig_mrs); |
| 3873 | atomic_set(&dev->mkey_var, 0); |
| 3874 | |
| 3875 | spin_lock_init(&dev->dm.lock); |
| 3876 | dev->dm.dev = mdev; |
| 3877 | return 0; |
| 3878 | err_mp: |
| 3879 | mlx5_ib_cleanup_multiport_master(dev); |
| 3880 | err: |
| 3881 | mlx5r_macsec_dealloc_gids(dev); |
| 3882 | return err; |
| 3883 | } |
| 3884 | |
| 3885 | static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent, |
| 3886 | enum rdma_nl_dev_type type, |
| 3887 | const char *name); |
| 3888 | static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev); |
| 3889 | |
| 3890 | static const struct ib_device_ops mlx5_ib_dev_ops = { |
| 3891 | .owner = THIS_MODULE, |
| 3892 | .driver_id = RDMA_DRIVER_MLX5, |
| 3893 | .uverbs_abi_ver = MLX5_IB_UVERBS_ABI_VERSION, |
| 3894 | |
| 3895 | .add_gid = mlx5_ib_add_gid, |
| 3896 | .add_sub_dev = mlx5_ib_add_sub_dev, |
| 3897 | .alloc_mr = mlx5_ib_alloc_mr, |
| 3898 | .alloc_mr_integrity = mlx5_ib_alloc_mr_integrity, |
| 3899 | .alloc_pd = mlx5_ib_alloc_pd, |
| 3900 | .alloc_ucontext = mlx5_ib_alloc_ucontext, |
| 3901 | .attach_mcast = mlx5_ib_mcg_attach, |
| 3902 | .check_mr_status = mlx5_ib_check_mr_status, |
| 3903 | .create_ah = mlx5_ib_create_ah, |
| 3904 | .create_cq = mlx5_ib_create_cq, |
| 3905 | .create_qp = mlx5_ib_create_qp, |
| 3906 | .create_srq = mlx5_ib_create_srq, |
| 3907 | .create_user_ah = mlx5_ib_create_ah, |
| 3908 | .dealloc_pd = mlx5_ib_dealloc_pd, |
| 3909 | .dealloc_ucontext = mlx5_ib_dealloc_ucontext, |
| 3910 | .del_gid = mlx5_ib_del_gid, |
| 3911 | .del_sub_dev = mlx5_ib_del_sub_dev, |
| 3912 | .dereg_mr = mlx5_ib_dereg_mr, |
| 3913 | .destroy_ah = mlx5_ib_destroy_ah, |
| 3914 | .destroy_cq = mlx5_ib_destroy_cq, |
| 3915 | .destroy_qp = mlx5_ib_destroy_qp, |
| 3916 | .destroy_srq = mlx5_ib_destroy_srq, |
| 3917 | .detach_mcast = mlx5_ib_mcg_detach, |
| 3918 | .disassociate_ucontext = mlx5_ib_disassociate_ucontext, |
| 3919 | .drain_rq = mlx5_ib_drain_rq, |
| 3920 | .drain_sq = mlx5_ib_drain_sq, |
| 3921 | .device_group = &mlx5_attr_group, |
| 3922 | .get_dev_fw_str = get_dev_fw_str, |
| 3923 | .get_dma_mr = mlx5_ib_get_dma_mr, |
| 3924 | .get_link_layer = mlx5_ib_port_link_layer, |
| 3925 | .map_mr_sg = mlx5_ib_map_mr_sg, |
| 3926 | .map_mr_sg_pi = mlx5_ib_map_mr_sg_pi, |
| 3927 | .mmap = mlx5_ib_mmap, |
| 3928 | .mmap_free = mlx5_ib_mmap_free, |
| 3929 | .modify_cq = mlx5_ib_modify_cq, |
| 3930 | .modify_device = mlx5_ib_modify_device, |
| 3931 | .modify_port = mlx5_ib_modify_port, |
| 3932 | .modify_qp = mlx5_ib_modify_qp, |
| 3933 | .modify_srq = mlx5_ib_modify_srq, |
| 3934 | .poll_cq = mlx5_ib_poll_cq, |
| 3935 | .post_recv = mlx5_ib_post_recv_nodrain, |
| 3936 | .post_send = mlx5_ib_post_send_nodrain, |
| 3937 | .post_srq_recv = mlx5_ib_post_srq_recv, |
| 3938 | .process_mad = mlx5_ib_process_mad, |
| 3939 | .query_ah = mlx5_ib_query_ah, |
| 3940 | .query_device = mlx5_ib_query_device, |
| 3941 | .query_gid = mlx5_ib_query_gid, |
| 3942 | .query_pkey = mlx5_ib_query_pkey, |
| 3943 | .query_qp = mlx5_ib_query_qp, |
| 3944 | .query_srq = mlx5_ib_query_srq, |
| 3945 | .query_ucontext = mlx5_ib_query_ucontext, |
| 3946 | .reg_user_mr = mlx5_ib_reg_user_mr, |
| 3947 | .reg_user_mr_dmabuf = mlx5_ib_reg_user_mr_dmabuf, |
| 3948 | .req_notify_cq = mlx5_ib_arm_cq, |
| 3949 | .rereg_user_mr = mlx5_ib_rereg_user_mr, |
| 3950 | .resize_cq = mlx5_ib_resize_cq, |
| 3951 | |
| 3952 | INIT_RDMA_OBJ_SIZE(ib_ah, mlx5_ib_ah, ibah), |
| 3953 | INIT_RDMA_OBJ_SIZE(ib_counters, mlx5_ib_mcounters, ibcntrs), |
| 3954 | INIT_RDMA_OBJ_SIZE(ib_cq, mlx5_ib_cq, ibcq), |
| 3955 | INIT_RDMA_OBJ_SIZE(ib_pd, mlx5_ib_pd, ibpd), |
| 3956 | INIT_RDMA_OBJ_SIZE(ib_qp, mlx5_ib_qp, ibqp), |
| 3957 | INIT_RDMA_OBJ_SIZE(ib_srq, mlx5_ib_srq, ibsrq), |
| 3958 | INIT_RDMA_OBJ_SIZE(ib_ucontext, mlx5_ib_ucontext, ibucontext), |
| 3959 | }; |
| 3960 | |
| 3961 | static const struct ib_device_ops mlx5_ib_dev_ipoib_enhanced_ops = { |
| 3962 | .rdma_netdev_get_params = mlx5_ib_rn_get_params, |
| 3963 | }; |
| 3964 | |
| 3965 | static const struct ib_device_ops mlx5_ib_dev_sriov_ops = { |
| 3966 | .get_vf_config = mlx5_ib_get_vf_config, |
| 3967 | .get_vf_guid = mlx5_ib_get_vf_guid, |
| 3968 | .get_vf_stats = mlx5_ib_get_vf_stats, |
| 3969 | .set_vf_guid = mlx5_ib_set_vf_guid, |
| 3970 | .set_vf_link_state = mlx5_ib_set_vf_link_state, |
| 3971 | }; |
| 3972 | |
| 3973 | static const struct ib_device_ops mlx5_ib_dev_mw_ops = { |
| 3974 | .alloc_mw = mlx5_ib_alloc_mw, |
| 3975 | .dealloc_mw = mlx5_ib_dealloc_mw, |
| 3976 | |
| 3977 | INIT_RDMA_OBJ_SIZE(ib_mw, mlx5_ib_mw, ibmw), |
| 3978 | }; |
| 3979 | |
| 3980 | static const struct ib_device_ops mlx5_ib_dev_xrc_ops = { |
| 3981 | .alloc_xrcd = mlx5_ib_alloc_xrcd, |
| 3982 | .dealloc_xrcd = mlx5_ib_dealloc_xrcd, |
| 3983 | |
| 3984 | INIT_RDMA_OBJ_SIZE(ib_xrcd, mlx5_ib_xrcd, ibxrcd), |
| 3985 | }; |
| 3986 | |
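/*
 * Size the VAR table from the VDPA emulation caps. Worked example with
 * illustrative values (not taken from any particular device): with
 * log_doorbell_bar_size = 4 and log_doorbell_stride = 12, the doorbell
 * BAR spans (1 << 4) * 4096 = 64K and is carved into 64K / 4K = 16 VAR
 * entries.
 */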
| 3987 | static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev) |
| 3988 | { |
| 3989 | struct mlx5_core_dev *mdev = dev->mdev; |
| 3990 | struct mlx5_var_table *var_table = &dev->var_table; |
| 3991 | u8 log_doorbell_bar_size; |
| 3992 | u8 log_doorbell_stride; |
| 3993 | u64 bar_size; |
| 3994 | |
| 3995 | log_doorbell_bar_size = MLX5_CAP_DEV_VDPA_EMULATION(mdev, |
| 3996 | log_doorbell_bar_size); |
| 3997 | log_doorbell_stride = MLX5_CAP_DEV_VDPA_EMULATION(mdev, |
| 3998 | log_doorbell_stride); |
| 3999 | var_table->hw_start_addr = dev->mdev->bar_addr + |
| 4000 | MLX5_CAP64_DEV_VDPA_EMULATION(mdev, |
| 4001 | doorbell_bar_offset); |
| 4002 | bar_size = (1ULL << log_doorbell_bar_size) * 4096; |
| 4003 | var_table->stride_size = 1ULL << log_doorbell_stride; |
| 4004 | var_table->num_var_hw_entries = div_u64(bar_size, |
| 4005 | var_table->stride_size); |
| 4006 | mutex_init(&var_table->bitmap_lock); |
| 4007 | var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries, |
| 4008 | GFP_KERNEL); |
	return var_table->bitmap ? 0 : -ENOMEM;
| 4010 | } |
| 4011 | |
| 4012 | static void mlx5_ib_stage_caps_cleanup(struct mlx5_ib_dev *dev) |
| 4013 | { |
| 4014 | bitmap_free(dev->var_table.bitmap); |
| 4015 | } |
| 4016 | |
| 4017 | static int mlx5_ib_stage_caps_init(struct mlx5_ib_dev *dev) |
| 4018 | { |
| 4019 | struct mlx5_core_dev *mdev = dev->mdev; |
| 4020 | int err; |
| 4021 | |
| 4022 | if (MLX5_CAP_GEN(mdev, ipoib_enhanced_offloads) && |
| 4023 | IS_ENABLED(CONFIG_MLX5_CORE_IPOIB)) |
| 4024 | ib_set_device_ops(&dev->ib_dev, |
| 4025 | &mlx5_ib_dev_ipoib_enhanced_ops); |
| 4026 | |
| 4027 | if (mlx5_core_is_pf(mdev)) |
| 4028 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_sriov_ops); |
| 4029 | |
| 4030 | dev->umr_fence = mlx5_get_umr_fence(MLX5_CAP_GEN(mdev, umr_fence)); |
| 4031 | |
| 4032 | if (MLX5_CAP_GEN(mdev, imaicl)) |
| 4033 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_mw_ops); |
| 4034 | |
| 4035 | if (MLX5_CAP_GEN(mdev, xrc)) |
| 4036 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_xrc_ops); |
| 4037 | |
| 4038 | if (MLX5_CAP_DEV_MEM(mdev, memic) || |
| 4039 | MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & |
| 4040 | MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM) |
| 4041 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_dm_ops); |
| 4042 | |
| 4043 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_ops); |
| 4044 | |
| 4045 | if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) |
| 4046 | dev->ib_dev.driver_def = mlx5_ib_defs; |
| 4047 | |
| 4048 | err = init_node_data(dev); |
| 4049 | if (err) |
| 4050 | return err; |
| 4051 | |
| 4052 | if ((MLX5_CAP_GEN(dev->mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && |
| 4053 | (MLX5_CAP_GEN(dev->mdev, disable_local_lb_uc) || |
| 4054 | MLX5_CAP_GEN(dev->mdev, disable_local_lb_mc))) |
| 4055 | mutex_init(&dev->lb.mutex); |
| 4056 | |
| 4057 | if (MLX5_CAP_GEN_64(dev->mdev, general_obj_types) & |
| 4058 | MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) { |
| 4059 | err = mlx5_ib_init_var_table(dev); |
| 4060 | if (err) |
| 4061 | return err; |
| 4062 | } |
| 4063 | |
| 4064 | dev->ib_dev.use_cq_dim = true; |
| 4065 | |
| 4066 | return 0; |
| 4067 | } |
| 4068 | |
| 4069 | static const struct ib_device_ops mlx5_ib_dev_port_ops = { |
| 4070 | .get_port_immutable = mlx5_port_immutable, |
| 4071 | .query_port = mlx5_ib_query_port, |
| 4072 | }; |
| 4073 | |
| 4074 | static int mlx5_ib_stage_non_default_cb(struct mlx5_ib_dev *dev) |
| 4075 | { |
| 4076 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_ops); |
| 4077 | return 0; |
| 4078 | } |
| 4079 | |
| 4080 | static const struct ib_device_ops mlx5_ib_dev_port_rep_ops = { |
| 4081 | .get_port_immutable = mlx5_port_rep_immutable, |
| 4082 | .query_port = mlx5_ib_rep_query_port, |
| 4083 | .query_pkey = mlx5_ib_rep_query_pkey, |
| 4084 | }; |
| 4085 | |
| 4086 | static int mlx5_ib_stage_raw_eth_non_default_cb(struct mlx5_ib_dev *dev) |
| 4087 | { |
| 4088 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_port_rep_ops); |
| 4089 | return 0; |
| 4090 | } |
| 4091 | |
| 4092 | static const struct ib_device_ops mlx5_ib_dev_common_roce_ops = { |
| 4093 | .create_rwq_ind_table = mlx5_ib_create_rwq_ind_table, |
| 4094 | .create_wq = mlx5_ib_create_wq, |
| 4095 | .destroy_rwq_ind_table = mlx5_ib_destroy_rwq_ind_table, |
| 4096 | .destroy_wq = mlx5_ib_destroy_wq, |
| 4097 | .get_netdev = mlx5_ib_get_netdev, |
| 4098 | .modify_wq = mlx5_ib_modify_wq, |
| 4099 | |
| 4100 | INIT_RDMA_OBJ_SIZE(ib_rwq_ind_table, mlx5_ib_rwq_ind_table, |
| 4101 | ib_rwq_ind_tbl), |
| 4102 | }; |
| 4103 | |
| 4104 | static int mlx5_ib_roce_init(struct mlx5_ib_dev *dev) |
| 4105 | { |
| 4106 | struct mlx5_core_dev *mdev = dev->mdev; |
| 4107 | enum rdma_link_layer ll; |
| 4108 | int port_type_cap; |
| 4109 | u32 port_num = 0; |
| 4110 | int err; |
| 4111 | |
| 4112 | port_type_cap = MLX5_CAP_GEN(mdev, port_type); |
| 4113 | ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); |
| 4114 | |
| 4115 | if (ll == IB_LINK_LAYER_ETHERNET) { |
| 4116 | ib_set_device_ops(&dev->ib_dev, &mlx5_ib_dev_common_roce_ops); |
| 4117 | |
| 4118 | port_num = mlx5_core_native_port_num(dev->mdev) - 1; |
| 4119 | |
		/* Track the uplink netdev only for the native port */
| 4121 | mlx5_mdev_netdev_track(dev, port_num); |
| 4122 | |
| 4123 | err = mlx5_enable_eth(dev); |
| 4124 | if (err) |
| 4125 | goto cleanup; |
| 4126 | } |
| 4127 | |
| 4128 | return 0; |
| 4129 | cleanup: |
| 4130 | mlx5_mdev_netdev_untrack(dev, port_num); |
| 4131 | return err; |
| 4132 | } |
| 4133 | |
| 4134 | static void mlx5_ib_roce_cleanup(struct mlx5_ib_dev *dev) |
| 4135 | { |
| 4136 | struct mlx5_core_dev *mdev = dev->mdev; |
| 4137 | enum rdma_link_layer ll; |
| 4138 | int port_type_cap; |
| 4139 | u32 port_num; |
| 4140 | |
| 4141 | port_type_cap = MLX5_CAP_GEN(mdev, port_type); |
| 4142 | ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); |
| 4143 | |
| 4144 | if (ll == IB_LINK_LAYER_ETHERNET) { |
| 4145 | mlx5_disable_eth(dev); |
| 4146 | |
| 4147 | port_num = mlx5_core_native_port_num(dev->mdev) - 1; |
| 4148 | mlx5_mdev_netdev_untrack(dev, port_num); |
| 4149 | } |
| 4150 | } |
| 4151 | |
| 4152 | static int mlx5_ib_stage_cong_debugfs_init(struct mlx5_ib_dev *dev) |
| 4153 | { |
| 4154 | mlx5_ib_init_cong_debugfs(dev, |
| 4155 | mlx5_core_native_port_num(dev->mdev) - 1); |
| 4156 | return 0; |
| 4157 | } |
| 4158 | |
| 4159 | static void mlx5_ib_stage_cong_debugfs_cleanup(struct mlx5_ib_dev *dev) |
| 4160 | { |
| 4161 | mlx5_ib_cleanup_cong_debugfs(dev, |
| 4162 | mlx5_core_native_port_num(dev->mdev) - 1); |
| 4163 | } |
| 4164 | |
| 4165 | static int mlx5_ib_stage_uar_init(struct mlx5_ib_dev *dev) |
| 4166 | { |
| 4167 | dev->mdev->priv.uar = mlx5_get_uars_page(dev->mdev); |
| 4168 | return PTR_ERR_OR_ZERO(dev->mdev->priv.uar); |
| 4169 | } |
| 4170 | |
| 4171 | static void mlx5_ib_stage_uar_cleanup(struct mlx5_ib_dev *dev) |
| 4172 | { |
| 4173 | mlx5_put_uars_page(dev->mdev, dev->mdev->priv.uar); |
| 4174 | } |
| 4175 | |
| 4176 | static int mlx5_ib_stage_bfrag_init(struct mlx5_ib_dev *dev) |
| 4177 | { |
| 4178 | int err; |
| 4179 | |
| 4180 | err = mlx5_alloc_bfreg(dev->mdev, &dev->bfreg, false, false); |
| 4181 | if (err) |
| 4182 | return err; |
| 4183 | |
| 4184 | err = mlx5_alloc_bfreg(dev->mdev, &dev->fp_bfreg, false, true); |
| 4185 | if (err) |
| 4186 | mlx5_free_bfreg(dev->mdev, &dev->bfreg); |
| 4187 | |
| 4188 | return err; |
| 4189 | } |
| 4190 | |
| 4191 | static void mlx5_ib_stage_bfrag_cleanup(struct mlx5_ib_dev *dev) |
| 4192 | { |
| 4193 | mlx5_free_bfreg(dev->mdev, &dev->fp_bfreg); |
| 4194 | mlx5_free_bfreg(dev->mdev, &dev->bfreg); |
| 4195 | } |
| 4196 | |
| 4197 | static int mlx5_ib_stage_ib_reg_init(struct mlx5_ib_dev *dev) |
| 4198 | { |
| 4199 | const char *name; |
| 4200 | |
	if (dev->sub_dev_name) {
		name = dev->sub_dev_name;
		ib_mark_name_assigned_by_user(&dev->ib_dev);
	} else if (!mlx5_lag_is_active(dev->mdev)) {
		name = "mlx5_%d";
	} else {
		name = "mlx5_bond_%d";
	}
| 4208 | return ib_register_device(&dev->ib_dev, name, &dev->mdev->pdev->dev); |
| 4209 | } |
| 4210 | |
| 4211 | static void mlx5_ib_stage_pre_ib_reg_umr_cleanup(struct mlx5_ib_dev *dev) |
| 4212 | { |
| 4213 | mlx5_mkey_cache_cleanup(dev); |
| 4214 | mlx5r_umr_resource_cleanup(dev); |
| 4215 | mlx5r_umr_cleanup(dev); |
| 4216 | } |
| 4217 | |
| 4218 | static void mlx5_ib_stage_ib_reg_cleanup(struct mlx5_ib_dev *dev) |
| 4219 | { |
| 4220 | ib_unregister_device(&dev->ib_dev); |
| 4221 | } |
| 4222 | |
| 4223 | static int mlx5_ib_stage_post_ib_reg_umr_init(struct mlx5_ib_dev *dev) |
| 4224 | { |
| 4225 | int ret; |
| 4226 | |
| 4227 | ret = mlx5r_umr_init(dev); |
| 4228 | if (ret) |
| 4229 | return ret; |
| 4230 | |
| 4231 | ret = mlx5_mkey_cache_init(dev); |
| 4232 | if (ret) |
| 4233 | mlx5_ib_warn(dev, "mr cache init failed %d\n", ret); |
| 4234 | return ret; |
| 4235 | } |
| 4236 | |
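/*
 * Delay-drop setup: the timeout defaults to the maximum
 * (MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000 usec) and, when debugfs is
 * available, can be tuned at runtime. Illustrative shell usage (the
 * exact debugfs root is device dependent):
 *
 *   echo 10000 > /sys/kernel/debug/mlx5/<dev>/delay_drop/timeout
 *   cat /sys/kernel/debug/mlx5/<dev>/delay_drop/num_timeout_events
 */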
| 4237 | static int mlx5_ib_stage_delay_drop_init(struct mlx5_ib_dev *dev) |
| 4238 | { |
| 4239 | struct dentry *root; |
| 4240 | |
| 4241 | if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) |
| 4242 | return 0; |
| 4243 | |
| 4244 | mutex_init(&dev->delay_drop.lock); |
| 4245 | dev->delay_drop.dev = dev; |
| 4246 | dev->delay_drop.activate = false; |
| 4247 | dev->delay_drop.timeout = MLX5_MAX_DELAY_DROP_TIMEOUT_MS * 1000; |
| 4248 | INIT_WORK(&dev->delay_drop.delay_drop_work, delay_drop_handler); |
| 4249 | atomic_set(&dev->delay_drop.rqs_cnt, 0); |
| 4250 | atomic_set(&dev->delay_drop.events_cnt, 0); |
| 4251 | |
| 4252 | if (!mlx5_debugfs_root) |
| 4253 | return 0; |
| 4254 | |
| 4255 | root = debugfs_create_dir("delay_drop", mlx5_debugfs_get_dev_root(dev->mdev)); |
| 4256 | dev->delay_drop.dir_debugfs = root; |
| 4257 | |
| 4258 | debugfs_create_atomic_t("num_timeout_events", 0400, root, |
| 4259 | &dev->delay_drop.events_cnt); |
| 4260 | debugfs_create_atomic_t("num_rqs", 0400, root, |
| 4261 | &dev->delay_drop.rqs_cnt); |
| 4262 | debugfs_create_file("timeout", 0600, root, &dev->delay_drop, |
| 4263 | &fops_delay_drop_timeout); |
| 4264 | return 0; |
| 4265 | } |
| 4266 | |
| 4267 | static void mlx5_ib_stage_delay_drop_cleanup(struct mlx5_ib_dev *dev) |
| 4268 | { |
| 4269 | if (!(dev->ib_dev.attrs.raw_packet_caps & IB_RAW_PACKET_CAP_DELAY_DROP)) |
| 4270 | return; |
| 4271 | |
| 4272 | cancel_work_sync(&dev->delay_drop.delay_drop_work); |
| 4273 | if (!dev->delay_drop.dir_debugfs) |
| 4274 | return; |
| 4275 | |
| 4276 | debugfs_remove_recursive(dev->delay_drop.dir_debugfs); |
| 4277 | dev->delay_drop.dir_debugfs = NULL; |
| 4278 | } |
| 4279 | |
| 4280 | static int mlx5_ib_stage_dev_notifier_init(struct mlx5_ib_dev *dev) |
| 4281 | { |
| 4282 | dev->mdev_events.notifier_call = mlx5_ib_event; |
| 4283 | mlx5_notifier_register(dev->mdev, &dev->mdev_events); |
| 4284 | |
| 4285 | mlx5r_macsec_event_register(dev); |
| 4286 | |
| 4287 | return 0; |
| 4288 | } |
| 4289 | |
| 4290 | static void mlx5_ib_stage_dev_notifier_cleanup(struct mlx5_ib_dev *dev) |
| 4291 | { |
| 4292 | mlx5r_macsec_event_unregister(dev); |
| 4293 | mlx5_notifier_unregister(dev->mdev, &dev->mdev_events); |
| 4294 | } |
| 4295 | |
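/*
 * Unwind a device profile: stages [0, stage) were initialized, so run
 * their cleanup callbacks in reverse order before freeing the port
 * array and the ib_device itself.
 */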
| 4296 | void __mlx5_ib_remove(struct mlx5_ib_dev *dev, |
| 4297 | const struct mlx5_ib_profile *profile, |
| 4298 | int stage) |
| 4299 | { |
| 4300 | dev->ib_active = false; |
| 4301 | |
	/* @stage counts the initialized stages; unwind them in reverse order */
| 4303 | while (stage) { |
| 4304 | stage--; |
| 4305 | if (profile->stage[stage].cleanup) |
| 4306 | profile->stage[stage].cleanup(dev); |
| 4307 | } |
| 4308 | |
| 4309 | kfree(dev->port); |
| 4310 | ib_dealloc_device(&dev->ib_dev); |
| 4311 | } |
| 4312 | |
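/*
 * Run every init stage of @profile in order. On failure, the stages
 * that already ran are unwound in reverse order and the failing stage's
 * error code is returned.
 */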
| 4313 | int __mlx5_ib_add(struct mlx5_ib_dev *dev, |
| 4314 | const struct mlx5_ib_profile *profile) |
| 4315 | { |
| 4316 | int err; |
| 4317 | int i; |
| 4318 | |
| 4319 | dev->profile = profile; |
| 4320 | |
| 4321 | for (i = 0; i < MLX5_IB_STAGE_MAX; i++) { |
| 4322 | if (profile->stage[i].init) { |
| 4323 | err = profile->stage[i].init(dev); |
| 4324 | if (err) |
| 4325 | goto err_out; |
| 4326 | } |
| 4327 | } |
| 4328 | |
| 4329 | dev->ib_active = true; |
| 4330 | return 0; |
| 4331 | |
err_out:
	/* Clean up the stages that were already initialized */
	while (i) {
		i--;
		if (profile->stage[i].cleanup)
			profile->stage[i].cleanup(dev);
	}
	/* Propagate the failing stage's error instead of assuming -ENOMEM */
	return err;
| 4340 | } |
| 4341 | |
| 4342 | static const struct mlx5_ib_profile pf_profile = { |
| 4343 | STAGE_CREATE(MLX5_IB_STAGE_INIT, |
| 4344 | mlx5_ib_stage_init_init, |
| 4345 | mlx5_ib_stage_init_cleanup), |
| 4346 | STAGE_CREATE(MLX5_IB_STAGE_FS, |
| 4347 | mlx5_ib_fs_init, |
| 4348 | mlx5_ib_fs_cleanup), |
| 4349 | STAGE_CREATE(MLX5_IB_STAGE_CAPS, |
| 4350 | mlx5_ib_stage_caps_init, |
| 4351 | mlx5_ib_stage_caps_cleanup), |
| 4352 | STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, |
| 4353 | mlx5_ib_stage_non_default_cb, |
| 4354 | NULL), |
| 4355 | STAGE_CREATE(MLX5_IB_STAGE_ROCE, |
| 4356 | mlx5_ib_roce_init, |
| 4357 | mlx5_ib_roce_cleanup), |
| 4358 | STAGE_CREATE(MLX5_IB_STAGE_QP, |
| 4359 | mlx5_init_qp_table, |
| 4360 | mlx5_cleanup_qp_table), |
| 4361 | STAGE_CREATE(MLX5_IB_STAGE_SRQ, |
| 4362 | mlx5_init_srq_table, |
| 4363 | mlx5_cleanup_srq_table), |
| 4364 | STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, |
| 4365 | mlx5_ib_dev_res_init, |
| 4366 | mlx5_ib_dev_res_cleanup), |
| 4367 | STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, |
| 4368 | mlx5_ib_stage_dev_notifier_init, |
| 4369 | mlx5_ib_stage_dev_notifier_cleanup), |
| 4370 | STAGE_CREATE(MLX5_IB_STAGE_ODP, |
| 4371 | mlx5_ib_odp_init_one, |
| 4372 | mlx5_ib_odp_cleanup_one), |
| 4373 | STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, |
| 4374 | mlx5_ib_counters_init, |
| 4375 | mlx5_ib_counters_cleanup), |
| 4376 | STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, |
| 4377 | mlx5_ib_stage_cong_debugfs_init, |
| 4378 | mlx5_ib_stage_cong_debugfs_cleanup), |
| 4379 | STAGE_CREATE(MLX5_IB_STAGE_UAR, |
| 4380 | mlx5_ib_stage_uar_init, |
| 4381 | mlx5_ib_stage_uar_cleanup), |
| 4382 | STAGE_CREATE(MLX5_IB_STAGE_BFREG, |
| 4383 | mlx5_ib_stage_bfrag_init, |
| 4384 | mlx5_ib_stage_bfrag_cleanup), |
| 4385 | STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, |
| 4386 | NULL, |
| 4387 | mlx5_ib_stage_pre_ib_reg_umr_cleanup), |
| 4388 | STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, |
| 4389 | mlx5_ib_devx_init, |
| 4390 | mlx5_ib_devx_cleanup), |
| 4391 | STAGE_CREATE(MLX5_IB_STAGE_IB_REG, |
| 4392 | mlx5_ib_stage_ib_reg_init, |
| 4393 | mlx5_ib_stage_ib_reg_cleanup), |
| 4394 | STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, |
| 4395 | mlx5_ib_stage_post_ib_reg_umr_init, |
| 4396 | NULL), |
| 4397 | STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, |
| 4398 | mlx5_ib_stage_delay_drop_init, |
| 4399 | mlx5_ib_stage_delay_drop_cleanup), |
| 4400 | STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, |
| 4401 | mlx5_ib_restrack_init, |
| 4402 | NULL), |
| 4403 | }; |
| 4404 | |
| 4405 | const struct mlx5_ib_profile raw_eth_profile = { |
| 4406 | STAGE_CREATE(MLX5_IB_STAGE_INIT, |
| 4407 | mlx5_ib_stage_init_init, |
| 4408 | mlx5_ib_stage_init_cleanup), |
| 4409 | STAGE_CREATE(MLX5_IB_STAGE_FS, |
| 4410 | mlx5_ib_fs_init, |
| 4411 | mlx5_ib_fs_cleanup), |
| 4412 | STAGE_CREATE(MLX5_IB_STAGE_CAPS, |
| 4413 | mlx5_ib_stage_caps_init, |
| 4414 | mlx5_ib_stage_caps_cleanup), |
| 4415 | STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, |
| 4416 | mlx5_ib_stage_raw_eth_non_default_cb, |
| 4417 | NULL), |
| 4418 | STAGE_CREATE(MLX5_IB_STAGE_ROCE, |
| 4419 | mlx5_ib_roce_init, |
| 4420 | mlx5_ib_roce_cleanup), |
| 4421 | STAGE_CREATE(MLX5_IB_STAGE_QP, |
| 4422 | mlx5_init_qp_table, |
| 4423 | mlx5_cleanup_qp_table), |
| 4424 | STAGE_CREATE(MLX5_IB_STAGE_SRQ, |
| 4425 | mlx5_init_srq_table, |
| 4426 | mlx5_cleanup_srq_table), |
| 4427 | STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, |
| 4428 | mlx5_ib_dev_res_init, |
| 4429 | mlx5_ib_dev_res_cleanup), |
| 4430 | STAGE_CREATE(MLX5_IB_STAGE_DEVICE_NOTIFIER, |
| 4431 | mlx5_ib_stage_dev_notifier_init, |
| 4432 | mlx5_ib_stage_dev_notifier_cleanup), |
| 4433 | STAGE_CREATE(MLX5_IB_STAGE_COUNTERS, |
| 4434 | mlx5_ib_counters_init, |
| 4435 | mlx5_ib_counters_cleanup), |
| 4436 | STAGE_CREATE(MLX5_IB_STAGE_CONG_DEBUGFS, |
| 4437 | mlx5_ib_stage_cong_debugfs_init, |
| 4438 | mlx5_ib_stage_cong_debugfs_cleanup), |
| 4439 | STAGE_CREATE(MLX5_IB_STAGE_UAR, |
| 4440 | mlx5_ib_stage_uar_init, |
| 4441 | mlx5_ib_stage_uar_cleanup), |
| 4442 | STAGE_CREATE(MLX5_IB_STAGE_BFREG, |
| 4443 | mlx5_ib_stage_bfrag_init, |
| 4444 | mlx5_ib_stage_bfrag_cleanup), |
| 4445 | STAGE_CREATE(MLX5_IB_STAGE_PRE_IB_REG_UMR, |
| 4446 | NULL, |
| 4447 | mlx5_ib_stage_pre_ib_reg_umr_cleanup), |
| 4448 | STAGE_CREATE(MLX5_IB_STAGE_WHITELIST_UID, |
| 4449 | mlx5_ib_devx_init, |
| 4450 | mlx5_ib_devx_cleanup), |
| 4451 | STAGE_CREATE(MLX5_IB_STAGE_IB_REG, |
| 4452 | mlx5_ib_stage_ib_reg_init, |
| 4453 | mlx5_ib_stage_ib_reg_cleanup), |
| 4454 | STAGE_CREATE(MLX5_IB_STAGE_POST_IB_REG_UMR, |
| 4455 | mlx5_ib_stage_post_ib_reg_umr_init, |
| 4456 | NULL), |
| 4457 | STAGE_CREATE(MLX5_IB_STAGE_DELAY_DROP, |
| 4458 | mlx5_ib_stage_delay_drop_init, |
| 4459 | mlx5_ib_stage_delay_drop_cleanup), |
| 4460 | STAGE_CREATE(MLX5_IB_STAGE_RESTRACK, |
| 4461 | mlx5_ib_restrack_init, |
| 4462 | NULL), |
| 4463 | }; |
| 4464 | |
| 4465 | static const struct mlx5_ib_profile plane_profile = { |
| 4466 | STAGE_CREATE(MLX5_IB_STAGE_INIT, |
| 4467 | mlx5_ib_stage_init_init, |
| 4468 | mlx5_ib_stage_init_cleanup), |
| 4469 | STAGE_CREATE(MLX5_IB_STAGE_CAPS, |
| 4470 | mlx5_ib_stage_caps_init, |
| 4471 | mlx5_ib_stage_caps_cleanup), |
| 4472 | STAGE_CREATE(MLX5_IB_STAGE_NON_DEFAULT_CB, |
| 4473 | mlx5_ib_stage_non_default_cb, |
| 4474 | NULL), |
| 4475 | STAGE_CREATE(MLX5_IB_STAGE_QP, |
| 4476 | mlx5_init_qp_table, |
| 4477 | mlx5_cleanup_qp_table), |
| 4478 | STAGE_CREATE(MLX5_IB_STAGE_SRQ, |
| 4479 | mlx5_init_srq_table, |
| 4480 | mlx5_cleanup_srq_table), |
| 4481 | STAGE_CREATE(MLX5_IB_STAGE_DEVICE_RESOURCES, |
| 4482 | mlx5_ib_dev_res_init, |
| 4483 | mlx5_ib_dev_res_cleanup), |
| 4484 | STAGE_CREATE(MLX5_IB_STAGE_BFREG, |
| 4485 | mlx5_ib_stage_bfrag_init, |
| 4486 | mlx5_ib_stage_bfrag_cleanup), |
| 4487 | STAGE_CREATE(MLX5_IB_STAGE_IB_REG, |
| 4488 | mlx5_ib_stage_ib_reg_init, |
| 4489 | mlx5_ib_stage_ib_reg_cleanup), |
| 4490 | }; |
| 4491 | |
| 4492 | static struct ib_device *mlx5_ib_add_sub_dev(struct ib_device *parent, |
| 4493 | enum rdma_nl_dev_type type, |
| 4494 | const char *name) |
| 4495 | { |
| 4496 | struct mlx5_ib_dev *mparent = to_mdev(parent), *mplane; |
| 4497 | enum rdma_link_layer ll; |
| 4498 | int ret; |
| 4499 | |
| 4500 | if (mparent->smi_dev) |
| 4501 | return ERR_PTR(-EEXIST); |
| 4502 | |
| 4503 | ll = mlx5_port_type_cap_to_rdma_ll(MLX5_CAP_GEN(mparent->mdev, |
| 4504 | port_type)); |
| 4505 | if (type != RDMA_DEVICE_TYPE_SMI || !mparent->num_plane || |
| 4506 | ll != IB_LINK_LAYER_INFINIBAND || |
| 4507 | !MLX5_CAP_GEN_2(mparent->mdev, multiplane_qp_ud)) |
| 4508 | return ERR_PTR(-EOPNOTSUPP); |
| 4509 | |
| 4510 | mplane = ib_alloc_device(mlx5_ib_dev, ib_dev); |
| 4511 | if (!mplane) |
| 4512 | return ERR_PTR(-ENOMEM); |
| 4513 | |
| 4514 | mplane->port = kcalloc(mparent->num_plane * mparent->num_ports, |
| 4515 | sizeof(*mplane->port), GFP_KERNEL); |
| 4516 | if (!mplane->port) { |
| 4517 | ret = -ENOMEM; |
| 4518 | goto fail_kcalloc; |
| 4519 | } |
| 4520 | |
| 4521 | mplane->ib_dev.type = type; |
| 4522 | mplane->mdev = mparent->mdev; |
| 4523 | mplane->num_ports = mparent->num_plane; |
| 4524 | mplane->sub_dev_name = name; |
| 4525 | |
| 4526 | ret = __mlx5_ib_add(mplane, &plane_profile); |
| 4527 | if (ret) |
| 4528 | goto fail_ib_add; |
| 4529 | |
| 4530 | mparent->smi_dev = mplane; |
| 4531 | return &mplane->ib_dev; |
| 4532 | |
| 4533 | fail_ib_add: |
| 4534 | kfree(mplane->port); |
| 4535 | fail_kcalloc: |
| 4536 | ib_dealloc_device(&mplane->ib_dev); |
| 4537 | return ERR_PTR(ret); |
| 4538 | } |
| 4539 | |
| 4540 | static void mlx5_ib_del_sub_dev(struct ib_device *sub_dev) |
| 4541 | { |
| 4542 | struct mlx5_ib_dev *mdev = to_mdev(sub_dev); |
| 4543 | |
| 4544 | to_mdev(sub_dev->parent)->smi_dev = NULL; |
| 4545 | __mlx5_ib_remove(mdev, mdev->profile, MLX5_IB_STAGE_MAX); |
| 4546 | } |
| 4547 | |
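/*
 * Probe handler for the ".multiport" auxiliary device: try to bind the
 * new function as a slave port of an existing IB device with the same
 * system image GUID; otherwise park it on the unaffiliated list until a
 * master appears.
 */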
| 4548 | static int mlx5r_mp_probe(struct auxiliary_device *adev, |
| 4549 | const struct auxiliary_device_id *id) |
| 4550 | { |
| 4551 | struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); |
| 4552 | struct mlx5_core_dev *mdev = idev->mdev; |
| 4553 | struct mlx5_ib_multiport_info *mpi; |
| 4554 | struct mlx5_ib_dev *dev; |
| 4555 | bool bound = false; |
| 4556 | int err; |
| 4557 | |
| 4558 | mpi = kzalloc(sizeof(*mpi), GFP_KERNEL); |
| 4559 | if (!mpi) |
| 4560 | return -ENOMEM; |
| 4561 | |
| 4562 | mpi->mdev = mdev; |
| 4563 | err = mlx5_query_nic_vport_system_image_guid(mdev, |
| 4564 | &mpi->sys_image_guid); |
| 4565 | if (err) { |
| 4566 | kfree(mpi); |
| 4567 | return err; |
| 4568 | } |
| 4569 | |
| 4570 | mutex_lock(&mlx5_ib_multiport_mutex); |
| 4571 | list_for_each_entry(dev, &mlx5_ib_dev_list, ib_dev_list) { |
| 4572 | if (dev->sys_image_guid == mpi->sys_image_guid) |
| 4573 | bound = mlx5_ib_bind_slave_port(dev, mpi); |
| 4574 | |
| 4575 | if (bound) { |
| 4576 | rdma_roce_rescan_device(&dev->ib_dev); |
| 4577 | mpi->ibdev->ib_active = true; |
| 4578 | break; |
| 4579 | } |
| 4580 | } |
| 4581 | |
| 4582 | if (!bound) { |
| 4583 | list_add_tail(&mpi->list, &mlx5_ib_unaffiliated_port_list); |
| 4584 | dev_dbg(mdev->device, |
| 4585 | "no suitable IB device found to bind to, added to unaffiliated list.\n"); |
| 4586 | } |
| 4587 | mutex_unlock(&mlx5_ib_multiport_mutex); |
| 4588 | |
| 4589 | auxiliary_set_drvdata(adev, mpi); |
| 4590 | return 0; |
| 4591 | } |
| 4592 | |
| 4593 | static void mlx5r_mp_remove(struct auxiliary_device *adev) |
| 4594 | { |
| 4595 | struct mlx5_ib_multiport_info *mpi; |
| 4596 | |
| 4597 | mpi = auxiliary_get_drvdata(adev); |
| 4598 | mutex_lock(&mlx5_ib_multiport_mutex); |
| 4599 | if (mpi->ibdev) |
| 4600 | mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); |
| 4601 | else |
| 4602 | list_del(&mpi->list); |
| 4603 | mutex_unlock(&mlx5_ib_multiport_mutex); |
| 4604 | kfree(mpi); |
| 4605 | } |
| 4606 | |
| 4607 | static int mlx5r_probe(struct auxiliary_device *adev, |
| 4608 | const struct auxiliary_device_id *id) |
| 4609 | { |
| 4610 | struct mlx5_adev *idev = container_of(adev, struct mlx5_adev, adev); |
| 4611 | struct mlx5_core_dev *mdev = idev->mdev; |
| 4612 | const struct mlx5_ib_profile *profile; |
| 4613 | int port_type_cap, num_ports, ret; |
| 4614 | enum rdma_link_layer ll; |
| 4615 | struct mlx5_ib_dev *dev; |
| 4616 | |
| 4617 | port_type_cap = MLX5_CAP_GEN(mdev, port_type); |
| 4618 | ll = mlx5_port_type_cap_to_rdma_ll(port_type_cap); |
| 4619 | |
| 4620 | num_ports = max(MLX5_CAP_GEN(mdev, num_ports), |
| 4621 | MLX5_CAP_GEN(mdev, num_vhca_ports)); |
| 4622 | dev = ib_alloc_device(mlx5_ib_dev, ib_dev); |
| 4623 | if (!dev) |
| 4624 | return -ENOMEM; |
| 4625 | |
| 4626 | if (ll == IB_LINK_LAYER_INFINIBAND) { |
| 4627 | ret = mlx5_ib_get_plane_num(mdev, &dev->num_plane); |
| 4628 | if (ret) |
| 4629 | goto fail; |
| 4630 | } |
| 4631 | |
| 4632 | dev->port = kcalloc(num_ports, sizeof(*dev->port), |
| 4633 | GFP_KERNEL); |
| 4634 | if (!dev->port) { |
| 4635 | ret = -ENOMEM; |
| 4636 | goto fail; |
| 4637 | } |
| 4638 | |
| 4639 | dev->mdev = mdev; |
| 4640 | dev->num_ports = num_ports; |
| 4641 | |
| 4642 | if (ll == IB_LINK_LAYER_ETHERNET && !mlx5_get_roce_state(mdev)) |
| 4643 | profile = &raw_eth_profile; |
| 4644 | else |
| 4645 | profile = &pf_profile; |
| 4646 | |
| 4647 | ret = __mlx5_ib_add(dev, profile); |
| 4648 | if (ret) |
| 4649 | goto fail_ib_add; |
| 4650 | |
| 4651 | auxiliary_set_drvdata(adev, dev); |
| 4652 | return 0; |
| 4653 | |
| 4654 | fail_ib_add: |
| 4655 | kfree(dev->port); |
| 4656 | fail: |
| 4657 | ib_dealloc_device(&dev->ib_dev); |
| 4658 | return ret; |
| 4659 | } |
| 4660 | |
| 4661 | static void mlx5r_remove(struct auxiliary_device *adev) |
| 4662 | { |
| 4663 | struct mlx5_ib_dev *dev; |
| 4664 | |
| 4665 | dev = auxiliary_get_drvdata(adev); |
| 4666 | __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); |
| 4667 | } |
| 4668 | |
| 4669 | static const struct auxiliary_device_id mlx5r_mp_id_table[] = { |
| 4670 | { .name = MLX5_ADEV_NAME ".multiport", }, |
| 4671 | {}, |
| 4672 | }; |
| 4673 | |
| 4674 | static const struct auxiliary_device_id mlx5r_id_table[] = { |
| 4675 | { .name = MLX5_ADEV_NAME ".rdma", }, |
| 4676 | {}, |
| 4677 | }; |
| 4678 | |
| 4679 | MODULE_DEVICE_TABLE(auxiliary, mlx5r_mp_id_table); |
| 4680 | MODULE_DEVICE_TABLE(auxiliary, mlx5r_id_table); |
| 4681 | |
| 4682 | static struct auxiliary_driver mlx5r_mp_driver = { |
| 4683 | .name = "multiport", |
| 4684 | .probe = mlx5r_mp_probe, |
| 4685 | .remove = mlx5r_mp_remove, |
| 4686 | .id_table = mlx5r_mp_id_table, |
| 4687 | }; |
| 4688 | |
| 4689 | static struct auxiliary_driver mlx5r_driver = { |
| 4690 | .name = "rdma", |
| 4691 | .probe = mlx5r_probe, |
| 4692 | .remove = mlx5r_remove, |
| 4693 | .id_table = mlx5r_id_table, |
| 4694 | }; |
| 4695 | |
| 4696 | static int __init mlx5_ib_init(void) |
| 4697 | { |
| 4698 | int ret; |
| 4699 | |
| 4700 | xlt_emergency_page = (void *)__get_free_page(GFP_KERNEL); |
| 4701 | if (!xlt_emergency_page) |
| 4702 | return -ENOMEM; |
| 4703 | |
| 4704 | mlx5_ib_event_wq = alloc_ordered_workqueue("mlx5_ib_event_wq", 0); |
| 4705 | if (!mlx5_ib_event_wq) { |
| 4706 | free_page((unsigned long)xlt_emergency_page); |
| 4707 | return -ENOMEM; |
| 4708 | } |
| 4709 | |
| 4710 | ret = mlx5_ib_qp_event_init(); |
| 4711 | if (ret) |
| 4712 | goto qp_event_err; |
| 4713 | |
| 4714 | mlx5_ib_odp_init(); |
| 4715 | ret = mlx5r_rep_init(); |
| 4716 | if (ret) |
| 4717 | goto rep_err; |
| 4718 | ret = auxiliary_driver_register(&mlx5r_mp_driver); |
| 4719 | if (ret) |
| 4720 | goto mp_err; |
| 4721 | ret = auxiliary_driver_register(&mlx5r_driver); |
| 4722 | if (ret) |
| 4723 | goto drv_err; |
| 4724 | return 0; |
| 4725 | |
| 4726 | drv_err: |
| 4727 | auxiliary_driver_unregister(&mlx5r_mp_driver); |
| 4728 | mp_err: |
| 4729 | mlx5r_rep_cleanup(); |
| 4730 | rep_err: |
| 4731 | mlx5_ib_qp_event_cleanup(); |
| 4732 | qp_event_err: |
| 4733 | destroy_workqueue(mlx5_ib_event_wq); |
| 4734 | free_page((unsigned long)xlt_emergency_page); |
| 4735 | return ret; |
| 4736 | } |
| 4737 | |
| 4738 | static void __exit mlx5_ib_cleanup(void) |
| 4739 | { |
| 4740 | auxiliary_driver_unregister(&mlx5r_driver); |
| 4741 | auxiliary_driver_unregister(&mlx5r_mp_driver); |
| 4742 | mlx5r_rep_cleanup(); |
| 4743 | |
| 4744 | mlx5_ib_qp_event_cleanup(); |
| 4745 | destroy_workqueue(mlx5_ib_event_wq); |
| 4746 | free_page((unsigned long)xlt_emergency_page); |
| 4747 | } |
| 4748 | |
| 4749 | module_init(mlx5_ib_init); |
| 4750 | module_exit(mlx5_ib_cleanup); |