/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/sched.h>
#include <rdma/ib_user_verbs.h>
#include <linux/mlx5/vport.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_umem.h>
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
#include "user.h"
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
#define DRIVER_VERSION "2.2-1"
#define DRIVER_RELDATE	"Feb 2014"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);
static int deprecated_prof_sel = 2;
module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
static char mlx5_version[] =
	DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
	DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
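
/*
 * Report the port link layer from the port_type general HCA capability:
 * IB-type ports run InfiniBand, Eth-type ports run Ethernet, anything
 * else is reported as unspecified.
 */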
static enum rdma_link_layer
mlx5_ib_port_link_layer(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	switch (MLX5_CAP_GEN(dev->mdev, port_type)) {
	case MLX5_CAP_PORT_TYPE_IB:
		return IB_LINK_LAYER_INFINIBAND;
	case MLX5_CAP_PORT_TYPE_ETH:
		return IB_LINK_LAYER_ETHERNET;
	default:
		return IB_LINK_LAYER_UNSPECIFIED;
	}
}
static int mlx5_use_mad_ifc(struct mlx5_ib_dev *dev)
{
	return !dev->mdev->issi;
}
enum {
	MLX5_VPORT_ACCESS_METHOD_MAD,
	MLX5_VPORT_ACCESS_METHOD_HCA,
	MLX5_VPORT_ACCESS_METHOD_NIC,
};
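
/*
 * Vport attributes (GUIDs, pkeys, port attributes) are queried through one
 * of three paths: the MAD interface when the firmware reports ISSI 0,
 * NIC vport commands on Ethernet link layer, and HCA vport commands
 * otherwise.
 */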
static int mlx5_get_vport_access_method(struct ib_device *ibdev)
{
	if (mlx5_use_mad_ifc(to_mdev(ibdev)))
		return MLX5_VPORT_ACCESS_METHOD_MAD;

	if (mlx5_ib_port_link_layer(ibdev) ==
	    IB_LINK_LAYER_ETHERNET)
		return MLX5_VPORT_ACCESS_METHOD_NIC;

	return MLX5_VPORT_ACCESS_METHOD_HCA;
}
static int mlx5_query_system_image_guid(struct ib_device *ibdev,
					__be64 *sys_image_guid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_system_image_guid(ibdev,
							    sys_image_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_system_image_guid(mdev, &tmp);
		if (!err)
			*sys_image_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}
static int mlx5_query_max_pkeys(struct ib_device *ibdev,
				u16 *max_pkeys)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_max_pkeys(ibdev, max_pkeys);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		*max_pkeys = mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev,
						pkey_table_size));
		return 0;

	default:
		return -EINVAL;
	}
}
static int mlx5_query_vendor_id(struct ib_device *ibdev,
				u32 *vendor_id)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_vendor_id(ibdev, vendor_id);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_core_query_vendor_id(dev->mdev, vendor_id);

	default:
		return -EINVAL;
	}
}
static int mlx5_query_node_guid(struct mlx5_ib_dev *dev,
				__be64 *node_guid)
{
	u64 tmp;
	int err;

	switch (mlx5_get_vport_access_method(&dev->ib_dev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_node_guid(dev, node_guid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		err = mlx5_query_hca_vport_node_guid(dev->mdev, &tmp);
		if (!err)
			*node_guid = cpu_to_be64(tmp);
		return err;

	default:
		return -EINVAL;
	}
}
struct mlx5_reg_node_desc {
	u8	desc[64];
};

static int mlx5_query_node_desc(struct mlx5_ib_dev *dev, char *node_desc)
{
	struct mlx5_reg_node_desc in;

	if (mlx5_use_mad_ifc(dev))
		return mlx5_query_mad_ifc_node_desc(dev, node_desc);

	memset(&in, 0, sizeof(in));

	return mlx5_core_access_reg(dev->mdev, &in, sizeof(in), node_desc,
				    sizeof(struct mlx5_reg_node_desc),
				    MLX5_REG_NODE_DESC, 0, 0);
}
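
/*
 * Device attributes are assembled from the firmware general capabilities
 * (MLX5_CAP_GEN) plus the vport queries above. uhw carries optional
 * user-space extension data; this version consumes none, so any extra
 * input or output space is rejected with -EINVAL.
 */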
static int mlx5_ib_query_device(struct ib_device *ibdev,
				struct ib_device_attr *props,
				struct ib_udata *uhw)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	int err = -ENOMEM;
	int max_rq_sg;
	int max_sq_sg;
	u64 min_page_size = 1ull << MLX5_CAP_GEN(mdev, log_pg_sz);

	if (uhw->inlen || uhw->outlen)
		return -EINVAL;

	memset(props, 0, sizeof(*props));
	err = mlx5_query_system_image_guid(ibdev,
					   &props->sys_image_guid);
	if (err)
		return err;

	err = mlx5_query_max_pkeys(ibdev, &props->max_pkeys);
	if (err)
		return err;

	err = mlx5_query_vendor_id(ibdev, &props->vendor_id);
	if (err)
		return err;

	props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
		(fw_rev_min(dev->mdev) << 16) |
		fw_rev_sub(dev->mdev);
	props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
		IB_DEVICE_PORT_ACTIVE_EVENT |
		IB_DEVICE_SYS_IMAGE_GUID |
		IB_DEVICE_RC_RNR_NAK_GEN;

	if (MLX5_CAP_GEN(mdev, pkv))
		props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, qkv))
		props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
	if (MLX5_CAP_GEN(mdev, apm))
		props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
	if (MLX5_CAP_GEN(mdev, xrc))
		props->device_cap_flags |= IB_DEVICE_XRC;
	props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;
	if (MLX5_CAP_GEN(mdev, sho)) {
		props->device_cap_flags |= IB_DEVICE_SIGNATURE_HANDOVER;
		/* At this stage no support for signature handover */
		props->sig_prot_cap = IB_PROT_T10DIF_TYPE_1 |
				      IB_PROT_T10DIF_TYPE_2 |
				      IB_PROT_T10DIF_TYPE_3;
		props->sig_guard_cap = IB_GUARD_T10DIF_CRC |
				       IB_GUARD_T10DIF_CSUM;
	}
	if (MLX5_CAP_GEN(mdev, block_lb_mc))
		props->device_cap_flags |= IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;

	props->vendor_part_id	   = mdev->pdev->device;
	props->hw_ver		   = mdev->pdev->revision;

	props->max_mr_size	   = ~0ull;
	props->page_size_cap	   = ~(min_page_size - 1);
	props->max_qp		   = 1 << MLX5_CAP_GEN(mdev, log_max_qp);
	props->max_qp_wr	   = 1 << MLX5_CAP_GEN(mdev, log_max_qp_sz);
	max_rq_sg =  MLX5_CAP_GEN(mdev, max_wqe_sz_rq) /
		     sizeof(struct mlx5_wqe_data_seg);
	max_sq_sg = (MLX5_CAP_GEN(mdev, max_wqe_sz_sq) -
		     sizeof(struct mlx5_wqe_ctrl_seg)) /
		     sizeof(struct mlx5_wqe_data_seg);
	props->max_sge = min(max_rq_sg, max_sq_sg);
	props->max_sge_rd = props->max_sge;
	props->max_cq		   = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
	props->max_cqe		   = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
	props->max_mr		   = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
	props->max_pd		   = 1 << MLX5_CAP_GEN(mdev, log_max_pd);
	props->max_qp_rd_atom	   = 1 << MLX5_CAP_GEN(mdev, log_max_ra_req_qp);
	props->max_qp_init_rd_atom = 1 << MLX5_CAP_GEN(mdev, log_max_ra_res_qp);
	props->max_srq		   = 1 << MLX5_CAP_GEN(mdev, log_max_srq);
	props->max_srq_wr = (1 << MLX5_CAP_GEN(mdev, log_max_srq_sz)) - 1;
	props->local_ca_ack_delay  = MLX5_CAP_GEN(mdev, local_ca_ack_delay);
	props->max_res_rd_atom	   = props->max_qp_rd_atom * props->max_qp;
	props->max_srq_sge	   = max_rq_sg - 1;
	props->max_fast_reg_page_list_len = (unsigned int)-1;
	props->atomic_cap	   = IB_ATOMIC_NONE;
	props->masked_atomic_cap   = IB_ATOMIC_NONE;
	props->max_mcast_grp	   = 1 << MLX5_CAP_GEN(mdev, log_max_mcg);
	props->max_mcast_qp_attach = MLX5_CAP_GEN(mdev, max_qp_mcg);
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
					   props->max_mcast_grp;
	props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	if (MLX5_CAP_GEN(mdev, pg))
		props->device_cap_flags |= IB_DEVICE_ON_DEMAND_PAGING;
	props->odp_caps = dev->odp_caps;
#endif

	return 0;
}
enum mlx5_ib_width {
	MLX5_IB_WIDTH_1X	= 1 << 0,
	MLX5_IB_WIDTH_2X	= 1 << 1,
	MLX5_IB_WIDTH_4X	= 1 << 2,
	MLX5_IB_WIDTH_8X	= 1 << 3,
	MLX5_IB_WIDTH_12X	= 1 << 4
};
static int translate_active_width(struct ib_device *ibdev, u8 active_width,
				  u8 *ib_width)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	int err = 0;

	if (active_width & MLX5_IB_WIDTH_1X) {
		*ib_width = IB_WIDTH_1X;
	} else if (active_width & MLX5_IB_WIDTH_2X) {
		mlx5_ib_dbg(dev, "active_width %d is not supported by IB spec\n",
			    (int)active_width);
		err = -EINVAL;
	} else if (active_width & MLX5_IB_WIDTH_4X) {
		*ib_width = IB_WIDTH_4X;
	} else if (active_width & MLX5_IB_WIDTH_8X) {
		*ib_width = IB_WIDTH_8X;
	} else if (active_width & MLX5_IB_WIDTH_12X) {
		*ib_width = IB_WIDTH_12X;
	} else {
		mlx5_ib_dbg(dev, "Invalid active_width %d\n",
			    (int)active_width);
		err = -EINVAL;
	}

	return err;
}
static int mlx5_mtu_to_ib_mtu(int mtu)
{
	switch (mtu) {
	case 256: return 1;
	case 512: return 2;
	case 1024: return 3;
	case 2048: return 4;
	case 4096: return 5;
	default:
		pr_warn("invalid mtu\n");
		return -1;
	}
}
enum ib_max_vl_num {
	__IB_MAX_VL_0		= 1,
	__IB_MAX_VL_0_1		= 2,
	__IB_MAX_VL_0_3		= 3,
	__IB_MAX_VL_0_7		= 4,
	__IB_MAX_VL_0_14	= 5,
};

enum mlx5_vl_hw_cap {
	MLX5_VL_HW_0	= 1,
	MLX5_VL_HW_0_1	= 2,
	MLX5_VL_HW_0_2	= 3,
	MLX5_VL_HW_0_3	= 4,
	MLX5_VL_HW_0_4	= 5,
	MLX5_VL_HW_0_5	= 6,
	MLX5_VL_HW_0_6	= 7,
	MLX5_VL_HW_0_7	= 8,
	MLX5_VL_HW_0_14	= 15
};

static int translate_max_vl_num(struct ib_device *ibdev, u8 vl_hw_cap,
				u8 *max_vl_num)
{
	switch (vl_hw_cap) {
	case MLX5_VL_HW_0:
		*max_vl_num = __IB_MAX_VL_0;
		break;
	case MLX5_VL_HW_0_1:
		*max_vl_num = __IB_MAX_VL_0_1;
		break;
	case MLX5_VL_HW_0_3:
		*max_vl_num = __IB_MAX_VL_0_3;
		break;
	case MLX5_VL_HW_0_7:
		*max_vl_num = __IB_MAX_VL_0_7;
		break;
	case MLX5_VL_HW_0_14:
		*max_vl_num = __IB_MAX_VL_0_14;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
			       struct ib_port_attr *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;
	struct mlx5_hca_vport_context *rep;
	int max_mtu;
	int oper_mtu;
	int err;
	u8 ib_link_width_oper;
	u8 vl_hw_cap;

	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (!rep) {
		err = -ENOMEM;
		goto out;
	}

	memset(props, 0, sizeof(*props));

	err = mlx5_query_hca_vport_context(mdev, 0, port, 0, rep);
	if (err)
		goto out;

	props->lid		= rep->lid;
	props->lmc		= rep->lmc;
	props->sm_lid		= rep->sm_lid;
	props->sm_sl		= rep->sm_sl;
	props->state		= rep->vport_state;
	props->phys_state	= rep->port_physical_state;
	props->port_cap_flags	= rep->cap_mask1;
	props->gid_tbl_len	= mlx5_get_gid_table_len(MLX5_CAP_GEN(mdev, gid_table_size));
	props->max_msg_sz	= 1 << MLX5_CAP_GEN(mdev, log_max_msg);
	props->pkey_tbl_len	= mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(mdev, pkey_table_size));
	props->bad_pkey_cntr	= rep->pkey_violation_counter;
	props->qkey_viol_cntr	= rep->qkey_violation_counter;
	props->subnet_timeout	= rep->subnet_timeout;
	props->init_type_reply	= rep->init_type_reply;

	err = mlx5_query_port_link_width_oper(mdev, &ib_link_width_oper, port);
	if (err)
		goto out;

	err = translate_active_width(ibdev, ib_link_width_oper,
				     &props->active_width);
	if (err)
		goto out;
	err = mlx5_query_port_proto_oper(mdev, &props->active_speed, MLX5_PTYS_IB,
					 port);
	if (err)
		goto out;

	mlx5_query_port_max_mtu(mdev, &max_mtu, port);

	props->max_mtu = mlx5_mtu_to_ib_mtu(max_mtu);

	mlx5_query_port_oper_mtu(mdev, &oper_mtu, port);

	props->active_mtu = mlx5_mtu_to_ib_mtu(oper_mtu);

	err = mlx5_query_port_vl_hw_cap(mdev, &vl_hw_cap, port);
	if (err)
		goto out;

	err = translate_max_vl_num(ibdev, vl_hw_cap,
				   &props->max_vl_num);
out:
	kfree(rep);
	return err;
}
int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
		       struct ib_port_attr *props)
{
	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_port(ibdev, port, props);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_port(ibdev, port, props);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
			     union ib_gid *gid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_gids(ibdev, port, index, gid);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
		return mlx5_query_hca_vport_gid(mdev, 0, port, 0, index, gid);

	default:
		return -EINVAL;
	}
}
static int mlx5_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			      u16 *pkey)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_core_dev *mdev = dev->mdev;

	switch (mlx5_get_vport_access_method(ibdev)) {
	case MLX5_VPORT_ACCESS_METHOD_MAD:
		return mlx5_query_mad_ifc_pkey(ibdev, port, index, pkey);

	case MLX5_VPORT_ACCESS_METHOD_HCA:
	case MLX5_VPORT_ACCESS_METHOD_NIC:
		return mlx5_query_hca_vport_pkey(mdev, 0, port, 0, index,
						 pkey);
	default:
		return -EINVAL;
	}
}
static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
				 struct ib_device_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_reg_node_desc in;
	struct mlx5_reg_node_desc out;
	int err;

	if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
		return -EOPNOTSUPP;

	if (!(mask & IB_DEVICE_MODIFY_NODE_DESC))
		return 0;

	/*
	 * If possible, pass node desc to FW, so it can generate
	 * a 144 trap.  If cmd fails, just ignore.
	 */
	memcpy(&in, props->node_desc, 64);
	err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
				   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
	if (err)
		return err;

	memcpy(ibdev->node_desc, props->node_desc, 64);

	return err;
}
static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
			       struct ib_port_modify *props)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct ib_port_attr attr;
	u32 tmp;
	int err;

	mutex_lock(&dev->cap_mask_mutex);

	err = mlx5_ib_query_port(ibdev, port, &attr);
	if (err)
		goto out;

	tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
		~props->clr_port_cap_mask;

	err = mlx5_set_port_caps(dev->mdev, port, tmp);

out:
	mutex_unlock(&dev->cap_mask_mutex);
	return err;
}
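
/*
 * A user context owns a set of UAR pages, each of which provides
 * MLX5_BF_REGS_PER_PAGE blue-flame registers (uuars). Userspace asks for
 * total_num_uuars; the two request ABI revisions (v0 and v2) are told
 * apart purely by the length of the udata payload.
 */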
static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
						  struct ib_udata *udata)
{
	struct mlx5_ib_dev *dev = to_mdev(ibdev);
	struct mlx5_ib_alloc_ucontext_req_v2 req;
	struct mlx5_ib_alloc_ucontext_resp resp;
	struct mlx5_ib_ucontext *context;
	struct mlx5_uuar_info *uuari;
	struct mlx5_uar *uars;
	int gross_uuars;
	int num_uars;
	int ver;
	int uuarn;
	int err;
	int i;
	size_t reqlen;

	if (!dev->ib_active)
		return ERR_PTR(-EAGAIN);

	memset(&req, 0, sizeof(req));
	reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
	if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
		ver = 0;
	else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
		ver = 2;
	else
		return ERR_PTR(-EINVAL);

	err = ib_copy_from_udata(&req, udata, reqlen);
	if (err)
		return ERR_PTR(err);

	if (req.flags || req.reserved)
		return ERR_PTR(-EINVAL);

	if (req.total_num_uuars > MLX5_MAX_UUARS)
		return ERR_PTR(-ENOMEM);

	if (req.total_num_uuars == 0)
		return ERR_PTR(-EINVAL);

	req.total_num_uuars = ALIGN(req.total_num_uuars,
				    MLX5_NON_FP_BF_REGS_PER_PAGE);
	if (req.num_low_latency_uuars > req.total_num_uuars - 1)
		return ERR_PTR(-EINVAL);

	num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
	gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
	resp.qp_tab_size = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp);
	resp.bf_reg_size = 1 << MLX5_CAP_GEN(dev->mdev, log_bf_reg_size);
	resp.cache_line_size = L1_CACHE_BYTES;
	resp.max_sq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_sq);
	resp.max_rq_desc_sz = MLX5_CAP_GEN(dev->mdev, max_wqe_sz_rq);
	resp.max_send_wqebb = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_qp_sz);
	resp.max_srq_recv_wr = 1 << MLX5_CAP_GEN(dev->mdev, log_max_srq_sz);

	context = kzalloc(sizeof(*context), GFP_KERNEL);
	if (!context)
		return ERR_PTR(-ENOMEM);

	uuari = &context->uuari;
	mutex_init(&uuari->lock);
	uars = kcalloc(num_uars, sizeof(*uars), GFP_KERNEL);
	if (!uars) {
		err = -ENOMEM;
		goto out_ctx;
	}

	uuari->bitmap = kcalloc(BITS_TO_LONGS(gross_uuars),
				sizeof(*uuari->bitmap),
				GFP_KERNEL);
	if (!uuari->bitmap) {
		err = -ENOMEM;
		goto out_uar_ctx;
	}
	/*
	 * clear all fast path uuars
	 */
	for (i = 0; i < gross_uuars; i++) {
		uuarn = i & 3;
		if (uuarn == 2 || uuarn == 3)
			set_bit(i, uuari->bitmap);
	}

	uuari->count = kcalloc(gross_uuars, sizeof(*uuari->count), GFP_KERNEL);
	if (!uuari->count) {
		err = -ENOMEM;
		goto out_bitmap;
	}

	for (i = 0; i < num_uars; i++) {
		err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
		if (err)
			goto out_count;
	}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
	context->ibucontext.invalidate_range = &mlx5_ib_invalidate_range;
#endif

	INIT_LIST_HEAD(&context->db_page_list);
	mutex_init(&context->db_page_mutex);

	resp.tot_uuars = req.total_num_uuars;
	resp.num_ports = MLX5_CAP_GEN(dev->mdev, num_ports);
	err = ib_copy_to_udata(udata, &resp,
			       sizeof(resp) - sizeof(resp.reserved));
	if (err)
		goto out_uars;

	uuari->ver = ver;
	uuari->num_low_latency_uuars = req.num_low_latency_uuars;
	uuari->uars = uars;
	uuari->num_uars = num_uars;
	return &context->ibucontext;

out_uars:
	for (i--; i >= 0; i--)
		mlx5_cmd_free_uar(dev->mdev, uars[i].index);
out_count:
	kfree(uuari->count);

out_bitmap:
	kfree(uuari->bitmap);

out_uar_ctx:
	kfree(uars);

out_ctx:
	kfree(context);
	return ERR_PTR(err);
}
static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	int i;

	for (i = 0; i < uuari->num_uars; i++) {
		if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
			mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
	}

	kfree(uuari->count);
	kfree(uuari->bitmap);
	kfree(uuari->uars);
	kfree(context);

	return 0;
}
static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
{
	return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
}

static int get_command(unsigned long offset)
{
	return (offset >> MLX5_IB_MMAP_CMD_SHIFT) & MLX5_IB_MMAP_CMD_MASK;
}

static int get_arg(unsigned long offset)
{
	return offset & ((1 << MLX5_IB_MMAP_CMD_SHIFT) - 1);
}

static int get_index(unsigned long offset)
{
	return get_arg(offset);
}
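
/*
 * The mmap offset encodes a command in the bits of vm_pgoff above
 * MLX5_IB_MMAP_CMD_SHIFT and an argument (for REGULAR_PAGE, a UAR page
 * index) in the bits below it. A rough user-space sketch, with a
 * hypothetical fd and idx (normally this is hidden inside libmlx5):
 *
 *	off_t off = ((off_t)MLX5_IB_MMAP_REGULAR_PAGE <<
 *		     MLX5_IB_MMAP_CMD_SHIFT | idx) * page_size;
 *	void *uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, off);
 */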
static int mlx5_ib_mmap(struct ib_ucontext *ibcontext, struct vm_area_struct *vma)
{
	struct mlx5_ib_ucontext *context = to_mucontext(ibcontext);
	struct mlx5_ib_dev *dev = to_mdev(ibcontext->device);
	struct mlx5_uuar_info *uuari = &context->uuari;
	unsigned long command;
	unsigned long idx;
	phys_addr_t pfn;

	command = get_command(vma->vm_pgoff);
	switch (command) {
	case MLX5_IB_MMAP_REGULAR_PAGE:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE)
			return -EINVAL;

		idx = get_index(vma->vm_pgoff);
		if (idx >= uuari->num_uars)
			return -EINVAL;

		pfn = uar_index2pfn(dev, uuari->uars[idx].index);
		mlx5_ib_dbg(dev, "uar idx 0x%lx, pfn 0x%llx\n", idx,
			    (unsigned long long)pfn);

		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start, pfn,
				       PAGE_SIZE, vma->vm_page_prot))
			return -EAGAIN;

		mlx5_ib_dbg(dev, "mapped WC at 0x%lx, PA 0x%llx\n",
			    vma->vm_start,
			    (unsigned long long)pfn << PAGE_SHIFT);
		break;

	case MLX5_IB_MMAP_GET_CONTIGUOUS_PAGES:
		return -ENOSYS;

	default:
		return -EINVAL;
	}

	return 0;
}
static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
				      struct ib_ucontext *context,
				      struct ib_udata *udata)
{
	struct mlx5_ib_alloc_pd_resp resp;
	struct mlx5_ib_pd *pd;
	int err;

	pd = kmalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
	if (err) {
		kfree(pd);
		return ERR_PTR(err);
	}

	if (context) {
		resp.pdn = pd->pdn;
		if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
			mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
			kfree(pd);
			return ERR_PTR(-EFAULT);
		}
	}

	return &pd->ibpd;
}
static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
{
	struct mlx5_ib_dev *mdev = to_mdev(pd->device);
	struct mlx5_ib_pd *mpd = to_mpd(pd);

	mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
	kfree(mpd);

	return 0;
}
static bool outer_header_zero(u32 *match_criteria)
{
	int size = MLX5_ST_SZ_BYTES(fte_match_param);
	char *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_criteria,
					     outer_headers);

	return outer_headers_c[0] == 0 && !memcmp(outer_headers_c,
						  outer_headers_c + 1,
						  size - 1);
}
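
/*
 * Translate one ib_flow_spec into the device's match_criteria/match_value
 * pair: match_c holds the mask and match_v the value, both laid out per
 * the fte_match_param format. Only outer packet headers are matched.
 */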
static int parse_flow_attr(u32 *match_c, u32 *match_v,
			   union ib_flow_spec *ib_spec)
{
	void *outer_headers_c = MLX5_ADDR_OF(fte_match_param, match_c,
					     outer_headers);
	void *outer_headers_v = MLX5_ADDR_OF(fte_match_param, match_v,
					     outer_headers);

	switch (ib_spec->type) {
	case IB_FLOW_SPEC_ETH:
		if (ib_spec->size != sizeof(ib_spec->eth))
			return -EINVAL;

		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
					     dmac_47_16),
				ib_spec->eth.mask.dst_mac);
		ether_addr_copy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
					     dmac_47_16),
				ib_spec->eth.val.dst_mac);

		if (ib_spec->eth.mask.vlan_tag) {
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 vlan_tag, 1);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 vlan_tag, 1);

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_vid, ntohs(ib_spec->eth.val.vlan_tag));

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_cfi,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 12);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_cfi,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 12);

			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
				 first_prio,
				 ntohs(ib_spec->eth.mask.vlan_tag) >> 13);
			MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
				 first_prio,
				 ntohs(ib_spec->eth.val.vlan_tag) >> 13);
		}
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
			 ethertype, ntohs(ib_spec->eth.mask.ether_type));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
			 ethertype, ntohs(ib_spec->eth.val.ether_type));
		break;
	case IB_FLOW_SPEC_IPV4:
		if (ib_spec->size != sizeof(ib_spec->ipv4))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
			 ethertype, 0xffff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
			 ethertype, ETH_P_IP);

		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.src_ip,
		       sizeof(ib_spec->ipv4.mask.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    src_ipv4_src_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.src_ip,
		       sizeof(ib_spec->ipv4.val.src_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_c,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.mask.dst_ip,
		       sizeof(ib_spec->ipv4.mask.dst_ip));
		memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, outer_headers_v,
				    dst_ipv4_dst_ipv6.ipv4_layout.ipv4),
		       &ib_spec->ipv4.val.dst_ip,
		       sizeof(ib_spec->ipv4.val.dst_ip));
		break;
	case IB_FLOW_SPEC_TCP:
		if (ib_spec->size != sizeof(ib_spec->tcp_udp))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
			 IPPROTO_TCP);

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, tcp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, tcp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	case IB_FLOW_SPEC_UDP:
		if (ib_spec->size != sizeof(ib_spec->tcp_udp))
			return -EINVAL;

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, ip_protocol,
			 0xff);
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, ip_protocol,
			 IPPROTO_UDP);

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_sport,
			 ntohs(ib_spec->tcp_udp.mask.src_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_sport,
			 ntohs(ib_spec->tcp_udp.val.src_port));

		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c, udp_dport,
			 ntohs(ib_spec->tcp_udp.mask.dst_port));
		MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v, udp_dport,
			 ntohs(ib_spec->tcp_udp.val.dst_port));
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/* If a flow could catch both multicast and unicast packets,
 * it won't fall into the multicast flow steering table and this rule
 * could steal other multicast packets.
 */
static bool flow_is_multicast_only(struct ib_flow_attr *ib_attr)
{
	struct ib_flow_spec_eth *eth_spec;

	if (ib_attr->type != IB_FLOW_ATTR_NORMAL ||
	    ib_attr->size < sizeof(struct ib_flow_attr) +
	    sizeof(struct ib_flow_spec_eth) ||
	    ib_attr->num_of_specs < 1)
		return false;

	eth_spec = (struct ib_flow_spec_eth *)(ib_attr + 1);
	if (eth_spec->type != IB_FLOW_SPEC_ETH ||
	    eth_spec->size != sizeof(*eth_spec))
		return false;

	return is_multicast_ether_addr(eth_spec->mask.dst_mac) &&
	       is_multicast_ether_addr(eth_spec->val.dst_mac);
}
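
/*
 * An IPv4 spec is only usable when the L2 spec, if one is present, pins
 * the ethertype to exactly IPv4 (mask 0xffff, value ETH_P_IP); otherwise
 * the attribute list is rejected.
 */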
static bool is_valid_attr(struct ib_flow_attr *flow_attr)
{
	union ib_flow_spec *ib_spec = (union ib_flow_spec *)(flow_attr + 1);
	bool has_ipv4_spec = false;
	bool eth_type_ipv4 = true;
	unsigned int spec_index;

	/* Validate that ethertype is correct */
	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		if (ib_spec->type == IB_FLOW_SPEC_ETH &&
		    ib_spec->eth.mask.ether_type) {
			if (!((ib_spec->eth.mask.ether_type == htons(0xffff)) &&
			      ib_spec->eth.val.ether_type == htons(ETH_P_IP)))
				eth_type_ipv4 = false;
		} else if (ib_spec->type == IB_FLOW_SPEC_IPV4) {
			has_ipv4_spec = true;
		}
		ib_spec = (void *)ib_spec + ib_spec->size;
	}
	return !has_ipv4_spec || eth_type_ipv4;
}
static void put_flow_table(struct mlx5_ib_dev *dev,
			   struct mlx5_ib_flow_prio *prio, bool ft_added)
{
	prio->refcount -= !!ft_added;
	if (!prio->refcount) {
		mlx5_destroy_flow_table(prio->flow_table);
		prio->flow_table = NULL;
	}
}
static int mlx5_ib_destroy_flow(struct ib_flow *flow_id)
{
	struct mlx5_ib_dev *dev = to_mdev(flow_id->qp->device);
	struct mlx5_ib_flow_handler *handler = container_of(flow_id,
							    struct mlx5_ib_flow_handler,
							    ibflow);
	struct mlx5_ib_flow_handler *iter, *tmp;

	mutex_lock(&dev->flow_db.lock);

	list_for_each_entry_safe(iter, tmp, &handler->list, list) {
		mlx5_del_flow_rule(iter->rule);
		list_del(&iter->list);
		kfree(iter);
	}

	mlx5_del_flow_rule(handler->rule);
	put_flow_table(dev, &dev->flow_db.prios[handler->prio], true);
	mutex_unlock(&dev->flow_db.lock);

	kfree(handler);

	return 0;
}
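
/*
 * Flow tables are created lazily, one per priority. NORMAL rules land in
 * the bypass namespace; ALL_DEFAULT/MC_DEFAULT ("leftovers") rules land
 * in a dedicated leftovers namespace so they only catch traffic that no
 * other rule claimed.
 */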
#define LEFTOVERS_RULE_NUM	 2
static void build_leftovers_ft_param(int *priority,
				     int *num_entries,
				     int *num_groups)
{
	*priority = MLX5_IB_FLOW_LEFTOVERS_PRIO;
	*num_entries = LEFTOVERS_RULE_NUM;
	*num_groups = LEFTOVERS_RULE_NUM;
}

#define MLX5_FS_MAX_TYPES	 10
#define MLX5_FS_MAX_ENTRIES	 32000UL
static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev,
						struct ib_flow_attr *flow_attr)
{
	struct mlx5_flow_namespace *ns = NULL;
	struct mlx5_ib_flow_prio *prio;
	struct mlx5_flow_table *ft;
	int num_entries;
	int num_groups;
	int priority;
	int err = 0;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		if (flow_is_multicast_only(flow_attr))
			priority = MLX5_IB_FLOW_MCAST_PRIO;
		else
			priority = flow_attr->priority;
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_BYPASS);
		num_entries = MLX5_FS_MAX_ENTRIES;
		num_groups = MLX5_FS_MAX_TYPES;
		prio = &dev->flow_db.prios[priority];
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		ns = mlx5_get_flow_namespace(dev->mdev,
					     MLX5_FLOW_NAMESPACE_LEFTOVERS);
		build_leftovers_ft_param(&priority,
					 &num_entries,
					 &num_groups);
		prio = &dev->flow_db.prios[MLX5_IB_FLOW_LEFTOVERS_PRIO];
	}

	if (!ns)
		return ERR_PTR(-ENOTSUPP);

	ft = prio->flow_table;
	if (!ft) {
		ft = mlx5_create_auto_grouped_flow_table(ns, priority,
							 num_entries,
							 num_groups);

		if (!IS_ERR(ft)) {
			prio->refcount = 0;
			prio->flow_table = ft;
		} else {
			err = PTR_ERR(ft);
		}
	}

	return err ? ERR_PTR(err) : prio;
}
static struct mlx5_ib_flow_handler *create_flow_rule(struct mlx5_ib_dev *dev,
						     struct mlx5_ib_flow_prio *ft_prio,
						     struct ib_flow_attr *flow_attr,
						     struct mlx5_flow_destination *dst)
{
	struct mlx5_flow_table	*ft = ft_prio->flow_table;
	struct mlx5_ib_flow_handler *handler;
	void *ib_flow = flow_attr + 1;
	u8 match_criteria_enable = 0;
	unsigned int spec_index;
	u32 *match_c;
	u32 *match_v;
	int err = 0;

	if (!is_valid_attr(flow_attr))
		return ERR_PTR(-EINVAL);

	match_c = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	match_v = kzalloc(MLX5_ST_SZ_BYTES(fte_match_param), GFP_KERNEL);
	handler = kzalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler || !match_c || !match_v) {
		err = -ENOMEM;
		goto free;
	}

	INIT_LIST_HEAD(&handler->list);

	for (spec_index = 0; spec_index < flow_attr->num_of_specs; spec_index++) {
		err = parse_flow_attr(match_c, match_v, ib_flow);
		if (err < 0)
			goto free;

		ib_flow += ((union ib_flow_spec *)ib_flow)->size;
	}

	/* Outer header support only */
	match_criteria_enable = (!outer_header_zero(match_c)) << 0;
	handler->rule = mlx5_add_flow_rule(ft, match_criteria_enable,
					   match_c, match_v,
					   MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,
					   MLX5_FS_DEFAULT_FLOW_TAG,
					   dst);

	if (IS_ERR(handler->rule)) {
		err = PTR_ERR(handler->rule);
		goto free;
	}

	handler->prio = ft_prio - dev->flow_db.prios;

	ft_prio->flow_table = ft;
free:
	if (err)
		kfree(handler);
	kfree(match_c);
	kfree(match_v);
	return err ? ERR_PTR(err) : handler;
}
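
/*
 * Leftovers rules are built from two canned ETH specs that test only the
 * multicast bit of the destination MAC: mask 0x1/value 0x1 catches
 * multicast, mask 0x1/value 0x0 catches unicast. ALL_DEFAULT installs
 * both rules; MC_DEFAULT installs just the multicast one.
 */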
enum {
	LEFTOVERS_MC,
	LEFTOVERS_UC,
};

static struct mlx5_ib_flow_handler *create_leftovers_rule(struct mlx5_ib_dev *dev,
							   struct mlx5_ib_flow_prio *ft_prio,
							   struct ib_flow_attr *flow_attr,
							   struct mlx5_flow_destination *dst)
{
	struct mlx5_ib_flow_handler *handler_ucast = NULL;
	struct mlx5_ib_flow_handler *handler = NULL;

	static struct {
		struct ib_flow_attr	flow_attr;
		struct ib_flow_spec_eth eth_flow;
	} leftovers_specs[] = {
		[LEFTOVERS_MC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val =  {.dst_mac = {0x1} }
			}
		},
		[LEFTOVERS_UC] = {
			.flow_attr = {
				.num_of_specs = 1,
				.size = sizeof(leftovers_specs[0])
			},
			.eth_flow = {
				.type = IB_FLOW_SPEC_ETH,
				.size = sizeof(struct ib_flow_spec_eth),
				.mask = {.dst_mac = {0x1} },
				.val = {.dst_mac = {} }
			}
		}
	};

	handler = create_flow_rule(dev, ft_prio,
				   &leftovers_specs[LEFTOVERS_MC].flow_attr,
				   dst);
	if (!IS_ERR(handler) &&
	    flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT) {
		handler_ucast = create_flow_rule(dev, ft_prio,
						 &leftovers_specs[LEFTOVERS_UC].flow_attr,
						 dst);
		if (IS_ERR(handler_ucast)) {
			kfree(handler);
			handler = handler_ucast;
		} else {
			list_add(&handler_ucast->list, &handler->list);
		}
	}

	return handler;
}
static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
					   struct ib_flow_attr *flow_attr,
					   int domain)
{
	struct mlx5_ib_dev *dev = to_mdev(qp->device);
	struct mlx5_ib_flow_handler *handler = NULL;
	struct mlx5_flow_destination *dst = NULL;
	struct mlx5_ib_flow_prio *ft_prio;
	int err;

	if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
		return ERR_PTR(-ENOSPC);

	if (domain != IB_FLOW_DOMAIN_USER ||
	    flow_attr->port > MLX5_CAP_GEN(dev->mdev, num_ports) ||
	    flow_attr->flags)
		return ERR_PTR(-EINVAL);

	dst = kzalloc(sizeof(*dst), GFP_KERNEL);
	if (!dst)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&dev->flow_db.lock);

	ft_prio = get_flow_table(dev, flow_attr);
	if (IS_ERR(ft_prio)) {
		err = PTR_ERR(ft_prio);
		goto unlock;
	}

	dst->type = MLX5_FLOW_DESTINATION_TYPE_TIR;
	dst->tir_num = to_mqp(qp)->raw_packet_qp.rq.tirn;

	if (flow_attr->type == IB_FLOW_ATTR_NORMAL) {
		handler = create_flow_rule(dev, ft_prio, flow_attr,
					   dst);
	} else if (flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
		   flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT) {
		handler = create_leftovers_rule(dev, ft_prio, flow_attr,
						dst);
	} else {
		err = -EINVAL;
		goto destroy_ft;
	}

	if (IS_ERR(handler)) {
		err = PTR_ERR(handler);
		handler = NULL;
		goto destroy_ft;
	}

	ft_prio->refcount++;
	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);

	return &handler->ibflow;

destroy_ft:
	put_flow_table(dev, ft_prio, false);
unlock:
	mutex_unlock(&dev->flow_db.lock);
	kfree(dst);
	kfree(handler);
	return ERR_PTR(err);
}
static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}

static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
	int err;

	err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
	if (err)
		mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
			     ibqp->qp_num, gid->raw);

	return err;
}
static int init_node_data(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_query_node_desc(dev, dev->ib_dev.node_desc);
	if (err)
		return err;

	dev->mdev->rev_id = dev->mdev->pdev->revision;

	return mlx5_query_node_guid(dev, &dev->ib_dev.node_guid);
}
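
/*
 * Read-only sysfs attributes exposed per device (e.g. under
 * /sys/class/infiniband/mlx5_0/): firmware pages, registered-MR pages,
 * HCA part number, firmware version, hardware revision and board ID.
 */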
static ssize_t show_fw_pages(struct device *device, struct device_attribute *attr,
			     char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
}

static ssize_t show_reg_pages(struct device *device,
			      struct device_attribute *attr, char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d\n", atomic_read(&dev->mdev->priv.reg_pages));
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
			   char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
		       fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%x\n", dev->mdev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
			  char *buf)
{
	struct mlx5_ib_dev *dev =
		container_of(device, struct mlx5_ib_dev, ib_dev.dev);

	return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
		       dev->mdev->board_id);
}
static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
static DEVICE_ATTR(fw_ver,   S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca,    NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board,  NULL);
static DEVICE_ATTR(fw_pages, S_IRUGO, show_fw_pages, NULL);
static DEVICE_ATTR(reg_pages, S_IRUGO, show_reg_pages, NULL);

static struct device_attribute *mlx5_class_attributes[] = {
	&dev_attr_hw_rev,
	&dev_attr_fw_ver,
	&dev_attr_hca_type,
	&dev_attr_board_id,
	&dev_attr_fw_pages,
	&dev_attr_reg_pages,
};
static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
			  enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
	struct ib_event ibev;
	u8 port = 0;

	switch (event) {
	case MLX5_DEV_EVENT_SYS_ERROR:
		ibdev->ib_active = false;
		ibev.event = IB_EVENT_DEVICE_FATAL;
		break;

	case MLX5_DEV_EVENT_PORT_UP:
		ibev.event = IB_EVENT_PORT_ACTIVE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_DOWN:
		ibev.event = IB_EVENT_PORT_ERR;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PORT_INITIALIZED:
		/* not used by ULPs */
		return;

	case MLX5_DEV_EVENT_LID_CHANGE:
		ibev.event = IB_EVENT_LID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_PKEY_CHANGE:
		ibev.event = IB_EVENT_PKEY_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_GUID_CHANGE:
		ibev.event = IB_EVENT_GID_CHANGE;
		port = (u8)param;
		break;

	case MLX5_DEV_EVENT_CLIENT_REREG:
		ibev.event = IB_EVENT_CLIENT_REREGISTER;
		port = (u8)param;
		break;
	}

	ibev.device	      = &ibdev->ib_dev;
	ibev.element.port_num = port;

	if (port < 1 || port > ibdev->num_ports) {
		mlx5_ib_warn(ibdev, "warning: event on port %d\n", port);
		return;
	}

	if (ibdev->ib_active)
		ib_dispatch_event(&ibev);
}
static void get_ext_port_caps(struct mlx5_ib_dev *dev)
{
	int port;

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++)
		mlx5_query_ext_port_caps(dev, port);
}
static int get_port_caps(struct mlx5_ib_dev *dev)
{
	struct ib_device_attr *dprops = NULL;
	struct ib_port_attr *pprops = NULL;
	int err = -ENOMEM;
	int port;
	struct ib_udata uhw = {.inlen = 0, .outlen = 0};

	pprops = kmalloc(sizeof(*pprops), GFP_KERNEL);
	if (!pprops)
		goto out;

	dprops = kmalloc(sizeof(*dprops), GFP_KERNEL);
	if (!dprops)
		goto out;

	err = mlx5_ib_query_device(&dev->ib_dev, dprops, &uhw);
	if (err) {
		mlx5_ib_warn(dev, "query_device failed %d\n", err);
		goto out;
	}

	for (port = 1; port <= MLX5_CAP_GEN(dev->mdev, num_ports); port++) {
		err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
		if (err) {
			mlx5_ib_warn(dev, "query_port %d failed %d\n",
				     port, err);
			break;
		}
		dev->mdev->port_caps[port - 1].pkey_table_len =
			dprops->max_pkeys;
		dev->mdev->port_caps[port - 1].gid_table_len =
			pprops->gid_tbl_len;
		mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
			    dprops->max_pkeys, pprops->gid_tbl_len);
	}

out:
	kfree(pprops);
	kfree(dprops);

	return err;
}
static void destroy_umrc_res(struct mlx5_ib_dev *dev)
{
	int err;

	err = mlx5_mr_cache_cleanup(dev);
	if (err)
		mlx5_ib_warn(dev, "mr cache cleanup failed\n");

	mlx5_ib_destroy_qp(dev->umrc.qp);
	ib_destroy_cq(dev->umrc.cq);
	ib_dealloc_pd(dev->umrc.pd);
}
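
/*
 * Set up the resources for UMR (user-memory re-registration) work
 * requests: a dedicated PD, CQ and an MLX5_IB_QPT_REG_UMR QP, driven
 * through the usual RESET->INIT->RTR->RTS sequence by the modify_qp
 * calls below.
 */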
static int create_umr_res(struct mlx5_ib_dev *dev)
{
	struct ib_qp_init_attr *init_attr = NULL;
	struct ib_qp_attr *attr = NULL;
	struct ib_pd *pd;
	struct ib_cq *cq;
	struct ib_qp *qp;
	struct ib_cq_init_attr cq_attr = {};
	int ret;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!attr || !init_attr) {
		ret = -ENOMEM;
		goto error_0;
	}

	pd = ib_alloc_pd(&dev->ib_dev);
	if (IS_ERR(pd)) {
		mlx5_ib_dbg(dev, "Couldn't create PD for sync UMR QP\n");
		ret = PTR_ERR(pd);
		goto error_0;
	}

	cq_attr.cqe = 128;
	cq = ib_create_cq(&dev->ib_dev, mlx5_umr_cq_handler, NULL, NULL,
			  &cq_attr);
	if (IS_ERR(cq)) {
		mlx5_ib_dbg(dev, "Couldn't create CQ for sync UMR QP\n");
		ret = PTR_ERR(cq);
		goto error_2;
	}
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	init_attr->send_cq = cq;
	init_attr->recv_cq = cq;
	init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
	init_attr->cap.max_send_wr = MAX_UMR_WR;
	init_attr->cap.max_send_sge = 1;
	init_attr->qp_type = MLX5_IB_QPT_REG_UMR;
	init_attr->port_num = 1;
	qp = mlx5_ib_create_qp(pd, init_attr, NULL);
	if (IS_ERR(qp)) {
		mlx5_ib_dbg(dev, "Couldn't create sync UMR QP\n");
		ret = PTR_ERR(qp);
		goto error_3;
	}
	qp->device     = &dev->ib_dev;
	qp->real_qp    = qp;
	qp->uobject    = NULL;
	qp->qp_type    = MLX5_IB_QPT_REG_UMR;

	attr->qp_state = IB_QPS_INIT;
	attr->port_num = 1;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
				IB_QP_PORT, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify UMR QP\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTR;
	attr->path_mtu = IB_MTU_256;

	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rtr\n");
		goto error_4;
	}

	memset(attr, 0, sizeof(*attr));
	attr->qp_state = IB_QPS_RTS;
	ret = mlx5_ib_modify_qp(qp, attr, IB_QP_STATE, NULL);
	if (ret) {
		mlx5_ib_dbg(dev, "Couldn't modify umr QP to rts\n");
		goto error_4;
	}

	dev->umrc.qp = qp;
	dev->umrc.cq = cq;
	dev->umrc.pd = pd;

	sema_init(&dev->umrc.sem, MAX_UMR_WR);
	ret = mlx5_mr_cache_init(dev);
	if (ret) {
		mlx5_ib_warn(dev, "mr cache init failed %d\n", ret);
		goto error_4;
	}

	kfree(attr);
	kfree(init_attr);

	return 0;

error_4:
	mlx5_ib_destroy_qp(qp);

error_3:
	ib_destroy_cq(cq);

error_2:
	ib_dealloc_pd(pd);

error_0:
	kfree(attr);
	kfree(init_attr);
	return ret;
}
static int create_dev_resources(struct mlx5_ib_resources *devr)
{
	struct ib_srq_init_attr attr;
	struct mlx5_ib_dev *dev;
	struct ib_cq_init_attr cq_attr = {.cqe = 1};
	int ret = 0;

	dev = container_of(devr, struct mlx5_ib_dev, devr);

	devr->p0 = mlx5_ib_alloc_pd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->p0)) {
		ret = PTR_ERR(devr->p0);
		goto error0;
	}
	devr->p0->device  = &dev->ib_dev;
	devr->p0->uobject = NULL;
	atomic_set(&devr->p0->usecnt, 0);

	devr->c0 = mlx5_ib_create_cq(&dev->ib_dev, &cq_attr, NULL, NULL);
	if (IS_ERR(devr->c0)) {
		ret = PTR_ERR(devr->c0);
		goto error1;
	}
	devr->c0->device	= &dev->ib_dev;
	devr->c0->uobject	= NULL;
	devr->c0->comp_handler	= NULL;
	devr->c0->event_handler = NULL;
	devr->c0->cq_context	= NULL;
	atomic_set(&devr->c0->usecnt, 0);

	devr->x0 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x0)) {
		ret = PTR_ERR(devr->x0);
		goto error2;
	}
	devr->x0->device = &dev->ib_dev;
	devr->x0->inode = NULL;
	atomic_set(&devr->x0->usecnt, 0);
	mutex_init(&devr->x0->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x0->tgt_qp_list);

	devr->x1 = mlx5_ib_alloc_xrcd(&dev->ib_dev, NULL, NULL);
	if (IS_ERR(devr->x1)) {
		ret = PTR_ERR(devr->x1);
		goto error3;
	}
	devr->x1->device = &dev->ib_dev;
	devr->x1->inode = NULL;
	atomic_set(&devr->x1->usecnt, 0);
	mutex_init(&devr->x1->tgt_qp_mutex);
	INIT_LIST_HEAD(&devr->x1->tgt_qp_list);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_XRC;
	attr.ext.xrc.cq = devr->c0;
	attr.ext.xrc.xrcd = devr->x0;

	devr->s0 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s0)) {
		ret = PTR_ERR(devr->s0);
		goto error4;
	}
	devr->s0->device	= &dev->ib_dev;
	devr->s0->pd		= devr->p0;
	devr->s0->uobject	= NULL;
	devr->s0->event_handler = NULL;
	devr->s0->srq_context	= NULL;
	devr->s0->srq_type	= IB_SRQT_XRC;
	devr->s0->ext.xrc.xrcd	= devr->x0;
	devr->s0->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->s0->ext.xrc.xrcd->usecnt);
	atomic_inc(&devr->s0->ext.xrc.cq->usecnt);
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s0->usecnt, 0);

	memset(&attr, 0, sizeof(attr));
	attr.attr.max_sge = 1;
	attr.attr.max_wr = 1;
	attr.srq_type = IB_SRQT_BASIC;
	devr->s1 = mlx5_ib_create_srq(devr->p0, &attr, NULL);
	if (IS_ERR(devr->s1)) {
		ret = PTR_ERR(devr->s1);
		goto error5;
	}
	devr->s1->device	= &dev->ib_dev;
	devr->s1->pd		= devr->p0;
	devr->s1->uobject	= NULL;
	devr->s1->event_handler = NULL;
	devr->s1->srq_context	= NULL;
	devr->s1->srq_type	= IB_SRQT_BASIC;
	devr->s1->ext.xrc.cq	= devr->c0;
	atomic_inc(&devr->p0->usecnt);
	atomic_set(&devr->s1->usecnt, 0);

	return 0;

error5:
	mlx5_ib_destroy_srq(devr->s0);
error4:
	mlx5_ib_dealloc_xrcd(devr->x1);
error3:
	mlx5_ib_dealloc_xrcd(devr->x0);
error2:
	mlx5_ib_destroy_cq(devr->c0);
error1:
	mlx5_ib_dealloc_pd(devr->p0);
error0:
	return ret;
}
static void destroy_dev_resources(struct mlx5_ib_resources *devr)
{
	mlx5_ib_destroy_srq(devr->s1);
	mlx5_ib_destroy_srq(devr->s0);
	mlx5_ib_dealloc_xrcd(devr->x0);
	mlx5_ib_dealloc_xrcd(devr->x1);
	mlx5_ib_destroy_cq(devr->c0);
	mlx5_ib_dealloc_pd(devr->p0);
}
static int mlx5_port_immutable(struct ib_device *ibdev, u8 port_num,
			       struct ib_port_immutable *immutable)
{
	struct ib_port_attr attr;
	int err;

	err = mlx5_ib_query_port(ibdev, port_num, &attr);
	if (err)
		return err;

	immutable->pkey_tbl_len = attr.pkey_tbl_len;
	immutable->gid_tbl_len = attr.gid_tbl_len;
	immutable->core_cap_flags = RDMA_CORE_PORT_IBA_IB;
	immutable->max_mad_size = IB_MGMT_MAD_SIZE;

	return 0;
}
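
/*
 * Probe path: called by mlx5_core for each new device. Fills in the
 * ib_device verbs tables, creates device resources and the UMR machinery,
 * registers with the IB core, and finally marks the device active so
 * ucontext allocation can proceed.
 */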
static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
{
	struct mlx5_ib_dev *dev;
	int err;
	int i;

	/* don't create IB instance over Eth ports, no RoCE yet! */
	if (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
		return NULL;

	printk_once(KERN_INFO "%s", mlx5_version);

	dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
	if (!dev)
		return NULL;

	dev->mdev = mdev;

	err = get_port_caps(dev);
	if (err)
		goto err_dealloc;

	if (mlx5_use_mad_ifc(dev))
		get_ext_port_caps(dev);

	MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);

	strlcpy(dev->ib_dev.name, "mlx5_%d", IB_DEVICE_NAME_MAX);
	dev->ib_dev.owner		= THIS_MODULE;
	dev->ib_dev.node_type		= RDMA_NODE_IB_CA;
	dev->ib_dev.local_dma_lkey	= 0 /* not supported for now */;
	dev->num_ports			= MLX5_CAP_GEN(mdev, num_ports);
	dev->ib_dev.phys_port_cnt	= dev->num_ports;
	dev->ib_dev.num_comp_vectors	=
		dev->mdev->priv.eq_table.num_comp_vectors;
	dev->ib_dev.dma_device	= &mdev->pdev->dev;

	dev->ib_dev.uverbs_abi_ver	= MLX5_IB_UVERBS_ABI_VERSION;
	dev->ib_dev.uverbs_cmd_mask	=
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE)	|
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT)		|
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD)		|
		(1ull << IB_USER_VERBS_CMD_REG_MR)		|
		(1ull << IB_USER_VERBS_CMD_DEREG_MR)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_RESIZE_CQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_QP)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_QP)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP)		|
		(1ull << IB_USER_VERBS_CMD_ATTACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_DETACH_MCAST)	|
		(1ull << IB_USER_VERBS_CMD_CREATE_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_MODIFY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_QUERY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_DESTROY_SRQ)		|
		(1ull << IB_USER_VERBS_CMD_CREATE_XSRQ)		|
		(1ull << IB_USER_VERBS_CMD_OPEN_QP);
	dev->ib_dev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	dev->ib_dev.query_device	= mlx5_ib_query_device;
	dev->ib_dev.query_port		= mlx5_ib_query_port;
	dev->ib_dev.query_gid		= mlx5_ib_query_gid;
	dev->ib_dev.query_pkey		= mlx5_ib_query_pkey;
	dev->ib_dev.modify_device	= mlx5_ib_modify_device;
	dev->ib_dev.modify_port		= mlx5_ib_modify_port;
	dev->ib_dev.alloc_ucontext	= mlx5_ib_alloc_ucontext;
	dev->ib_dev.dealloc_ucontext	= mlx5_ib_dealloc_ucontext;
	dev->ib_dev.mmap		= mlx5_ib_mmap;
	dev->ib_dev.alloc_pd		= mlx5_ib_alloc_pd;
	dev->ib_dev.dealloc_pd		= mlx5_ib_dealloc_pd;
	dev->ib_dev.create_ah		= mlx5_ib_create_ah;
	dev->ib_dev.query_ah		= mlx5_ib_query_ah;
	dev->ib_dev.destroy_ah		= mlx5_ib_destroy_ah;
	dev->ib_dev.create_srq		= mlx5_ib_create_srq;
	dev->ib_dev.modify_srq		= mlx5_ib_modify_srq;
	dev->ib_dev.query_srq		= mlx5_ib_query_srq;
	dev->ib_dev.destroy_srq		= mlx5_ib_destroy_srq;
	dev->ib_dev.post_srq_recv	= mlx5_ib_post_srq_recv;
	dev->ib_dev.create_qp		= mlx5_ib_create_qp;
	dev->ib_dev.modify_qp		= mlx5_ib_modify_qp;
	dev->ib_dev.query_qp		= mlx5_ib_query_qp;
	dev->ib_dev.destroy_qp		= mlx5_ib_destroy_qp;
	dev->ib_dev.post_send		= mlx5_ib_post_send;
	dev->ib_dev.post_recv		= mlx5_ib_post_recv;
	dev->ib_dev.create_cq		= mlx5_ib_create_cq;
	dev->ib_dev.modify_cq		= mlx5_ib_modify_cq;
	dev->ib_dev.resize_cq		= mlx5_ib_resize_cq;
	dev->ib_dev.destroy_cq		= mlx5_ib_destroy_cq;
	dev->ib_dev.poll_cq		= mlx5_ib_poll_cq;
	dev->ib_dev.req_notify_cq	= mlx5_ib_arm_cq;
	dev->ib_dev.get_dma_mr		= mlx5_ib_get_dma_mr;
	dev->ib_dev.reg_user_mr		= mlx5_ib_reg_user_mr;
	dev->ib_dev.dereg_mr		= mlx5_ib_dereg_mr;
	dev->ib_dev.attach_mcast	= mlx5_ib_mcg_attach;
	dev->ib_dev.detach_mcast	= mlx5_ib_mcg_detach;
	dev->ib_dev.process_mad		= mlx5_ib_process_mad;
	dev->ib_dev.alloc_mr		= mlx5_ib_alloc_mr;
	dev->ib_dev.map_mr_sg		= mlx5_ib_map_mr_sg;
	dev->ib_dev.check_mr_status	= mlx5_ib_check_mr_status;
	dev->ib_dev.get_port_immutable	= mlx5_port_immutable;

	mlx5_ib_internal_fill_odp_caps(dev);

	if (MLX5_CAP_GEN(mdev, xrc)) {
		dev->ib_dev.alloc_xrcd = mlx5_ib_alloc_xrcd;
		dev->ib_dev.dealloc_xrcd = mlx5_ib_dealloc_xrcd;
		dev->ib_dev.uverbs_cmd_mask |=
			(1ull << IB_USER_VERBS_CMD_OPEN_XRCD) |
			(1ull << IB_USER_VERBS_CMD_CLOSE_XRCD);
	}

	if (mlx5_ib_port_link_layer(&dev->ib_dev) ==
	    IB_LINK_LAYER_ETHERNET) {
		dev->ib_dev.create_flow	= mlx5_ib_create_flow;
		dev->ib_dev.destroy_flow = mlx5_ib_destroy_flow;
		dev->ib_dev.uverbs_ex_cmd_mask |=
			(1ull << IB_USER_VERBS_EX_CMD_CREATE_FLOW) |
			(1ull << IB_USER_VERBS_EX_CMD_DESTROY_FLOW);
	}

	err = init_node_data(dev);
	if (err)
		goto err_dealloc;

	mutex_init(&dev->flow_db.lock);
	mutex_init(&dev->cap_mask_mutex);

	err = create_dev_resources(&dev->devr);
	if (err)
		goto err_dealloc;

	err = mlx5_ib_odp_init_one(dev);
	if (err)
		goto err_rsrc;

	err = ib_register_device(&dev->ib_dev, NULL);
	if (err)
		goto err_odp;

	err = create_umr_res(dev);
	if (err) {
		mlx5_ib_dbg(dev, "umr resources initialize failed\n");
		goto err_dev;
	}

	for (i = 0; i < ARRAY_SIZE(mlx5_class_attributes); i++) {
		err = device_create_file(&dev->ib_dev.dev,
					 mlx5_class_attributes[i]);
		if (err)
			goto err_umrc;
	}

	dev->ib_active = true;

	return dev;

err_umrc:
	destroy_umrc_res(dev);

err_dev:
	ib_unregister_device(&dev->ib_dev);

err_odp:
	mlx5_ib_odp_remove_one(dev);

err_rsrc:
	destroy_dev_resources(&dev->devr);

err_dealloc:
	ib_dealloc_device((struct ib_device *)dev);

	return NULL;
}
static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
{
	struct mlx5_ib_dev *dev = context;

	ib_unregister_device(&dev->ib_dev);
	destroy_umrc_res(dev);
	mlx5_ib_odp_remove_one(dev);
	destroy_dev_resources(&dev->devr);
	ib_dealloc_device(&dev->ib_dev);
}
static struct mlx5_interface mlx5_ib_interface = {
	.add		= mlx5_ib_add,
	.remove		= mlx5_ib_remove,
	.event		= mlx5_ib_event,
	.protocol	= MLX5_INTERFACE_PROTOCOL_IB,
};
static int __init mlx5_ib_init(void)
{
	int err;

	if (deprecated_prof_sel != 2)
		pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");

	err = mlx5_ib_odp_init();
	if (err)
		return err;

	err = mlx5_register_interface(&mlx5_ib_interface);
	if (err)
		goto clean_odp;

	return err;

clean_odp:
	mlx5_ib_odp_cleanup();
	return err;
}
static void __exit mlx5_ib_cleanup(void)
{
	mlx5_unregister_interface(&mlx5_ib_interface);
	mlx5_ib_odp_cleanup();
}
module_init(mlx5_ib_init);
module_exit(mlx5_ib_cleanup);