/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/mlx5/srq.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include "mlx5_core.h"

#ifdef CONFIG_MLX5_CORE_EN
#include "eswitch.h"
#endif
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB, ConnectX-4 core driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRIVER_VERSION);

int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");

#define MLX5_DEFAULT_PROF	2
static int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, int, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
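
/*
 * Example (hypothetical values): both parameters can be given at load
 * time, e.g. "modprobe mlx5_core debug_mask=3 prof_sel=2" to dump both
 * command data and command execution times and to select the largest
 * resource profile.
 */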
static LIST_HEAD(intf_list);
static LIST_HEAD(dev_list);
static DEFINE_MUTEX(intf_mutex);

struct mlx5_device_context {
	struct list_head	list;
	struct mlx5_interface  *intf;
	void		       *context;
};

enum {
	MLX5_ATOMIC_REQ_MODE_BE = 0x0,
	MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

static struct mlx5_profile profile[] = {
	[0] = {
		.mask		= 0,
	},
	[1] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE,
		.log_max_qp	= 12,
	},
	[2] = {
		.mask		= MLX5_PROF_MASK_QP_SIZE |
				  MLX5_PROF_MASK_MR_CACHE,
		/* log_max_qp and mr_cache[] size/limit entries elided */
	},
};
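
/*
 * Firmware initialization handshake: after the command interface is set
 * up, the driver polls the initializing bit in the HCA initialization
 * segment (fw_initializing()) until the firmware clears it or the
 * FW_INIT_TIMEOUT_MILI budget below expires, sleeping FW_INIT_WAIT_MS
 * between reads.
 */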
#define FW_INIT_TIMEOUT_MILI	2000
#define FW_INIT_WAIT_MS		2

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili)
{
	unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
	int err = 0;

	while (fw_initializing(dev)) {
		if (time_after(jiffies, end)) {
			err = -EBUSY;
			break;
		}
		msleep(FW_INIT_WAIT_MS);
	}

	return err;
}
static int set_dma_caps(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
			return err;
		}
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev,
			 "Warning: couldn't set 64-bit consistent PCI DMA mask\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"Can't set consistent PCI DMA mask, aborting\n");
			return err;
		}
	}

	dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
	return 0;
}
static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
		err = pci_enable_device(pdev);
		if (!err)
			dev->pci_status = MLX5_PCI_STATUS_ENABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);

	return err;
}

static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
{
	struct pci_dev *pdev = dev->pdev;

	mutex_lock(&dev->pci_status_mutex);
	if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
		pci_disable_device(pdev);
		dev->pci_status = MLX5_PCI_STATUS_DISABLED;
	}
	mutex_unlock(&dev->pci_status_mutex);
}
static int request_bar(struct pci_dev *pdev)
{
	int err = 0;

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
		return -ENODEV;
	}

	err = pci_request_regions(pdev, DRIVER_NAME);
	if (err)
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

	return err;
}

static void release_bar(struct pci_dev *pdev)
{
	pci_release_regions(pdev);
}
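
/*
 * MSI-X sizing: request one completion vector per online CPU per port
 * plus MLX5_EQ_VEC_COMP_BASE control vectors (command, async events,
 * pages), capped by the number of EQs the device exposes (log_max_eq).
 * pci_enable_msix_range() may grant fewer vectors than requested, so
 * the completion-vector count is derived from what was actually granted.
 */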
static int mlx5_enable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_eq_table *table = &priv->eq_table;
	int num_eqs = 1 << MLX5_CAP_GEN(dev, log_max_eq);
	int nvec;
	int i;

	nvec = MLX5_CAP_GEN(dev, num_ports) * num_online_cpus() +
	       MLX5_EQ_VEC_COMP_BASE;
	nvec = min_t(int, nvec, num_eqs);
	if (nvec <= MLX5_EQ_VEC_COMP_BASE)
		return -ENOMEM;

	priv->msix_arr = kcalloc(nvec, sizeof(*priv->msix_arr), GFP_KERNEL);

	priv->irq_info = kcalloc(nvec, sizeof(*priv->irq_info), GFP_KERNEL);
	if (!priv->msix_arr || !priv->irq_info)
		goto err_free_msix;

	for (i = 0; i < nvec; i++)
		priv->msix_arr[i].entry = i;

	nvec = pci_enable_msix_range(dev->pdev, priv->msix_arr,
				     MLX5_EQ_VEC_COMP_BASE + 1, nvec);
	if (nvec < 0)
		return nvec;

	table->num_comp_vectors = nvec - MLX5_EQ_VEC_COMP_BASE;

	return 0;

err_free_msix:
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
	return -ENOMEM;
}

static void mlx5_disable_msix(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;

	pci_disable_msix(dev->pdev);
	kfree(priv->irq_info);
	kfree(priv->msix_arr);
}
struct mlx5_reg_host_endianess {
	u8	he;
	u8	rsvd[15];
};

#define CAP_MASK(pos, size) ((u64)((1 << (size)) - 1) << (pos))

enum {
	MLX5_CAP_BITS_RW_MASK = CAP_MASK(MLX5_CAP_OFF_CMDIF_CSUM, 2) |
				MLX5_DEV_CAP_FLAG_DCT,
};

static u16 to_fw_pkey_sz(u32 size)
{
	switch (size) {
	case 128:
		return 0;
	case 256:
		return 1;
	case 512:
		return 2;
	case 1024:
		return 3;
	case 2048:
		return 4;
	case 4096:
		return 5;
	default:
		pr_warn("invalid pkey table size %d\n", size);
		return 0;
	}
}
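
/*
 * HCA capabilities are read in two flavors: HCA_CAP_OPMOD_GET_MAX (what
 * the device could support) and HCA_CAP_OPMOD_GET_CUR (what is currently
 * enabled).  Both are cached in dev->hca_caps_max[] / hca_caps_cur[],
 * and handle_hca_cap() later writes an adjusted copy of the current
 * general caps back with SET_HCA_CAP.
 */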
int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
		       enum mlx5_cap_mode cap_mode)
{
	u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
	int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
	void *out, *hca_caps;
	u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
	int err;

	memset(in, 0, sizeof(in));
	out = kzalloc(out_sz, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
	MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, out_sz);
	if (err)
		goto query_ex;

	err = mlx5_cmd_status_to_err_v2(out);
	if (err) {
		mlx5_core_warn(dev,
			       "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
			       cap_type, cap_mode, err);
		goto query_ex;
	}

	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

	switch (cap_mode) {
	case HCA_CAP_OPMOD_GET_MAX:
		memcpy(dev->hca_caps_max[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	case HCA_CAP_OPMOD_GET_CUR:
		memcpy(dev->hca_caps_cur[cap_type], hca_caps,
		       MLX5_UN_SZ_BYTES(hca_cap_union));
		break;
	default:
		mlx5_core_warn(dev,
			       "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
			       cap_type, cap_mode);
		err = -EINVAL;
		break;
	}

query_ex:
	kfree(out);
	return err;
}
static int set_caps(struct mlx5_core_dev *dev, void *in, int in_sz, int opmod)
{
	u32 out[MLX5_ST_SZ_DW(set_hca_cap_out)];
	int err;

	memset(out, 0, sizeof(out));

	MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
	MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
	err = mlx5_cmd_exec(dev, in, in_sz, out, sizeof(out));
	if (err)
		return err;

	err = mlx5_cmd_status_to_err_v2(out);

	return err;
}
static int handle_hca_cap_atomic(struct mlx5_core_dev *dev)
{
	void *set_ctx;
	void *set_hca_cap;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	int req_endianness;
	int err;

	if (MLX5_CAP_GEN(dev, atomic)) {
		err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC,
					 HCA_CAP_OPMOD_GET_CUR);
		if (err)
			return err;
	} else {
		return 0;
	}

	req_endianness =
		MLX5_CAP_ATOMIC(dev,
				supported_atomic_req_8B_endianess_mode_1);

	if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
		return 0;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		return -ENOMEM;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

	/* Set requestor to host endianness */
	MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianess_mode,
		 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

	err = set_caps(dev, set_ctx, set_sz, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);

	kfree(set_ctx);
	return err;
}
static int handle_hca_cap(struct mlx5_core_dev *dev)
{
	void *set_ctx = NULL;
	struct mlx5_profile *prof = dev->profile;
	int err = -ENOMEM;
	int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
	void *set_hca_cap;

	set_ctx = kzalloc(set_sz, GFP_KERNEL);
	if (!set_ctx)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_MAX);
	if (err)
		goto query_ex;

	err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL, HCA_CAP_OPMOD_GET_CUR);
	if (err)
		goto query_ex;

	set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
				   capability);
	memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
	       MLX5_ST_SZ_BYTES(cmd_hca_cap));

	mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
		      mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
		      128);
	/* we limit the size of the pkey table to 128 entries for now */
	MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
		 to_fw_pkey_sz(128));

	if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
		MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
			 prof->log_max_qp);

	/* disable cmdif checksum */
	MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);

	MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);

	err = set_caps(dev, set_ctx, set_sz,
		       MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);

query_ex:
	kfree(set_ctx);
	return err;
}
static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
	struct mlx5_reg_host_endianess he_in;
	struct mlx5_reg_host_endianess he_out;
	int err;

	if (!mlx5_core_is_pf(dev))
		return 0;

	memset(&he_in, 0, sizeof(he_in));
	he_in.he = MLX5_SET_HOST_ENDIANNESS;
	err = mlx5_core_access_reg(dev, &he_in,  sizeof(he_in),
				   &he_out, sizeof(he_out),
				   MLX5_REG_HOST_ENDIANNESS, 0, 1);
	return err;
}
int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(enable_hca_out)];
	u32 in[MLX5_ST_SZ_DW(enable_hca_in)];
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
	MLX5_SET(enable_hca_in, in, function_id, func_id);
	memset(out, 0, sizeof(out));

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(disable_hca_out)];
	u32 in[MLX5_ST_SZ_DW(disable_hca_in)];
	int err;

	memset(in, 0, sizeof(in));
	MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
	MLX5_SET(disable_hca_in, in, function_id, func_id);
	memset(out, 0, sizeof(out));
	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	return mlx5_cmd_status_to_err_v2(out);
}
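
/*
 * The 64-bit internal timer lives in two 32-bit device registers.  The
 * high word is read, then the low word, then the high word again: if
 * the two high-word reads differ, the low word wrapped in between and
 * is re-read so the combined value stays consistent.
 */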
cycle_t mlx5_read_internal_timer(struct mlx5_core_dev *dev)
{
	u32 timer_h, timer_h1, timer_l;

	timer_h = ioread32be(&dev->iseg->internal_timer_h);
	timer_l = ioread32be(&dev->iseg->internal_timer_l);
	timer_h1 = ioread32be(&dev->iseg->internal_timer_h);
	if (timer_h != timer_h1) /* wrap around */
		timer_l = ioread32be(&dev->iseg->internal_timer_l);

	return (cycle_t)timer_l | (cycle_t)timer_h1 << 32;
}
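
/*
 * Completion interrupt spreading: each completion vector i gets a
 * one-CPU affinity hint chosen by cpumask_local_spread(i, numa_node),
 * so CPUs on the device's NUMA node are preferred and the vectors are
 * distributed round-robin across them.
 */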
static int mlx5_irq_set_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;
	int numa_node           = priv->numa_node;
	int err;

	if (!zalloc_cpumask_var(&priv->irq_info[i].mask, GFP_KERNEL)) {
		mlx5_core_warn(mdev, "zalloc_cpumask_var failed");
		return -ENOMEM;
	}

	cpumask_set_cpu(cpumask_local_spread(i, numa_node),
			priv->irq_info[i].mask);

	err = irq_set_affinity_hint(irq, priv->irq_info[i].mask);
	if (err) {
		mlx5_core_warn(mdev, "irq_set_affinity_hint failed,irq 0x%.4x",
			       irq);
		goto err_clear_mask;
	}

	return 0;

err_clear_mask:
	free_cpumask_var(priv->irq_info[i].mask);
	return err;
}

static void mlx5_irq_clear_affinity_hint(struct mlx5_core_dev *mdev, int i)
{
	struct mlx5_priv *priv  = &mdev->priv;
	struct msix_entry *msix = priv->msix_arr;
	int irq                 = msix[i + MLX5_EQ_VEC_COMP_BASE].vector;

	irq_set_affinity_hint(irq, NULL);
	free_cpumask_var(priv->irq_info[i].mask);
}

static int mlx5_irq_set_affinity_hints(struct mlx5_core_dev *mdev)
{
	int err;
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++) {
		err = mlx5_irq_set_affinity_hint(mdev, i);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	for (i--; i >= 0; i--)
		mlx5_irq_clear_affinity_hint(mdev, i);

	return err;
}

static void mlx5_irq_clear_affinity_hints(struct mlx5_core_dev *mdev)
{
	int i;

	for (i = 0; i < mdev->priv.eq_table.num_comp_vectors; i++)
		mlx5_irq_clear_affinity_hint(mdev, i);
}
int mlx5_vector2eqn(struct mlx5_core_dev *dev, int vector, int *eqn,
		    unsigned int *irqn)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;
	int err = -ENOENT;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		if (eq->index == vector) {
			*eqn = eq->eqn;
			*irqn = eq->irqn;
			err = 0;
			break;
		}
	}
	spin_unlock(&table->lock);

	return err;
}
EXPORT_SYMBOL(mlx5_vector2eqn);
static void free_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	struct mlx5_eq *eq, *n;

	spin_lock(&table->lock);
	list_for_each_entry_safe(eq, n, &table->comp_eqs_list, list) {
		list_del(&eq->list);
		spin_unlock(&table->lock);
		if (mlx5_destroy_unmap_eq(dev, eq))
			mlx5_core_warn(dev, "failed to destroy EQ 0x%x\n",
				       eq->eqn);
		kfree(eq);
		spin_lock(&table->lock);
	}
	spin_unlock(&table->lock);
}

static int alloc_comp_eqs(struct mlx5_core_dev *dev)
{
	struct mlx5_eq_table *table = &dev->priv.eq_table;
	char name[MLX5_MAX_IRQ_NAME];
	struct mlx5_eq *eq;
	int ncomp_vec;
	int nent;
	int err;
	int i;

	INIT_LIST_HEAD(&table->comp_eqs_list);
	ncomp_vec = table->num_comp_vectors;
	nent = MLX5_COMP_EQ_SIZE;
	for (i = 0; i < ncomp_vec; i++) {
		eq = kzalloc(sizeof(*eq), GFP_KERNEL);
		if (!eq) {
			err = -ENOMEM;
			goto clean;
		}

		snprintf(name, MLX5_MAX_IRQ_NAME, "mlx5_comp%d", i);
		err = mlx5_create_map_eq(dev, eq,
					 i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
					 name, &dev->priv.uuari.uars[0]);
		if (err) {
			kfree(eq);
			goto clean;
		}
		mlx5_core_dbg(dev, "allocated completion EQN %d\n", eq->eqn);
		eq->index = i;
		spin_lock(&table->lock);
		list_add_tail(&eq->list, &table->comp_eqs_list);
		spin_unlock(&table->lock);
	}

	return 0;

clean:
	free_comp_eqs(dev);
	return err;
}
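
/*
 * ISSI (Interface Step Sequence ID) negotiation: QUERY_ISSI reports the
 * ISSI levels the firmware supports as a bitmask.  If the command itself
 * is rejected with BAD_OP, only the original ISSI 0 interface exists.
 * If bit 1 is set the driver switches the device to ISSI 1 with
 * SET_ISSI; otherwise it stays at ISSI 0.
 */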
static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
	u32 query_in[MLX5_ST_SZ_DW(query_issi_in)];
	u32 query_out[MLX5_ST_SZ_DW(query_issi_out)];
	u32 set_in[MLX5_ST_SZ_DW(set_issi_in)];
	u32 set_out[MLX5_ST_SZ_DW(set_issi_out)];
	int err;
	u32 sup_issi;

	memset(query_in, 0, sizeof(query_in));
	memset(query_out, 0, sizeof(query_out));

	MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);

	err = mlx5_cmd_exec_check_status(dev, query_in, sizeof(query_in),
					 query_out, sizeof(query_out));
	if (err) {
		if (((struct mlx5_outbox_hdr *)query_out)->status ==
		    MLX5_CMD_STAT_BAD_OP_ERR) {
			pr_debug("Only ISSI 0 is supported\n");
			return 0;
		}

		pr_err("failed to query ISSI\n");
		return err;
	}

	sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

	if (sup_issi & (1 << 1)) {
		memset(set_in, 0, sizeof(set_in));
		memset(set_out, 0, sizeof(set_out));

		MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
		MLX5_SET(set_issi_in, set_in, current_issi, 1);

		err = mlx5_cmd_exec_check_status(dev, set_in, sizeof(set_in),
						 set_out, sizeof(set_out));
		if (err) {
			pr_err("failed to set ISSI=1\n");
			return err;
		}

		dev->issi = 1;

		return 0;
	} else if (sup_issi & (1 << 0) || !sup_issi) {
		return 0;
	}

	return -ENOTSUPP;
}
static int map_bf_area(struct mlx5_core_dev *dev)
{
	resource_size_t bf_start = pci_resource_start(dev->pdev, 0);
	resource_size_t bf_len = pci_resource_len(dev->pdev, 0);

	dev->priv.bf_mapping = io_mapping_create_wc(bf_start, bf_len);

	return dev->priv.bf_mapping ? 0 : -ENOMEM;
}

static void unmap_bf_area(struct mlx5_core_dev *dev)
{
	if (dev->priv.bf_mapping)
		io_mapping_free(dev->priv.bf_mapping);
}
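
/*
 * Interface attachment model: protocol drivers (e.g. mlx5_ib, mlx5e)
 * register a struct mlx5_interface, and every core device keeps a list
 * of per-interface contexts.  Whenever a device or an interface is
 * added, each interface's add() callback is invoked for each device
 * under intf_mutex, and the returned context is tracked on the device's
 * ctx_list so it can later be handed back to remove(), event() and
 * get_dev().
 */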
static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
	if (!dev_ctx)
		return;

	dev_ctx->intf    = intf;
	dev_ctx->context = intf->add(dev);

	if (dev_ctx->context) {
		spin_lock_irq(&priv->ctx_lock);
		list_add_tail(&dev_ctx->list, &priv->ctx_list);
		spin_unlock_irq(&priv->ctx_lock);
	} else {
		kfree(dev_ctx);
	}
}

static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
{
	struct mlx5_device_context *dev_ctx;
	struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf == intf) {
			spin_lock_irq(&priv->ctx_lock);
			list_del(&dev_ctx->list);
			spin_unlock_irq(&priv->ctx_lock);

			intf->remove(dev, dev_ctx->context);
			kfree(dev_ctx);
			return;
		}
}
static int mlx5_register_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_add_tail(&priv->dev_list, &dev_list);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}

static void mlx5_unregister_device(struct mlx5_core_dev *dev)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_interface *intf;

	mutex_lock(&intf_mutex);
	list_for_each_entry(intf, &intf_list, list)
		mlx5_remove_device(intf, priv);
	list_del(&priv->dev_list);
	mutex_unlock(&intf_mutex);
}

int mlx5_register_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	if (!intf->add || !intf->remove)
		return -EINVAL;

	mutex_lock(&intf_mutex);
	list_add_tail(&intf->list, &intf_list);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_add_device(intf, priv);
	mutex_unlock(&intf_mutex);

	return 0;
}
EXPORT_SYMBOL(mlx5_register_interface);

void mlx5_unregister_interface(struct mlx5_interface *intf)
{
	struct mlx5_priv *priv;

	mutex_lock(&intf_mutex);
	list_for_each_entry(priv, &dev_list, dev_list)
		mlx5_remove_device(intf, priv);
	list_del(&intf->list);
	mutex_unlock(&intf_mutex);
}
EXPORT_SYMBOL(mlx5_unregister_interface);
void *mlx5_get_protocol_dev(struct mlx5_core_dev *mdev, int protocol)
{
	struct mlx5_priv *priv = &mdev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;
	void *result = NULL;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &mdev->priv.ctx_list, list)
		if ((dev_ctx->intf->protocol == protocol) &&
		    dev_ctx->intf->get_dev) {
			result = dev_ctx->intf->get_dev(dev_ctx->context);
			break;
		}

	spin_unlock_irqrestore(&priv->ctx_lock, flags);

	return result;
}
EXPORT_SYMBOL(mlx5_get_protocol_dev);
static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err = 0;

	pci_set_drvdata(dev->pdev, dev);
	strncpy(priv->name, dev_name(&pdev->dev), MLX5_MAX_NAME_LEN);
	priv->name[MLX5_MAX_NAME_LEN - 1] = 0;

	mutex_init(&priv->pgdir_mutex);
	INIT_LIST_HEAD(&priv->pgdir_list);
	spin_lock_init(&priv->mkey_lock);

	mutex_init(&priv->alloc_mutex);

	priv->numa_node = dev_to_node(&dev->pdev->dev);

	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
	if (!priv->dbg_root)
		return -ENOMEM;

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_dbg;
	}

	err = request_bar(pdev);
	if (err) {
		dev_err(&pdev->dev, "error requesting BARs, aborting\n");
		goto err_disable;
	}

	pci_set_master(pdev);

	err = set_dma_caps(pdev);
	if (err) {
		dev_err(&pdev->dev, "Failed setting DMA capabilities mask, aborting\n");
		goto err_clr_master;
	}

	dev->iseg_base = pci_resource_start(dev->pdev, 0);
	dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
	if (!dev->iseg) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed mapping initialization segment, aborting\n");
		goto err_clr_master;
	}

	return 0;

err_clr_master:
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
err_disable:
	mlx5_pci_disable_device(dev);

err_dbg:
	debugfs_remove(priv->dbg_root);
	return err;
}

static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	iounmap(dev->iseg);
	pci_clear_master(dev->pdev);
	release_bar(dev->pdev);
	mlx5_pci_disable_device(dev);
	debugfs_remove(priv->dbg_root);
}
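
/*
 * mlx5_load_one() takes the device from "PCI resources mapped" to fully
 * operational, roughly in this order: command interface, firmware-init
 * wait, ENABLE_HCA, ISSI negotiation, boot/init page allocation, HCA
 * capabilities, INIT_HCA, health polling, MSI-X and EQs, UARs, flow
 * steering, eswitch/SR-IOV, and finally registration with the protocol
 * interfaces.  mlx5_unload_one() tears the same stack down in reverse.
 */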
#define MLX5_IB_MOD "mlx5_ib"
static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	struct pci_dev *pdev = dev->pdev;
	int err;

	mutex_lock(&dev->intf_state_mutex);
	if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
		dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
			 __func__);
		goto out;
	}

	dev_info(&pdev->dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
		 fw_rev_min(dev), fw_rev_sub(dev));

	/* on load removing any previous indication of internal error, device is
	 * up
	 */
	dev->state = MLX5_DEVICE_STATE_UP;

	err = mlx5_cmd_init(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed initializing command interface, aborting\n");
		goto out_err;
	}

	err = wait_fw_init(dev, FW_INIT_TIMEOUT_MILI);
	if (err) {
		dev_err(&dev->pdev->dev, "Firmware over %d MS in initializing state, aborting\n",
			FW_INIT_TIMEOUT_MILI);
		goto out_err;
	}

	mlx5_pagealloc_init(dev);

	err = mlx5_core_enable_hca(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "enable hca failed\n");
		goto err_pagealloc_cleanup;
	}

	err = mlx5_core_set_issi(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to set issi\n");
		goto err_disable_hca;
	}

	err = mlx5_satisfy_startup_pages(dev, 1);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate boot pages\n");
		goto err_disable_hca;
	}

	err = set_hca_ctrl(dev);
	if (err) {
		dev_err(&pdev->dev, "set_hca_ctrl failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap failed\n");
		goto reclaim_boot_pages;
	}

	err = handle_hca_cap_atomic(dev);
	if (err) {
		dev_err(&pdev->dev, "handle_hca_cap_atomic failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_satisfy_startup_pages(dev, 0);
	if (err) {
		dev_err(&pdev->dev, "failed to allocate init pages\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_pagealloc_start(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pagealloc_start failed\n");
		goto reclaim_boot_pages;
	}

	err = mlx5_cmd_init_hca(dev);
	if (err) {
		dev_err(&pdev->dev, "init hca failed\n");
		goto err_pagealloc_stop;
	}

	mlx5_start_health_poll(dev);

	err = mlx5_query_hca_caps(dev);
	if (err) {
		dev_err(&pdev->dev, "query hca failed\n");
		goto err_stop_poll;
	}

	err = mlx5_query_board_id(dev);
	if (err) {
		dev_err(&pdev->dev, "query board id failed\n");
		goto err_stop_poll;
	}

	err = mlx5_enable_msix(dev);
	if (err) {
		dev_err(&pdev->dev, "enable msix failed\n");
		goto err_stop_poll;
	}

	err = mlx5_eq_init(dev);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize eq\n");
		goto disable_msix;
	}

	err = mlx5_alloc_uuars(dev, &priv->uuari);
	if (err) {
		dev_err(&pdev->dev, "Failed allocating uar, aborting\n");
		goto err_eq_cleanup;
	}

	err = mlx5_start_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to start pages and async EQs\n");
		goto err_free_uar;
	}

	err = alloc_comp_eqs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc completion EQs\n");
		goto err_stop_eqs;
	}

	if (map_bf_area(dev))
		dev_err(&pdev->dev, "Failed to map blue flame area\n");

	err = mlx5_irq_set_affinity_hints(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to alloc affinity hint cpumask\n");
		goto err_unmap_bf_area;
	}

	MLX5_INIT_DOORBELL_LOCK(&priv->cq_uar_lock);

	mlx5_init_cq_table(dev);
	mlx5_init_qp_table(dev);
	mlx5_init_srq_table(dev);
	mlx5_init_mr_table(dev);

	err = mlx5_init_fs(dev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init flow steering\n");
		goto err_fs;
	}
#ifdef CONFIG_MLX5_CORE_EN
	err = mlx5_eswitch_init(dev);
	if (err) {
		dev_err(&pdev->dev, "eswitch init failed %d\n", err);
		goto err_sriov;
	}
#endif

	err = mlx5_sriov_init(dev);
	if (err) {
		dev_err(&pdev->dev, "sriov init failed %d\n", err);
		goto err_sriov;
	}

	err = mlx5_register_device(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
		goto err_reg_dev;
	}

	err = request_module_nowait(MLX5_IB_MOD);
	if (err)
		pr_info("failed request module on %s\n", MLX5_IB_MOD);

	dev->interface_state = MLX5_INTERFACE_STATE_UP;
out:
	mutex_unlock(&dev->intf_state_mutex);

	return 0;
err_reg_dev:
	if (mlx5_sriov_cleanup(dev))
		dev_err(&dev->pdev->dev, "sriov cleanup failed\n");

#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif
err_sriov:
	mlx5_cleanup_fs(dev);
err_fs:
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_irq_clear_affinity_hints(dev);

err_unmap_bf_area:
	unmap_bf_area(dev);

	free_comp_eqs(dev);

err_stop_eqs:
	mlx5_stop_eqs(dev);

err_free_uar:
	mlx5_free_uuars(dev, &priv->uuari);

err_eq_cleanup:
	mlx5_eq_cleanup(dev);

disable_msix:
	mlx5_disable_msix(dev);

err_stop_poll:
	mlx5_stop_health_poll(dev);
	if (mlx5_cmd_teardown_hca(dev)) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out_err;
	}

err_pagealloc_stop:
	mlx5_pagealloc_stop(dev);

reclaim_boot_pages:
	mlx5_reclaim_startup_pages(dev);

err_disable_hca:
	mlx5_core_disable_hca(dev, 0);

err_pagealloc_cleanup:
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

out_err:
	dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
	mutex_unlock(&dev->intf_state_mutex);

	return err;
}
static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
{
	int err = 0;

	err = mlx5_sriov_cleanup(dev);
	if (err) {
		dev_warn(&dev->pdev->dev, "%s: sriov cleanup failed - abort\n",
			 __func__);
		return err;
	}

	mutex_lock(&dev->intf_state_mutex);
	if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
		dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
			 __func__);
		goto out;
	}
	mlx5_unregister_device(dev);
#ifdef CONFIG_MLX5_CORE_EN
	mlx5_eswitch_cleanup(dev->priv.eswitch);
#endif

	mlx5_cleanup_fs(dev);
	mlx5_cleanup_mr_table(dev);
	mlx5_cleanup_srq_table(dev);
	mlx5_cleanup_qp_table(dev);
	mlx5_cleanup_cq_table(dev);
	mlx5_irq_clear_affinity_hints(dev);
	unmap_bf_area(dev);
	free_comp_eqs(dev);
	mlx5_stop_eqs(dev);
	mlx5_free_uuars(dev, &priv->uuari);
	mlx5_eq_cleanup(dev);
	mlx5_disable_msix(dev);
	mlx5_stop_health_poll(dev);
	err = mlx5_cmd_teardown_hca(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
		goto out;
	}
	mlx5_pagealloc_stop(dev);
	mlx5_reclaim_startup_pages(dev);
	mlx5_core_disable_hca(dev, 0);
	mlx5_pagealloc_cleanup(dev);
	mlx5_cmd_cleanup(dev);

out:
	dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
	mutex_unlock(&dev->intf_state_mutex);
	return err;
}
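
/*
 * Event fan-out: mlx5_core_event() is installed as dev->event in
 * init_one() and forwards firmware/driver events to every registered
 * interface that provided an event() callback, under ctx_lock.
 */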
void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
		     unsigned long param)
{
	struct mlx5_priv *priv = &dev->priv;
	struct mlx5_device_context *dev_ctx;
	unsigned long flags;

	spin_lock_irqsave(&priv->ctx_lock, flags);

	list_for_each_entry(dev_ctx, &priv->ctx_list, list)
		if (dev_ctx->intf->event)
			dev_ctx->intf->event(dev, dev_ctx->context, event, param);

	spin_unlock_irqrestore(&priv->ctx_lock, flags);
}

struct mlx5_core_event_handler {
	void (*event)(struct mlx5_core_dev *dev,
		      enum mlx5_dev_event event,
		      void *data);
};
static int init_one(struct pci_dev *pdev,
		    const struct pci_device_id *id)
{
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev) {
		dev_err(&pdev->dev, "kzalloc failed\n");
		return -ENOMEM;
	}
	priv = &dev->priv;
	priv->pci_dev_data = id->driver_data;

	pci_set_drvdata(pdev, dev);

	if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
		pr_warn("selected profile out of range, selecting default (%d)\n",
			MLX5_DEFAULT_PROF);
		prof_sel = MLX5_DEFAULT_PROF;
	}
	dev->profile = &profile[prof_sel];
	dev->pdev = pdev;
	dev->event = mlx5_core_event;

	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);
	mutex_init(&dev->pci_status_mutex);
	mutex_init(&dev->intf_state_mutex);
	err = mlx5_pci_init(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_pci_init failed with error code %d\n", err);
		goto clean_dev;
	}

	err = mlx5_health_init(dev);
	if (err) {
		dev_err(&pdev->dev, "mlx5_health_init failed with error code %d\n", err);
		goto close_pci;
	}

	err = mlx5_load_one(dev, priv);
	if (err) {
		dev_err(&pdev->dev, "mlx5_load_one failed with error code %d\n", err);
		goto clean_health;
	}

	return 0;

clean_health:
	mlx5_health_cleanup(dev);
close_pci:
	mlx5_pci_close(dev, priv);
clean_dev:
	pci_set_drvdata(pdev, NULL);
	kfree(dev);

	return err;
}
static void remove_one(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	if (mlx5_unload_one(dev, priv)) {
		dev_err(&dev->pdev->dev, "mlx5_unload_one failed\n");
		mlx5_health_cleanup(dev);
		return;
	}

	mlx5_health_cleanup(dev);
	mlx5_pci_close(dev, priv);
	pci_set_drvdata(pdev, NULL);
	kfree(dev);
}
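
/*
 * PCI AER recovery flow: err_detected() moves the device into the error
 * state, unloads the software stack and disables the PCI device;
 * slot_reset() re-enables it and restores PCI state; resume() waits for
 * the firmware to show vital signs (wait_vital()) and then reloads the
 * device with mlx5_load_one().
 */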
static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
					      pci_channel_state_t state)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;

	dev_info(&pdev->dev, "%s was called\n", __func__);
	mlx5_enter_error_state(dev);
	mlx5_unload_one(dev, priv);
	mlx5_pci_disable_device(dev);
	return state == pci_channel_io_perm_failure ?
		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	err = mlx5_pci_enable_device(dev);
	if (err) {
		dev_err(&pdev->dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
			__func__, err);
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	return err ? PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_RECOVERED;
}

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
	mlx5_pci_err_detected(dev->pdev, 0);
}
/* wait for the device to show vital signs. For now we check
 * that we can read the device ID and that the health buffer
 * shows a non zero value which is different than 0xffffffff
 */
static void wait_vital(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_core_health *health = &dev->priv.health;
	const int niter = 100;
	u32 count;
	u16 did;
	int i;

	/* Wait for firmware to be ready after reset */
	msleep(1000);
	for (i = 0; i < niter; i++) {
		if (pci_read_config_word(pdev, 2, &did)) {
			dev_warn(&pdev->dev, "failed reading config word\n");
			break;
		}
		if (did == pdev->device) {
			dev_info(&pdev->dev, "device ID correctly read after %d iterations\n", i);
			break;
		}
		msleep(50);
	}
	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read device ID\n", __func__, __LINE__);

	for (i = 0; i < niter; i++) {
		count = ioread32be(health->health_counter);
		if (count && count != 0xffffffff) {
			dev_info(&pdev->dev, "Counter value 0x%x after %d iterations\n", count, i);
			break;
		}
		msleep(50);
	}

	if (i == niter)
		dev_warn(&pdev->dev, "%s-%d: could not read health counter\n", __func__, __LINE__);
}
static void mlx5_pci_resume(struct pci_dev *pdev)
{
	struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
	struct mlx5_priv *priv = &dev->priv;
	int err;

	dev_info(&pdev->dev, "%s was called\n", __func__);

	pci_save_state(pdev);
	wait_vital(pdev);

	err = mlx5_load_one(dev, priv);
	if (err)
		dev_err(&pdev->dev, "%s: mlx5_load_one failed with error code: %d\n",
			__func__, err);
	else
		dev_info(&pdev->dev, "%s: device recovered\n", __func__);
}

static const struct pci_error_handlers mlx5_err_handler = {
	.error_detected = mlx5_pci_err_detected,
	.slot_reset	= mlx5_pci_slot_reset,
	.resume		= mlx5_pci_resume
};
static const struct pci_device_id mlx5_core_pci_table[] = {
	{ PCI_VDEVICE(MELLANOX, 0x1011) },			/* Connect-IB */
	{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},	/* Connect-IB VF */
	{ PCI_VDEVICE(MELLANOX, 0x1013) },			/* ConnectX-4 */
	{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4 VF */
	{ PCI_VDEVICE(MELLANOX, 0x1015) },			/* ConnectX-4LX */
	{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},	/* ConnectX-4LX VF */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

static struct pci_driver mlx5_core_driver = {
	.name           = DRIVER_NAME,
	.id_table       = mlx5_core_pci_table,
	.probe          = init_one,
	.remove         = remove_one,
	.err_handler	= &mlx5_err_handler,
	.sriov_configure   = mlx5_core_sriov_configure,
};
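
/*
 * Module init order: the debugfs root is created first, then the PCI
 * driver is registered; when the Ethernet part is built in
 * (CONFIG_MLX5_CORE_EN) its init/cleanup hooks are called from here as
 * well.
 */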
static int __init init(void)
{
	int err;

	mlx5_register_debugfs();

	err = pci_register_driver(&mlx5_core_driver);
	if (err)
		goto err_debug;

#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_init();
#endif

	return 0;

err_debug:
	mlx5_unregister_debugfs();
	return err;
}

static void __exit cleanup(void)
{
#ifdef CONFIG_MLX5_CORE_EN
	mlx5e_cleanup();
#endif
	pci_unregister_driver(&mlx5_core_driver);
	mlx5_unregister_debugfs();
}

module_init(init);
module_exit(cleanup);