/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/qp.h>
#include <linux/debugfs.h>
#include <linux/kmod.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/mlx5/vport.h>
#include <linux/version.h>
#include <net/devlink.h>
#include "mlx5_core.h"
#include "lib/eq.h"
#include "fs_core.h"
#include "lib/mpfs.h"
#include "eswitch.h"
#include "devlink.h"
#include "fw_reset.h"
#include "lib/mlx5.h"
#include "lib/tout.h"
#include "fpga/core.h"
#include "en_accel/ipsec.h"
#include "lib/clock.h"
#include "lib/vxlan.h"
#include "lib/geneve.h"
#include "lib/devcom.h"
#include "lib/pci_vsc.h"
#include "diag/fw_tracer.h"
#include "diag/crdump.h"
#include "lib/hv_vhca.h"
#include "diag/rsc_dump.h"
#include "sf/vhca_event.h"
#include "sf/dev/dev.h"
#include "sf/sf.h"
#include "mlx5_irq.h"
#include "hwmon.h"
#include "lag/lag.h"

MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox 5th generation network adapters (ConnectX series) core driver");
MODULE_LICENSE("Dual BSD/GPL");

unsigned int mlx5_core_debug_mask;
module_param_named(debug_mask, mlx5_core_debug_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
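/* Example usage (illustrative): debug_mask is a bitmask, so 3 enables both
 * command-data and command-time dumping, and the 0644 mode above makes the
 * knob writable at runtime:
 *
 *   modprobe mlx5_core debug_mask=3
 *   echo 1 > /sys/module/mlx5_core/parameters/debug_mask
 */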
static unsigned int prof_sel = MLX5_DEFAULT_PROF;
module_param_named(prof_sel, prof_sel, uint, 0444);
MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
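/* Example usage (illustrative): the profile is selected once at probe time,
 * e.g. "modprobe mlx5_core prof_sel=2" picks profile[2] below; the 0444 mode
 * keeps the parameter read-only afterwards, and mlx5_core_verify_params()
 * falls back to the default for out-of-range values.
 */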
static u32 sw_owner_id[4];
#define MAX_SW_VHCA_ID (BIT(__mlx5_bit_sz(cmd_hca_cap_2, sw_vhca_id)) - 1)
static DEFINE_IDA(sw_vhca_ida);

enum {
        MLX5_ATOMIC_REQ_MODE_BE = 0x0,
        MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS = 0x1,
};

#define LOG_MAX_SUPPORTED_QPS 0xff

static struct mlx5_profile profile[] = {
        [0] = {
                .mask           = 0,
                .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
        },
        [1] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
                .log_max_qp     = 12,
                .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
        },
        [2] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE |
                                  MLX5_PROF_MASK_MR_CACHE,
                .log_max_qp     = LOG_MAX_SUPPORTED_QPS,
                .num_cmd_caches = MLX5_NUM_COMMAND_CACHES,
                .mr_cache[0]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[1]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[2]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[3]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[4]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[5]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[6]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[7]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[8]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[9]    = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[10]   = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[11]   = {
                        .size   = 500,
                        .limit  = 250
                },
                .mr_cache[12]   = {
                        .size   = 64,
                        .limit  = 32
                },
                .mr_cache[13]   = {
                        .size   = 32,
                        .limit  = 16
                },
                .mr_cache[14]   = {
                        .size   = 16,
                        .limit  = 8
                },
                .mr_cache[15]   = {
                        .size   = 8,
                        .limit  = 4
                },
        },
        [3] = {
                .mask           = MLX5_PROF_MASK_QP_SIZE,
                .log_max_qp     = LOG_MAX_SUPPORTED_QPS,
                .num_cmd_caches = 0,
        },
};

static int wait_fw_init(struct mlx5_core_dev *dev, u32 max_wait_mili,
                        u32 warn_time_mili, const char *init_state)
{
        unsigned long warn = jiffies + msecs_to_jiffies(warn_time_mili);
        unsigned long end = jiffies + msecs_to_jiffies(max_wait_mili);
        u32 fw_initializing;

        do {
                fw_initializing = ioread32be(&dev->iseg->initializing);
                if (!(fw_initializing >> 31))
                        break;
                if (time_after(jiffies, end)) {
                        mlx5_core_err(dev, "Firmware over %u MS in %s state, aborting\n",
                                      max_wait_mili, init_state);
                        return -ETIMEDOUT;
                }
                if (test_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state)) {
                        mlx5_core_warn(dev, "device is being removed, stop waiting for FW %s\n",
                                       init_state);
                        return -ENODEV;
                }
                if (warn_time_mili && time_after(jiffies, warn)) {
                        mlx5_core_warn(dev, "Waiting for FW %s, timeout abort in %ds (0x%x)\n",
                                       init_state, jiffies_to_msecs(end - warn) / 1000,
                                       fw_initializing);
                        warn = jiffies + msecs_to_jiffies(warn_time_mili);
                }
                msleep(mlx5_tout_ms(dev, FW_PRE_INIT_WAIT));
        } while (true);

        return 0;
}

static void mlx5_set_driver_version(struct mlx5_core_dev *dev)
{
        int driver_ver_sz = MLX5_FLD_SZ_BYTES(set_driver_version_in,
                                              driver_version);
        u8 in[MLX5_ST_SZ_BYTES(set_driver_version_in)] = {};
        char *string;

        if (!MLX5_CAP_GEN(dev, driver_version))
                return;

        string = MLX5_ADDR_OF(set_driver_version_in, in, driver_version);

        snprintf(string, driver_ver_sz, "Linux,%s,%u.%u.%u",
                 KBUILD_MODNAME, LINUX_VERSION_MAJOR,
                 LINUX_VERSION_PATCHLEVEL, LINUX_VERSION_SUBLEVEL);

        /* Send the command */
        MLX5_SET(set_driver_version_in, in, opcode,
                 MLX5_CMD_OP_SET_DRIVER_VERSION);

        mlx5_cmd_exec_in(dev, set_driver_version, in);
}

static int set_dma_caps(struct pci_dev *pdev)
{
        int err;

        err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (err) {
                dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask\n");
                err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting\n");
                        return err;
                }
        }

        dma_set_max_seg_size(&pdev->dev, 2u * 1024 * 1024 * 1024);
        return 0;
}

static int mlx5_pci_enable_device(struct mlx5_core_dev *dev)
{
        struct pci_dev *pdev = dev->pdev;
        int err = 0;

        mutex_lock(&dev->pci_status_mutex);
        if (dev->pci_status == MLX5_PCI_STATUS_DISABLED) {
                err = pci_enable_device(pdev);
                if (!err)
                        dev->pci_status = MLX5_PCI_STATUS_ENABLED;
        }
        mutex_unlock(&dev->pci_status_mutex);

        return err;
}

279 static void mlx5_pci_disable_device(struct mlx5_core_dev *dev)
281 struct pci_dev *pdev = dev->pdev;
283 mutex_lock(&dev->pci_status_mutex);
284 if (dev->pci_status == MLX5_PCI_STATUS_ENABLED) {
285 pci_disable_device(pdev);
286 dev->pci_status = MLX5_PCI_STATUS_DISABLED;
288 mutex_unlock(&dev->pci_status_mutex);
static int request_bar(struct pci_dev *pdev)
{
        int err = 0;

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                dev_err(&pdev->dev, "Missing registers BAR, aborting\n");
                return -ENODEV;
        }

        err = pci_request_regions(pdev, KBUILD_MODNAME);
        if (err)
                dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");

        return err;
}

static void release_bar(struct pci_dev *pdev)
{
        pci_release_regions(pdev);
}

struct mlx5_reg_host_endianness {
        u8      he;
        u8      rsvd[15];
};

static u16 to_fw_pkey_sz(struct mlx5_core_dev *dev, u32 size)
{
        switch (size) {
        case 128:
                return 0;
        case 256:
                return 1;
        case 512:
                return 2;
        case 1024:
                return 3;
        case 2048:
                return 4;
        case 4096:
                return 5;
        default:
                mlx5_core_warn(dev, "invalid pkey table size %d\n", size);
                return 0;
        }
}

void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *dev, struct net_device *netdev)
{
        mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
        dev->mlx5e_res.uplink_netdev = netdev;
        mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
                                          netdev);
        mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
}

void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *dev)
{
        mutex_lock(&dev->mlx5e_res.uplink_netdev_lock);
        mlx5_blocking_notifier_call_chain(dev, MLX5_DRIVER_EVENT_UPLINK_NETDEV,
                                          dev->mlx5e_res.uplink_netdev);
        mutex_unlock(&dev->mlx5e_res.uplink_netdev_lock);
}
EXPORT_SYMBOL(mlx5_core_uplink_netdev_event_replay);

void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data)
{
        mlx5_blocking_notifier_call_chain(dev, event, data);
}
EXPORT_SYMBOL(mlx5_core_mp_event_replay);

int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type,
                            enum mlx5_cap_mode cap_mode)
{
        u8 in[MLX5_ST_SZ_BYTES(query_hca_cap_in)];
        int out_sz = MLX5_ST_SZ_BYTES(query_hca_cap_out);
        void *out, *hca_caps;
        u16 opmod = (cap_type << 1) | (cap_mode & 0x01);
        int err;

        memset(in, 0, sizeof(in));
        out = kzalloc(out_sz, GFP_KERNEL);
        if (!out)
                return -ENOMEM;

        MLX5_SET(query_hca_cap_in, in, opcode, MLX5_CMD_OP_QUERY_HCA_CAP);
        MLX5_SET(query_hca_cap_in, in, op_mod, opmod);
        err = mlx5_cmd_exec_inout(dev, query_hca_cap, in, out);
        if (err) {
                mlx5_core_warn(dev,
                               "QUERY_HCA_CAP : type(%x) opmode(%x) Failed(%d)\n",
                               cap_type, cap_mode, err);
                goto query_ex;
        }

        hca_caps = MLX5_ADDR_OF(query_hca_cap_out, out, capability);

        switch (cap_mode) {
        case HCA_CAP_OPMOD_GET_MAX:
                memcpy(dev->caps.hca[cap_type]->max, hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        case HCA_CAP_OPMOD_GET_CUR:
                memcpy(dev->caps.hca[cap_type]->cur, hca_caps,
                       MLX5_UN_SZ_BYTES(hca_cap_union));
                break;
        default:
                mlx5_core_warn(dev,
                               "Tried to query dev cap type(%x) with wrong opmode(%x)\n",
                               cap_type, cap_mode);
                err = -EINVAL;
                break;
        }
query_ex:
        kfree(out);
        return err;
}

int mlx5_core_get_caps(struct mlx5_core_dev *dev, enum mlx5_cap_type cap_type)
{
        int ret;

        ret = mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_CUR);
        if (ret)
                return ret;
        return mlx5_core_get_caps_mode(dev, cap_type, HCA_CAP_OPMOD_GET_MAX);
}

static int set_caps(struct mlx5_core_dev *dev, void *in, int opmod)
{
        MLX5_SET(set_hca_cap_in, in, opcode, MLX5_CMD_OP_SET_HCA_CAP);
        MLX5_SET(set_hca_cap_in, in, op_mod, opmod << 1);
        return mlx5_cmd_exec_in(dev, set_hca_cap, in);
}

static int handle_hca_cap_atomic(struct mlx5_core_dev *dev, void *set_ctx)
{
        void *set_hca_cap;
        int req_endianness;
        int err;

        if (!MLX5_CAP_GEN(dev, atomic))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_ATOMIC);
        if (err)
                return err;

        req_endianness =
                MLX5_CAP_ATOMIC(dev,
                                supported_atomic_req_8B_endianness_mode_1);

        if (req_endianness != MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS)
                return 0;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);

        /* Set requestor to host endianness */
        MLX5_SET(atomic_caps, set_hca_cap, atomic_req_8B_endianness_mode,
                 MLX5_ATOMIC_REQ_MODE_HOST_ENDIANNESS);

        return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ATOMIC);
}

static int handle_hca_cap_odp(struct mlx5_core_dev *dev, void *set_ctx)
{
        bool do_set = false;
        void *set_hca_cap;
        int err;

        if (!IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) ||
            !MLX5_CAP_GEN(dev, pg))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_ODP);
        if (err)
                return err;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
        memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ODP]->cur,
               MLX5_ST_SZ_BYTES(odp_cap));

#define ODP_CAP_SET_MAX(dev, field)                                            \
        do {                                                                   \
                u32 _res = MLX5_CAP_ODP_MAX(dev, field);                       \
                if (_res) {                                                    \
                        do_set = true;                                         \
                        MLX5_SET(odp_cap, set_hca_cap, field, _res);           \
                }                                                              \
        } while (0)

        ODP_CAP_SET_MAX(dev, ud_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, rc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.send);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.receive);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.write);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.read);
        ODP_CAP_SET_MAX(dev, xrc_odp_caps.atomic);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.srq_receive);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.send);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.receive);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.write);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.read);
        ODP_CAP_SET_MAX(dev, dc_odp_caps.atomic);

        if (!do_set)
                return 0;

        return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ODP);
}

503 static int max_uc_list_get_devlink_param(struct mlx5_core_dev *dev)
505 struct devlink *devlink = priv_to_devlink(dev);
506 union devlink_param_value val;
509 err = devl_param_driverinit_value_get(devlink,
510 DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
514 mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
518 bool mlx5_is_roce_on(struct mlx5_core_dev *dev)
520 struct devlink *devlink = priv_to_devlink(dev);
521 union devlink_param_value val;
524 err = devl_param_driverinit_value_get(devlink,
525 DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
531 mlx5_core_dbg(dev, "Failed to get param. err = %d\n", err);
532 return MLX5_CAP_GEN(dev, roce);
534 EXPORT_SYMBOL(mlx5_is_roce_on);
static int handle_hca_cap_2(struct mlx5_core_dev *dev, void *set_ctx)
{
        void *set_hca_cap;
        int err;

        if (!MLX5_CAP_GEN_MAX(dev, hca_cap_2))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL_2);
        if (err)
                return err;

        if (!MLX5_CAP_GEN_2_MAX(dev, sw_vhca_id_valid) ||
            !(dev->priv.sw_vhca_id > 0))
                return 0;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
                                   capability);
        memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL_2]->cur,
               MLX5_ST_SZ_BYTES(cmd_hca_cap_2));
        MLX5_SET(cmd_hca_cap_2, set_hca_cap, sw_vhca_id_valid, 1);

        return set_caps(dev, set_ctx, MLX5_CAP_GENERAL_2);
}

561 static int handle_hca_cap(struct mlx5_core_dev *dev, void *set_ctx)
563 struct mlx5_profile *prof = &dev->profile;
568 err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
572 set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
574 memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_GENERAL]->cur,
575 MLX5_ST_SZ_BYTES(cmd_hca_cap));
577 mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
578 mlx5_to_sw_pkey_sz(MLX5_CAP_GEN(dev, pkey_table_size)),
580 /* we limit the size of the pkey table to 128 entries for now */
581 MLX5_SET(cmd_hca_cap, set_hca_cap, pkey_table_size,
582 to_fw_pkey_sz(dev, 128));
584 /* Check log_max_qp from HCA caps to set in current profile */
585 if (prof->log_max_qp == LOG_MAX_SUPPORTED_QPS) {
586 prof->log_max_qp = min_t(u8, 18, MLX5_CAP_GEN_MAX(dev, log_max_qp));
587 } else if (MLX5_CAP_GEN_MAX(dev, log_max_qp) < prof->log_max_qp) {
588 mlx5_core_warn(dev, "log_max_qp value in current profile is %d, changing it to HCA capability limit (%d)\n",
590 MLX5_CAP_GEN_MAX(dev, log_max_qp));
591 prof->log_max_qp = MLX5_CAP_GEN_MAX(dev, log_max_qp);
593 if (prof->mask & MLX5_PROF_MASK_QP_SIZE)
594 MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_qp,
597 /* disable cmdif checksum */
598 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
600 /* Enable 4K UAR only when HCA supports it and page size is bigger
603 if (MLX5_CAP_GEN_MAX(dev, uar_4k) && PAGE_SIZE > 4096)
604 MLX5_SET(cmd_hca_cap, set_hca_cap, uar_4k, 1);
606 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
608 if (MLX5_CAP_GEN_MAX(dev, cache_line_128byte))
609 MLX5_SET(cmd_hca_cap,
612 cache_line_size() >= 128 ? 1 : 0);
614 if (MLX5_CAP_GEN_MAX(dev, dct))
615 MLX5_SET(cmd_hca_cap, set_hca_cap, dct, 1);
617 if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_event))
618 MLX5_SET(cmd_hca_cap, set_hca_cap, pci_sync_for_fw_update_event, 1);
619 if (MLX5_CAP_GEN_MAX(dev, pci_sync_for_fw_update_with_driver_unload))
620 MLX5_SET(cmd_hca_cap, set_hca_cap,
621 pci_sync_for_fw_update_with_driver_unload, 1);
623 if (MLX5_CAP_GEN_MAX(dev, num_vhca_ports))
624 MLX5_SET(cmd_hca_cap,
627 MLX5_CAP_GEN_MAX(dev, num_vhca_ports));
629 if (MLX5_CAP_GEN_MAX(dev, release_all_pages))
630 MLX5_SET(cmd_hca_cap, set_hca_cap, release_all_pages, 1);
632 if (MLX5_CAP_GEN_MAX(dev, mkey_by_name))
633 MLX5_SET(cmd_hca_cap, set_hca_cap, mkey_by_name, 1);
635 mlx5_vhca_state_cap_handle(dev, set_hca_cap);
637 if (MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix))
638 MLX5_SET(cmd_hca_cap, set_hca_cap, num_total_dynamic_vf_msix,
639 MLX5_CAP_GEN_MAX(dev, num_total_dynamic_vf_msix));
641 if (MLX5_CAP_GEN(dev, roce_rw_supported) && MLX5_CAP_GEN_MAX(dev, roce))
642 MLX5_SET(cmd_hca_cap, set_hca_cap, roce,
643 mlx5_is_roce_on(dev));
645 max_uc_list = max_uc_list_get_devlink_param(dev);
647 MLX5_SET(cmd_hca_cap, set_hca_cap, log_max_current_uc_list,
650 return set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE);
/* Cached MLX5_CAP_GEN(dev, roce) can be out of sync this early in the
 * boot process.
 * In case RoCE cap is writable in FW and user/devlink requested to change the
 * cap, we are yet to query the final state of the above cap.
 * Hence, the need for this function.
 *
 * Returns
 * True:
 * 1) RoCE cap is read only in FW and already disabled
 * OR:
 * 2) RoCE cap is writable in FW and user/devlink requested it off.
 *
 * In any other case, return False.
 */
static bool is_roce_fw_disabled(struct mlx5_core_dev *dev)
{
        return (MLX5_CAP_GEN(dev, roce_rw_supported) && !mlx5_is_roce_on(dev)) ||
                (!MLX5_CAP_GEN(dev, roce_rw_supported) && !MLX5_CAP_GEN(dev, roce));
}

static int handle_hca_cap_roce(struct mlx5_core_dev *dev, void *set_ctx)
{
        void *set_hca_cap;
        int err;

        if (is_roce_fw_disabled(dev))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_ROCE);
        if (err)
                return err;

        if (MLX5_CAP_ROCE(dev, sw_r_roce_src_udp_port) ||
            !MLX5_CAP_ROCE_MAX(dev, sw_r_roce_src_udp_port))
                return 0;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
        memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_ROCE]->cur,
               MLX5_ST_SZ_BYTES(roce_cap));
        MLX5_SET(roce_cap, set_hca_cap, sw_r_roce_src_udp_port, 1);

        if (MLX5_CAP_ROCE_MAX(dev, qp_ooo_transmit_default))
                MLX5_SET(roce_cap, set_hca_cap, qp_ooo_transmit_default, 1);

        err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_ROCE);
        return err;
}

static int handle_hca_cap_port_selection(struct mlx5_core_dev *dev,
                                         void *set_ctx)
{
        void *set_hca_cap;
        int err;

        if (!MLX5_CAP_GEN(dev, port_selection_cap))
                return 0;

        err = mlx5_core_get_caps(dev, MLX5_CAP_PORT_SELECTION);
        if (err)
                return err;

        if (MLX5_CAP_PORT_SELECTION(dev, port_select_flow_table_bypass) ||
            !MLX5_CAP_PORT_SELECTION_MAX(dev, port_select_flow_table_bypass))
                return 0;

        set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx, capability);
        memcpy(set_hca_cap, dev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur,
               MLX5_ST_SZ_BYTES(port_selection_cap));
        MLX5_SET(port_selection_cap, set_hca_cap, port_select_flow_table_bypass, 1);

        err = set_caps(dev, set_ctx, MLX5_SET_HCA_CAP_OP_MOD_PORT_SELECTION);

        return err;
}

static int set_hca_cap(struct mlx5_core_dev *dev)
{
        int set_sz = MLX5_ST_SZ_BYTES(set_hca_cap_in);
        void *set_ctx;
        int err;

        set_ctx = kzalloc(set_sz, GFP_KERNEL);
        if (!set_ctx)
                return -ENOMEM;

        err = handle_hca_cap(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap failed\n");
                goto out;
        }

        memset(set_ctx, 0, set_sz);
        err = handle_hca_cap_atomic(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_atomic failed\n");
                goto out;
        }

        memset(set_ctx, 0, set_sz);
        err = handle_hca_cap_odp(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_odp failed\n");
                goto out;
        }

        memset(set_ctx, 0, set_sz);
        err = handle_hca_cap_roce(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_roce failed\n");
                goto out;
        }

        memset(set_ctx, 0, set_sz);
        err = handle_hca_cap_2(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_2 failed\n");
                goto out;
        }

        memset(set_ctx, 0, set_sz);
        err = handle_hca_cap_port_selection(dev, set_ctx);
        if (err) {
                mlx5_core_err(dev, "handle_hca_cap_port_selection failed\n");
                goto out;
        }

out:
        kfree(set_ctx);
        return err;
}

static int set_hca_ctrl(struct mlx5_core_dev *dev)
{
        struct mlx5_reg_host_endianness he_in;
        struct mlx5_reg_host_endianness he_out;
        int err;

        if (!mlx5_core_is_pf(dev))
                return 0;

        memset(&he_in, 0, sizeof(he_in));
        he_in.he = MLX5_SET_HOST_ENDIANNESS;
        err = mlx5_core_access_reg(dev, &he_in, sizeof(he_in),
                                   &he_out, sizeof(he_out),
                                   MLX5_REG_HOST_ENDIANNESS, 0, 1);
        return err;
}

static int mlx5_core_set_hca_defaults(struct mlx5_core_dev *dev)
{
        int ret = 0;

        /* Disable local_lb by default */
        if (MLX5_CAP_GEN(dev, port_type) == MLX5_CAP_PORT_TYPE_ETH)
                ret = mlx5_nic_vport_update_local_lb(dev, false);

        return ret;
}

int mlx5_core_enable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};

        MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
        MLX5_SET(enable_hca_in, in, function_id, func_id);
        MLX5_SET(enable_hca_in, in, embedded_cpu_function,
                 dev->caps.embedded_cpu);
        return mlx5_cmd_exec_in(dev, enable_hca, in);
}

int mlx5_core_disable_hca(struct mlx5_core_dev *dev, u16 func_id)
{
        u32 in[MLX5_ST_SZ_DW(disable_hca_in)] = {};

        MLX5_SET(disable_hca_in, in, opcode, MLX5_CMD_OP_DISABLE_HCA);
        MLX5_SET(disable_hca_in, in, function_id, func_id);
        MLX5_SET(enable_hca_in, in, embedded_cpu_function,
                 dev->caps.embedded_cpu);
        return mlx5_cmd_exec_in(dev, disable_hca, in);
}

static int mlx5_core_set_issi(struct mlx5_core_dev *dev)
{
        u32 query_out[MLX5_ST_SZ_DW(query_issi_out)] = {};
        u32 query_in[MLX5_ST_SZ_DW(query_issi_in)] = {};
        u32 sup_issi;
        int err;

        MLX5_SET(query_issi_in, query_in, opcode, MLX5_CMD_OP_QUERY_ISSI);
        err = mlx5_cmd_exec_inout(dev, query_issi, query_in, query_out);
        if (err) {
                u32 syndrome = MLX5_GET(query_issi_out, query_out, syndrome);
                u8 status = MLX5_GET(query_issi_out, query_out, status);

                if (!status || syndrome == MLX5_DRIVER_SYND) {
                        mlx5_core_err(dev, "Failed to query ISSI err(%d) status(%d) synd(%d)\n",
                                      err, status, syndrome);
                        return err;
                }

                mlx5_core_warn(dev, "Query ISSI is not supported by FW, ISSI is 0\n");
                dev->issi = 0;
                return 0;
        }

        sup_issi = MLX5_GET(query_issi_out, query_out, supported_issi_dw0);

        if (sup_issi & (1 << 1)) {
                u32 set_in[MLX5_ST_SZ_DW(set_issi_in)] = {};

                MLX5_SET(set_issi_in, set_in, opcode, MLX5_CMD_OP_SET_ISSI);
                MLX5_SET(set_issi_in, set_in, current_issi, 1);
                err = mlx5_cmd_exec_in(dev, set_issi, set_in);
                if (err) {
                        mlx5_core_err(dev, "Failed to set ISSI to 1 err(%d)\n",
                                      err);
                        return err;
                }

                dev->issi = 1;

                return 0;
        } else if (sup_issi & (1 << 0) || !sup_issi) {
                return 0;
        }

        return -EOPNOTSUPP;
}

static int mlx5_pci_init(struct mlx5_core_dev *dev, struct pci_dev *pdev,
                         const struct pci_device_id *id)
{
        int err = 0;

        mutex_init(&dev->pci_status_mutex);
        pci_set_drvdata(dev->pdev, dev);

        dev->bar_addr = pci_resource_start(pdev, 0);

        err = mlx5_pci_enable_device(dev);
        if (err) {
                mlx5_core_err(dev, "Cannot enable PCI device, aborting\n");
                return err;
        }

        err = request_bar(pdev);
        if (err) {
                mlx5_core_err(dev, "error requesting BARs, aborting\n");
                goto err_disable;
        }

        pci_set_master(pdev);

        err = set_dma_caps(pdev);
        if (err) {
                mlx5_core_err(dev, "Failed setting DMA capabilities mask, aborting\n");
                goto err_clr_master;
        }

        if (pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP32) &&
            pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP64) &&
            pci_enable_atomic_ops_to_root(pdev, PCI_EXP_DEVCAP2_ATOMIC_COMP128))
                mlx5_core_dbg(dev, "Enabling pci atomics failed\n");

        dev->iseg_base = dev->bar_addr;
        dev->iseg = ioremap(dev->iseg_base, sizeof(*dev->iseg));
        if (!dev->iseg) {
                err = -ENOMEM;
                mlx5_core_err(dev, "Failed mapping initialization segment, aborting\n");
                goto err_clr_master;
        }

        mlx5_pci_vsc_init(dev);

        return 0;

err_clr_master:
        release_bar(dev->pdev);
err_disable:
        mlx5_pci_disable_device(dev);

        return err;
}

935 static void mlx5_pci_close(struct mlx5_core_dev *dev)
937 /* health work might still be active, and it needs pci bar in
938 * order to know the NIC state. Therefore, drain the health WQ
939 * before removing the pci bars
941 mlx5_drain_health_wq(dev);
943 release_bar(dev->pdev);
944 mlx5_pci_disable_device(dev);
static void mlx5_register_hca_devcom_comp(struct mlx5_core_dev *dev)
{
        /* This component is used to sync adding core_dev to lag_dev and to sync
         * changes of mlx5_adev_devices between LAG layer and other layers.
         */
        if (!mlx5_lag_is_supported(dev))
                return;

        dev->priv.hca_devcom_comp =
                mlx5_devcom_register_component(dev->priv.devc, MLX5_DEVCOM_HCA_PORTS,
                                               mlx5_query_nic_system_image_guid(dev),
                                               NULL, dev);
        if (IS_ERR(dev->priv.hca_devcom_comp))
                mlx5_core_err(dev, "Failed to register devcom HCA component\n");
}

static void mlx5_unregister_hca_devcom_comp(struct mlx5_core_dev *dev)
{
        mlx5_devcom_unregister_component(dev->priv.hca_devcom_comp);
}

static int mlx5_init_once(struct mlx5_core_dev *dev)
{
        int err;

        dev->priv.devc = mlx5_devcom_register_device(dev);
        if (IS_ERR(dev->priv.devc))
                mlx5_core_warn(dev, "failed to register devcom device %ld\n",
                               PTR_ERR(dev->priv.devc));
        mlx5_register_hca_devcom_comp(dev);

        err = mlx5_query_board_id(dev);
        if (err) {
                mlx5_core_err(dev, "query board id failed\n");
                goto err_devcom;
        }

        err = mlx5_irq_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize irq table\n");
                goto err_devcom;
        }

        err = mlx5_eq_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize eq\n");
                goto err_irq_cleanup;
        }

        err = mlx5_events_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize events\n");
                goto err_eq_cleanup;
        }

        err = mlx5_fw_reset_init(dev);
        if (err) {
                mlx5_core_err(dev, "failed to initialize fw reset events\n");
                goto err_events_cleanup;
        }

        mlx5_cq_debugfs_init(dev);

        mlx5_init_reserved_gids(dev);

        mlx5_init_clock(dev);

        dev->vxlan = mlx5_vxlan_create(dev);
        dev->geneve = mlx5_geneve_create(dev);

        err = mlx5_init_rl_table(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init rate limiting\n");
                goto err_tables_cleanup;
        }

        err = mlx5_mpfs_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init l2 table %d\n", err);
                goto err_rl_cleanup;
        }

        err = mlx5_sriov_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init sriov %d\n", err);
                goto err_mpfs_cleanup;
        }

        err = mlx5_eswitch_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init eswitch %d\n", err);
                goto err_sriov_cleanup;
        }

        err = mlx5_fpga_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init fpga device %d\n", err);
                goto err_eswitch_cleanup;
        }

        err = mlx5_vhca_event_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init vhca event notifier %d\n", err);
                goto err_fpga_cleanup;
        }

        err = mlx5_sf_hw_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init SF HW table %d\n", err);
                goto err_sf_hw_table_cleanup;
        }

        err = mlx5_sf_table_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init SF table %d\n", err);
                goto err_sf_table_cleanup;
        }

        err = mlx5_fs_core_alloc(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to alloc flow steering\n");
                goto err_fs;
        }

        dev->dm = mlx5_dm_create(dev);
        if (IS_ERR(dev->dm))
                mlx5_core_warn(dev, "Failed to init device memory %ld\n", PTR_ERR(dev->dm));

        dev->tracer = mlx5_fw_tracer_create(dev);
        dev->hv_vhca = mlx5_hv_vhca_create(dev);
        dev->rsc_dump = mlx5_rsc_dump_create(dev);

        return 0;

err_fs:
        mlx5_sf_table_cleanup(dev);
err_sf_table_cleanup:
        mlx5_sf_hw_table_cleanup(dev);
err_sf_hw_table_cleanup:
        mlx5_vhca_event_cleanup(dev);
err_fpga_cleanup:
        mlx5_fpga_cleanup(dev);
err_eswitch_cleanup:
        mlx5_eswitch_cleanup(dev->priv.eswitch);
err_sriov_cleanup:
        mlx5_sriov_cleanup(dev);
err_mpfs_cleanup:
        mlx5_mpfs_cleanup(dev);
err_rl_cleanup:
        mlx5_cleanup_rl_table(dev);
err_tables_cleanup:
        mlx5_geneve_destroy(dev->geneve);
        mlx5_vxlan_destroy(dev->vxlan);
        mlx5_cleanup_clock(dev);
        mlx5_cleanup_reserved_gids(dev);
        mlx5_cq_debugfs_cleanup(dev);
        mlx5_fw_reset_cleanup(dev);
err_events_cleanup:
        mlx5_events_cleanup(dev);
err_eq_cleanup:
        mlx5_eq_table_cleanup(dev);
err_irq_cleanup:
        mlx5_irq_table_cleanup(dev);
err_devcom:
        mlx5_unregister_hca_devcom_comp(dev);
        mlx5_devcom_unregister_device(dev->priv.devc);

        return err;
}

static void mlx5_cleanup_once(struct mlx5_core_dev *dev)
{
        mlx5_rsc_dump_destroy(dev);
        mlx5_hv_vhca_destroy(dev->hv_vhca);
        mlx5_fw_tracer_destroy(dev->tracer);
        mlx5_dm_cleanup(dev);
        mlx5_fs_core_free(dev);
        mlx5_sf_table_cleanup(dev);
        mlx5_sf_hw_table_cleanup(dev);
        mlx5_vhca_event_cleanup(dev);
        mlx5_fpga_cleanup(dev);
        mlx5_eswitch_cleanup(dev->priv.eswitch);
        mlx5_sriov_cleanup(dev);
        mlx5_mpfs_cleanup(dev);
        mlx5_cleanup_rl_table(dev);
        mlx5_geneve_destroy(dev->geneve);
        mlx5_vxlan_destroy(dev->vxlan);
        mlx5_cleanup_clock(dev);
        mlx5_cleanup_reserved_gids(dev);
        mlx5_cq_debugfs_cleanup(dev);
        mlx5_fw_reset_cleanup(dev);
        mlx5_events_cleanup(dev);
        mlx5_eq_table_cleanup(dev);
        mlx5_irq_table_cleanup(dev);
        mlx5_unregister_hca_devcom_comp(dev);
        mlx5_devcom_unregister_device(dev->priv.devc);
}

static int mlx5_function_enable(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
        int err;

        mlx5_core_info(dev, "firmware version: %d.%d.%d\n", fw_rev_maj(dev),
                       fw_rev_min(dev), fw_rev_sub(dev));

        /* Only PFs hold the relevant PCIe information for this query */
        if (mlx5_core_is_pf(dev))
                pcie_print_link_status(dev->pdev);

        /* wait for firmware to accept initialization segments configurations
         */
        err = wait_fw_init(dev, timeout,
                           mlx5_tout_ms(dev, FW_PRE_INIT_WARN_MESSAGE_INTERVAL),
                           "pre-initializing");
        if (err)
                return err;

        err = mlx5_cmd_enable(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing command interface, aborting\n");
                return err;
        }

        mlx5_tout_query_iseg(dev);

        err = wait_fw_init(dev, mlx5_tout_ms(dev, FW_INIT), 0, "initializing");
        if (err)
                goto err_cmd_cleanup;

        dev->caps.embedded_cpu = mlx5_read_embedded_cpu(dev);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_UP);

        mlx5_start_health_poll(dev);

        err = mlx5_core_enable_hca(dev, 0);
        if (err) {
                mlx5_core_err(dev, "enable hca failed\n");
                goto stop_health_poll;
        }

        err = mlx5_core_set_issi(dev);
        if (err) {
                mlx5_core_err(dev, "failed to set issi\n");
                goto err_disable_hca;
        }

        err = mlx5_satisfy_startup_pages(dev, 1);
        if (err) {
                mlx5_core_err(dev, "failed to allocate boot pages\n");
                goto err_disable_hca;
        }

        err = mlx5_tout_query_dtor(dev);
        if (err) {
                mlx5_core_err(dev, "failed to read dtor\n");
                goto reclaim_boot_pages;
        }

        return 0;

reclaim_boot_pages:
        mlx5_reclaim_startup_pages(dev);
err_disable_hca:
        mlx5_core_disable_hca(dev, 0);
stop_health_poll:
        mlx5_stop_health_poll(dev, boot);
err_cmd_cleanup:
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_disable(dev);

        return err;
}

static void mlx5_function_disable(struct mlx5_core_dev *dev, bool boot)
{
        mlx5_reclaim_startup_pages(dev);
        mlx5_core_disable_hca(dev, 0);
        mlx5_stop_health_poll(dev, boot);
        mlx5_cmd_set_state(dev, MLX5_CMDIF_STATE_DOWN);
        mlx5_cmd_disable(dev);
}

static int mlx5_function_open(struct mlx5_core_dev *dev)
{
        int err;

        err = set_hca_ctrl(dev);
        if (err) {
                mlx5_core_err(dev, "set_hca_ctrl failed\n");
                return err;
        }

        err = set_hca_cap(dev);
        if (err) {
                mlx5_core_err(dev, "set_hca_cap failed\n");
                return err;
        }

        err = mlx5_satisfy_startup_pages(dev, 0);
        if (err) {
                mlx5_core_err(dev, "failed to allocate init pages\n");
                return err;
        }

        err = mlx5_cmd_init_hca(dev, sw_owner_id);
        if (err) {
                mlx5_core_err(dev, "init hca failed\n");
                return err;
        }

        mlx5_set_driver_version(dev);

        err = mlx5_query_hca_caps(dev);
        if (err) {
                mlx5_core_err(dev, "query hca failed\n");
                return err;
        }
        mlx5_start_health_fw_log_up(dev);
        return 0;
}

static int mlx5_function_close(struct mlx5_core_dev *dev)
{
        int err;

        err = mlx5_cmd_teardown_hca(dev);
        if (err) {
                mlx5_core_err(dev, "tear_down_hca failed, skip cleanup\n");
                return err;
        }

        return 0;
}

static int mlx5_function_setup(struct mlx5_core_dev *dev, bool boot, u64 timeout)
{
        int err;

        err = mlx5_function_enable(dev, boot, timeout);
        if (err)
                return err;

        err = mlx5_function_open(dev);
        if (err)
                mlx5_function_disable(dev, boot);
        return err;
}

static int mlx5_function_teardown(struct mlx5_core_dev *dev, bool boot)
{
        int err = mlx5_function_close(dev);

        if (!err)
                mlx5_function_disable(dev, boot);
        return err;
}

static int mlx5_load(struct mlx5_core_dev *dev)
{
        int err;

        dev->priv.uar = mlx5_get_uars_page(dev);
        if (IS_ERR(dev->priv.uar)) {
                mlx5_core_err(dev, "Failed allocating uar, aborting\n");
                err = PTR_ERR(dev->priv.uar);
                return err;
        }

        mlx5_events_start(dev);
        mlx5_pagealloc_start(dev);

        err = mlx5_irq_table_create(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to alloc IRQs\n");
                goto err_irq_table;
        }

        err = mlx5_eq_table_create(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to create EQs\n");
                goto err_eq_table;
        }

        err = mlx5_fw_tracer_init(dev->tracer);
        if (err) {
                mlx5_core_err(dev, "Failed to init FW tracer %d\n", err);
                mlx5_fw_tracer_destroy(dev->tracer);
                dev->tracer = NULL;
        }

        mlx5_fw_reset_events_start(dev);
        mlx5_hv_vhca_init(dev->hv_vhca);

        err = mlx5_rsc_dump_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init Resource dump %d\n", err);
                mlx5_rsc_dump_destroy(dev);
                dev->rsc_dump = NULL;
        }

        err = mlx5_fpga_device_start(dev);
        if (err) {
                mlx5_core_err(dev, "fpga device start failed %d\n", err);
                goto err_fpga_start;
        }

        err = mlx5_fs_core_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init flow steering\n");
                goto err_fs;
        }

        err = mlx5_core_set_hca_defaults(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to set hca defaults\n");
                goto err_set_hca;
        }

        mlx5_vhca_event_start(dev);

        err = mlx5_sf_hw_table_create(dev);
        if (err) {
                mlx5_core_err(dev, "sf table create failed %d\n", err);
                goto err_vhca;
        }

        err = mlx5_ec_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed to init embedded CPU\n");
                goto err_ec;
        }

        mlx5_lag_add_mdev(dev);
        err = mlx5_sriov_attach(dev);
        if (err) {
                mlx5_core_err(dev, "sriov init failed %d\n", err);
                goto err_sriov;
        }

        mlx5_sf_dev_table_create(dev);

        err = mlx5_devlink_traps_register(priv_to_devlink(dev));
        if (err)
                goto err_traps_reg;

        return 0;

err_traps_reg:
        mlx5_sf_dev_table_destroy(dev);
        mlx5_sriov_detach(dev);
err_sriov:
        mlx5_lag_remove_mdev(dev);
        mlx5_ec_cleanup(dev);
err_ec:
        mlx5_sf_hw_table_destroy(dev);
err_vhca:
        mlx5_vhca_event_stop(dev);
err_set_hca:
        mlx5_fs_core_cleanup(dev);
err_fs:
        mlx5_fpga_device_stop(dev);
err_fpga_start:
        mlx5_rsc_dump_cleanup(dev);
        mlx5_hv_vhca_cleanup(dev->hv_vhca);
        mlx5_fw_reset_events_stop(dev);
        mlx5_fw_tracer_cleanup(dev->tracer);
        mlx5_eq_table_destroy(dev);
err_eq_table:
        mlx5_irq_table_destroy(dev);
err_irq_table:
        mlx5_pagealloc_stop(dev);
        mlx5_events_stop(dev);
        mlx5_put_uars_page(dev, dev->priv.uar);
        return err;
}

1423 static void mlx5_unload(struct mlx5_core_dev *dev)
1425 mlx5_eswitch_disable(dev->priv.eswitch);
1426 mlx5_devlink_traps_unregister(priv_to_devlink(dev));
1427 mlx5_sf_dev_table_destroy(dev);
1428 mlx5_sriov_detach(dev);
1429 mlx5_lag_remove_mdev(dev);
1430 mlx5_ec_cleanup(dev);
1431 mlx5_sf_hw_table_destroy(dev);
1432 mlx5_vhca_event_stop(dev);
1433 mlx5_fs_core_cleanup(dev);
1434 mlx5_fpga_device_stop(dev);
1435 mlx5_rsc_dump_cleanup(dev);
1436 mlx5_hv_vhca_cleanup(dev->hv_vhca);
1437 mlx5_fw_reset_events_stop(dev);
1438 mlx5_fw_tracer_cleanup(dev->tracer);
1439 mlx5_eq_table_destroy(dev);
1440 mlx5_irq_table_destroy(dev);
1441 mlx5_pagealloc_stop(dev);
1442 mlx5_events_stop(dev);
1443 mlx5_put_uars_page(dev, dev->priv.uar);
int mlx5_init_one_devl_locked(struct mlx5_core_dev *dev)
{
        bool light_probe = mlx5_dev_is_lightweight(dev);
        int err = 0;

        mutex_lock(&dev->intf_state_mutex);
        dev->state = MLX5_DEVICE_STATE_UP;

        err = mlx5_function_setup(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
        if (err)
                goto err_function;

        err = mlx5_init_once(dev);
        if (err) {
                mlx5_core_err(dev, "sw objs init failed\n");
                goto function_teardown;
        }

        /* In case of light_probe, mlx5_devlink is already registered.
         * Hence, don't register devlink again.
         */
        if (!light_probe) {
                err = mlx5_devlink_params_register(priv_to_devlink(dev));
                if (err)
                        goto err_devlink_params_reg;
        }

        err = mlx5_load(dev);
        if (err)
                goto err_load;

        set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

        err = mlx5_register_device(dev);
        if (err)
                goto err_register;

        err = mlx5_crdump_enable(dev);
        if (err)
                mlx5_core_err(dev, "mlx5_crdump_enable failed with error code %d\n", err);

        err = mlx5_hwmon_dev_register(dev);
        if (err)
                mlx5_core_err(dev, "mlx5_hwmon_dev_register failed with error code %d\n", err);

        mutex_unlock(&dev->intf_state_mutex);
        return 0;

err_register:
        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
        mlx5_unload(dev);
err_load:
        if (!light_probe)
                mlx5_devlink_params_unregister(priv_to_devlink(dev));
err_devlink_params_reg:
        mlx5_cleanup_once(dev);
function_teardown:
        mlx5_function_teardown(dev, true);
err_function:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        mutex_unlock(&dev->intf_state_mutex);
        return err;
}

int mlx5_init_one(struct mlx5_core_dev *dev)
{
        struct devlink *devlink = priv_to_devlink(dev);
        int err;

        devl_lock(devlink);
        devl_register(devlink);
        err = mlx5_init_one_devl_locked(dev);
        if (err)
                devl_unregister(devlink);
        devl_unlock(devlink);
        return err;
}

void mlx5_uninit_one(struct mlx5_core_dev *dev)
{
        struct devlink *devlink = priv_to_devlink(dev);

        devl_lock(devlink);
        mutex_lock(&dev->intf_state_mutex);

        mlx5_hwmon_dev_unregister(dev);
        mlx5_crdump_disable(dev);
        mlx5_unregister_device(dev);

        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "%s: interface is down, NOP\n",
                               __func__);
                mlx5_devlink_params_unregister(priv_to_devlink(dev));
                mlx5_cleanup_once(dev);
                goto out;
        }

        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
        mlx5_unload(dev);
        mlx5_devlink_params_unregister(priv_to_devlink(dev));
        mlx5_cleanup_once(dev);
        mlx5_function_teardown(dev, true);
out:
        mutex_unlock(&dev->intf_state_mutex);
        devl_unregister(devlink);
        devl_unlock(devlink);
}

int mlx5_load_one_devl_locked(struct mlx5_core_dev *dev, bool recovery)
{
        int err = 0;
        u64 timeout;

        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&dev->intf_state_mutex);
        if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "interface is up, NOP\n");
                goto out;
        }
        /* remove any previous indication of internal error */
        dev->state = MLX5_DEVICE_STATE_UP;

        if (recovery)
                timeout = mlx5_tout_ms(dev, FW_PRE_INIT_ON_RECOVERY_TIMEOUT);
        else
                timeout = mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT);
        err = mlx5_function_setup(dev, false, timeout);
        if (err)
                goto err_function;

        err = mlx5_load(dev);
        if (err)
                goto err_load;

        set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);

        err = mlx5_attach_device(dev);
        if (err)
                goto err_attach;

        mutex_unlock(&dev->intf_state_mutex);
        return 0;

err_attach:
        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
        mlx5_unload(dev);
err_load:
        mlx5_function_teardown(dev, false);
err_function:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
out:
        mutex_unlock(&dev->intf_state_mutex);
        return err;
}

int mlx5_load_one(struct mlx5_core_dev *dev, bool recovery)
{
        struct devlink *devlink = priv_to_devlink(dev);
        int ret;

        devl_lock(devlink);
        ret = mlx5_load_one_devl_locked(dev, recovery);
        devl_unlock(devlink);
        return ret;
}

void mlx5_unload_one_devl_locked(struct mlx5_core_dev *dev, bool suspend)
{
        devl_assert_locked(priv_to_devlink(dev));
        mutex_lock(&dev->intf_state_mutex);

        mlx5_detach_device(dev, suspend);

        if (!test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
                mlx5_core_warn(dev, "%s: interface is down, NOP\n",
                               __func__);
                goto out;
        }

        clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
        mlx5_unload(dev);
        mlx5_function_teardown(dev, false);
out:
        mutex_unlock(&dev->intf_state_mutex);
}

void mlx5_unload_one(struct mlx5_core_dev *dev, bool suspend)
{
        struct devlink *devlink = priv_to_devlink(dev);

        devl_lock(devlink);
        mlx5_unload_one_devl_locked(dev, suspend);
        devl_unlock(devlink);
}

/* In case of light probe, we don't need a full query of hca_caps, but only the below caps.
 * A full query of hca_caps will be done when the device will reload.
 */
static int mlx5_query_hca_caps_light(struct mlx5_core_dev *dev)
{
        int err;

        err = mlx5_core_get_caps(dev, MLX5_CAP_GENERAL);
        if (err)
                return err;

        if (MLX5_CAP_GEN(dev, eth_net_offloads)) {
                err = mlx5_core_get_caps_mode(dev, MLX5_CAP_ETHERNET_OFFLOADS,
                                              HCA_CAP_OPMOD_GET_CUR);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN(dev, nic_flow_table) ||
            MLX5_CAP_GEN(dev, ipoib_enhanced_offloads)) {
                err = mlx5_core_get_caps_mode(dev, MLX5_CAP_FLOW_TABLE,
                                              HCA_CAP_OPMOD_GET_CUR);
                if (err)
                        return err;
        }

        if (MLX5_CAP_GEN_64(dev, general_obj_types) &
            MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q) {
                err = mlx5_core_get_caps_mode(dev, MLX5_CAP_VDPA_EMULATION,
                                              HCA_CAP_OPMOD_GET_CUR);
                if (err)
                        return err;
        }

        return 0;
}

int mlx5_init_one_light(struct mlx5_core_dev *dev)
{
        struct devlink *devlink = priv_to_devlink(dev);
        int err;

        dev->state = MLX5_DEVICE_STATE_UP;
        err = mlx5_function_enable(dev, true, mlx5_tout_ms(dev, FW_PRE_INIT_TIMEOUT));
        if (err) {
                mlx5_core_warn(dev, "mlx5_function_enable err=%d\n", err);
                goto out;
        }

        err = mlx5_query_hca_caps_light(dev);
        if (err) {
                mlx5_core_warn(dev, "mlx5_query_hca_caps_light err=%d\n", err);
                goto query_hca_caps_err;
        }

        devl_lock(devlink);
        devl_register(devlink);

        err = mlx5_devlink_params_register(priv_to_devlink(dev));
        if (err) {
                mlx5_core_warn(dev, "mlx5_devlink_param_reg err = %d\n", err);
                goto params_reg_err;
        }

        devl_unlock(devlink);
        return 0;

params_reg_err:
        devl_unregister(devlink);
        devl_unlock(devlink);
query_hca_caps_err:
        mlx5_function_disable(dev, true);
out:
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
        return err;
}

void mlx5_uninit_one_light(struct mlx5_core_dev *dev)
{
        struct devlink *devlink = priv_to_devlink(dev);

        devl_lock(devlink);
        mlx5_devlink_params_unregister(priv_to_devlink(dev));
        devl_unregister(devlink);
        devl_unlock(devlink);
        if (dev->state != MLX5_DEVICE_STATE_UP)
                return;
        mlx5_function_disable(dev, true);
}

/* xxx_light() functions are used to configure the device without a full
 * init (light init). e.g.: there is no point in reloading a device to the light
 * state. Hence, mlx5_load_one_light() isn't needed.
 */
void mlx5_unload_one_light(struct mlx5_core_dev *dev)
{
        if (dev->state != MLX5_DEVICE_STATE_UP)
                return;
        mlx5_function_disable(dev, false);
        dev->state = MLX5_DEVICE_STATE_INTERNAL_ERROR;
}

static const int types[] = {
        MLX5_CAP_GENERAL,
        MLX5_CAP_GENERAL_2,
        MLX5_CAP_ETHERNET_OFFLOADS,
        MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
        MLX5_CAP_ODP,
        MLX5_CAP_ATOMIC,
        MLX5_CAP_ROCE,
        MLX5_CAP_IPOIB_OFFLOADS,
        MLX5_CAP_FLOW_TABLE,
        MLX5_CAP_ESWITCH_FLOW_TABLE,
        MLX5_CAP_ESWITCH,
        MLX5_CAP_QOS,
        MLX5_CAP_DEBUG,
        MLX5_CAP_DEV_MEM,
        MLX5_CAP_DEV_EVENT,
        MLX5_CAP_TLS,
        MLX5_CAP_VDPA_EMULATION,
        MLX5_CAP_IPSEC,
        MLX5_CAP_PORT_SELECTION,
        MLX5_CAP_MACSEC,
        MLX5_CAP_ADV_VIRTUALIZATION,
        MLX5_CAP_CRYPTO,
};

static void mlx5_hca_caps_free(struct mlx5_core_dev *dev)
{
        int type;
        int i;

        for (i = 0; i < ARRAY_SIZE(types); i++) {
                type = types[i];
                kfree(dev->caps.hca[type]);
        }
}

static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev)
{
        struct mlx5_hca_cap *cap;
        int type;
        int i;

        for (i = 0; i < ARRAY_SIZE(types); i++) {
                cap = kzalloc(sizeof(*cap), GFP_KERNEL);
                if (!cap)
                        goto err;
                type = types[i];
                dev->caps.hca[type] = cap;
        }

        return 0;

err:
        mlx5_hca_caps_free(dev);
        return -ENOMEM;
}

static int vhca_id_show(struct seq_file *file, void *priv)
{
        struct mlx5_core_dev *dev = file->private;

        seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id));
        return 0;
}

DEFINE_SHOW_ATTRIBUTE(vhca_id);
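/* Example (illustrative; the exact path depends on the PCI address): with
 * debugfs mounted, the attribute defined above exposes the function's vhca_id
 * under the device's debugfs directory created in mlx5_mdev_init() below:
 *
 *   cat /sys/kernel/debug/mlx5/0000:08:00.0/vhca_id
 *   0x0
 */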
int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx)
{
        struct mlx5_priv *priv = &dev->priv;
        int err;

        memcpy(&dev->profile, &profile[profile_idx], sizeof(dev->profile));
        lockdep_register_key(&dev->lock_key);
        mutex_init(&dev->intf_state_mutex);
        lockdep_set_class(&dev->intf_state_mutex, &dev->lock_key);
        mutex_init(&dev->mlx5e_res.uplink_netdev_lock);

        mutex_init(&priv->bfregs.reg_head.lock);
        mutex_init(&priv->bfregs.wc_head.lock);
        INIT_LIST_HEAD(&priv->bfregs.reg_head.list);
        INIT_LIST_HEAD(&priv->bfregs.wc_head.list);
        mutex_init(&priv->alloc_mutex);
        mutex_init(&priv->pgdir_mutex);
        INIT_LIST_HEAD(&priv->pgdir_list);

        priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev));
        priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device),
                                                mlx5_debugfs_root);
        debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops);
        INIT_LIST_HEAD(&priv->traps);

        err = mlx5_cmd_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing cmdif SW structs, aborting\n");
                goto err_cmd_init;
        }

        err = mlx5_tout_init(dev);
        if (err) {
                mlx5_core_err(dev, "Failed initializing timeouts, aborting\n");
                goto err_timeout_init;
        }

        err = mlx5_health_init(dev);
        if (err)
                goto err_health_init;

        err = mlx5_pagealloc_init(dev);
        if (err)
                goto err_pagealloc_init;

        err = mlx5_adev_init(dev);
        if (err)
                goto err_adev_init;

        err = mlx5_hca_caps_alloc(dev);
        if (err)
                goto err_hca_caps;

        /* The conjunction of sw_vhca_id with sw_owner_id will be a global
         * unique id per function which uses mlx5_core.
         * Those values are supplied to FW as part of the init HCA command to
         * be used by both driver and FW when it's applicable.
         */
        dev->priv.sw_vhca_id = ida_alloc_range(&sw_vhca_ida, 1,
                                               MAX_SW_VHCA_ID,
                                               GFP_KERNEL);
        if (dev->priv.sw_vhca_id < 0)
                mlx5_core_err(dev, "failed to allocate sw_vhca_id, err=%d\n",
                              dev->priv.sw_vhca_id);

        return 0;

err_hca_caps:
        mlx5_adev_cleanup(dev);
err_adev_init:
        mlx5_pagealloc_cleanup(dev);
err_pagealloc_init:
        mlx5_health_cleanup(dev);
err_health_init:
        mlx5_tout_cleanup(dev);
err_timeout_init:
        mlx5_cmd_cleanup(dev);
err_cmd_init:
        debugfs_remove(dev->priv.dbg.dbg_root);
        mutex_destroy(&priv->pgdir_mutex);
        mutex_destroy(&priv->alloc_mutex);
        mutex_destroy(&priv->bfregs.wc_head.lock);
        mutex_destroy(&priv->bfregs.reg_head.lock);
        mutex_destroy(&dev->intf_state_mutex);
        lockdep_unregister_key(&dev->lock_key);
        return err;
}

1899 void mlx5_mdev_uninit(struct mlx5_core_dev *dev)
1901 struct mlx5_priv *priv = &dev->priv;
1903 if (priv->sw_vhca_id > 0)
1904 ida_free(&sw_vhca_ida, dev->priv.sw_vhca_id);
1906 mlx5_hca_caps_free(dev);
1907 mlx5_adev_cleanup(dev);
1908 mlx5_pagealloc_cleanup(dev);
1909 mlx5_health_cleanup(dev);
1910 mlx5_tout_cleanup(dev);
1911 mlx5_cmd_cleanup(dev);
1912 debugfs_remove_recursive(dev->priv.dbg.dbg_root);
1913 mutex_destroy(&priv->pgdir_mutex);
1914 mutex_destroy(&priv->alloc_mutex);
1915 mutex_destroy(&priv->bfregs.wc_head.lock);
1916 mutex_destroy(&priv->bfregs.reg_head.lock);
1917 mutex_destroy(&dev->mlx5e_res.uplink_netdev_lock);
1918 mutex_destroy(&dev->intf_state_mutex);
1919 lockdep_unregister_key(&dev->lock_key);
static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct mlx5_core_dev *dev;
        struct devlink *devlink;
        int err;

        devlink = mlx5_devlink_alloc(&pdev->dev);
        if (!devlink) {
                dev_err(&pdev->dev, "devlink alloc failed\n");
                return -ENOMEM;
        }

        dev = devlink_priv(devlink);
        dev->device = &pdev->dev;
        dev->pdev = pdev;

        dev->coredev_type = id->driver_data & MLX5_PCI_DEV_IS_VF ?
                         MLX5_COREDEV_VF : MLX5_COREDEV_PF;

        dev->priv.adev_idx = mlx5_adev_idx_alloc();
        if (dev->priv.adev_idx < 0) {
                err = dev->priv.adev_idx;
                goto adev_init_err;
        }

        err = mlx5_mdev_init(dev, prof_sel);
        if (err)
                goto mdev_init_err;

        err = mlx5_pci_init(dev, pdev, id);
        if (err) {
                mlx5_core_err(dev, "mlx5_pci_init failed with error code %d\n",
                              err);
                goto pci_init_err;
        }

        err = mlx5_init_one(dev);
        if (err) {
                mlx5_core_err(dev, "mlx5_init_one failed with error code %d\n",
                              err);
                goto err_init_one;
        }

        pci_save_state(pdev);
        return 0;

err_init_one:
        mlx5_pci_close(dev);
pci_init_err:
        mlx5_mdev_uninit(dev);
mdev_init_err:
        mlx5_adev_idx_free(dev->priv.adev_idx);
adev_init_err:
        mlx5_devlink_free(devlink);

        return err;
}

static void remove_one(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct devlink *devlink = priv_to_devlink(dev);

        set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
        mlx5_drain_fw_reset(dev);
        mlx5_drain_health_wq(dev);
        mlx5_sriov_disable(pdev, false);
        mlx5_uninit_one(dev);
        mlx5_pci_close(dev);
        mlx5_mdev_uninit(dev);
        mlx5_adev_idx_free(dev->priv.adev_idx);
        mlx5_devlink_free(devlink);
}

#define mlx5_pci_trace(dev, fmt, ...) ({ \
        struct mlx5_core_dev *__dev = (dev); \
        mlx5_core_info(__dev, "%s Device state = %d health sensors: %d pci_status: %d. " fmt, \
                       __func__, __dev->state, mlx5_health_check_fatal_sensors(__dev), \
                       __dev->pci_status, ##__VA_ARGS__); \
})

static const char *result2str(enum pci_ers_result result)
{
        return  result == PCI_ERS_RESULT_NEED_RESET ? "need reset" :
                result == PCI_ERS_RESULT_DISCONNECT ? "disconnect" :
                result == PCI_ERS_RESULT_RECOVERED  ? "recovered" :
                "unknown";
}

static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
                                              pci_channel_state_t state)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        enum pci_ers_result res;

        mlx5_pci_trace(dev, "Enter, pci channel state = %d\n", state);

        mlx5_enter_error_state(dev, false);
        mlx5_error_sw_reset(dev);
        mlx5_unload_one(dev, false);
        mlx5_drain_health_wq(dev);
        mlx5_pci_disable_device(dev);

        res = state == pci_channel_io_perm_failure ?
                PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;

        mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, result = %d, %s\n",
                       __func__, dev->state, dev->pci_status, res, result2str(res));
        return res;
}

/* wait for the device to show vital signs by waiting
 * for the health counter to start counting.
 */
static int wait_vital(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        struct mlx5_core_health *health = &dev->priv.health;
        const int niter = 100;
        u32 last_count = 0;
        u32 count;
        int i;

        for (i = 0; i < niter; i++) {
                count = ioread32be(health->health_counter);
                if (count && count != 0xffffffff) {
                        if (last_count && last_count != count) {
                                mlx5_core_info(dev,
                                               "wait vital counter value 0x%x after %d iterations\n",
                                               count, i);
                                return 0;
                        }
                        last_count = count;
                }
                msleep(50);
        }

        return -ETIMEDOUT;
}

static pci_ers_result_t mlx5_pci_slot_reset(struct pci_dev *pdev)
{
        enum pci_ers_result res = PCI_ERS_RESULT_DISCONNECT;
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Enter\n",
                       __func__, dev->state, dev->pci_status);

        err = mlx5_pci_enable_device(dev);
        if (err) {
                mlx5_core_err(dev, "%s: mlx5_pci_enable_device failed with error code: %d\n",
                              __func__, err);
                goto out;
        }

        pci_set_master(pdev);
        pci_restore_state(pdev);
        pci_save_state(pdev);

        err = wait_vital(pdev);
        if (err) {
                mlx5_core_err(dev, "%s: wait vital failed with error code: %d\n",
                              __func__, err);
                goto out;
        }

        res = PCI_ERS_RESULT_RECOVERED;
out:
        mlx5_core_info(dev, "%s Device state = %d pci_status: %d. Exit, err = %d, result = %d, %s\n",
                       __func__, dev->state, dev->pci_status, err, res, result2str(res));
        return res;
}

static void mlx5_pci_resume(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_pci_trace(dev, "Enter, loading driver..\n");

        err = mlx5_load_one(dev, false);

        if (!err)
                devlink_health_reporter_state_update(dev->priv.health.fw_fatal_reporter,
                                                     DEVLINK_HEALTH_REPORTER_STATE_HEALTHY);

        mlx5_pci_trace(dev, "Done, err = %d, device %s\n", err,
                       !err ? "recovered" : "Failed");
}

static const struct pci_error_handlers mlx5_err_handler = {
        .error_detected = mlx5_pci_err_detected,
        .slot_reset     = mlx5_pci_slot_reset,
        .resume         = mlx5_pci_resume
};

static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
{
        bool fast_teardown = false, force_teardown = false;
        int ret = 1;

        fast_teardown = MLX5_CAP_GEN(dev, fast_teardown);
        force_teardown = MLX5_CAP_GEN(dev, force_teardown);

        mlx5_core_dbg(dev, "force teardown firmware support=%d\n", force_teardown);
        mlx5_core_dbg(dev, "fast teardown firmware support=%d\n", fast_teardown);

        if (!fast_teardown && !force_teardown)
                return -EOPNOTSUPP;

        if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
                mlx5_core_dbg(dev, "Device in internal error state, giving up\n");
                return -EAGAIN;
        }

        /* Panic tear down fw command will stop the PCI bus communication
         * with the HCA, so the health poll is no longer needed.
         */
        mlx5_drain_health_wq(dev);
        mlx5_stop_health_poll(dev, false);

        ret = mlx5_cmd_fast_teardown_hca(dev);
        if (!ret)
                goto succeed;

        ret = mlx5_cmd_force_teardown_hca(dev);
        if (!ret)
                goto succeed;

        mlx5_core_dbg(dev, "Firmware couldn't do fast unload error: %d\n", ret);
        mlx5_start_health_poll(dev);
        return ret;

succeed:
        mlx5_enter_error_state(dev, true);

        /* Some platforms require freeing the IRQs in the shutdown
         * flow. If they aren't freed they can't be allocated after
         * kexec. There is no need to cleanup the mlx5_core software
         * stuff.
         */
        mlx5_core_eq_free_irqs(dev);

        return 0;
}

static void shutdown(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
        int err;

        mlx5_core_info(dev, "Shutdown was called\n");
        set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state);
        err = mlx5_try_fast_unload(dev);
        if (err)
                mlx5_unload_one(dev, false);
        mlx5_pci_disable_device(dev);
}

static int mlx5_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        mlx5_unload_one(dev, true);

        return 0;
}

static int mlx5_resume(struct pci_dev *pdev)
{
        struct mlx5_core_dev *dev = pci_get_drvdata(pdev);

        return mlx5_load_one(dev, false);
}

static const struct pci_device_id mlx5_core_pci_table[] = {
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTIB) },
        { PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF},   /* Connect-IB VF */
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4) },
        { PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4 VF */
        { PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_CONNECTX4_LX) },
        { PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF},   /* ConnectX-4LX VF */
        { PCI_VDEVICE(MELLANOX, 0x1017) },                      /* ConnectX-5, PCIe 3.0 */
        { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 VF */
        { PCI_VDEVICE(MELLANOX, 0x1019) },                      /* ConnectX-5 Ex */
        { PCI_VDEVICE(MELLANOX, 0x101a), MLX5_PCI_DEV_IS_VF},   /* ConnectX-5 Ex VF */
        { PCI_VDEVICE(MELLANOX, 0x101b) },                      /* ConnectX-6 */
        { PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},   /* ConnectX-6 VF */
        { PCI_VDEVICE(MELLANOX, 0x101d) },                      /* ConnectX-6 Dx */
        { PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},   /* ConnectX Family mlx5Gen Virtual Function */
        { PCI_VDEVICE(MELLANOX, 0x101f) },                      /* ConnectX-6 LX */
        { PCI_VDEVICE(MELLANOX, 0x1021) },                      /* ConnectX-7 */
        { PCI_VDEVICE(MELLANOX, 0x1023) },                      /* ConnectX-8 */
        { PCI_VDEVICE(MELLANOX, 0xa2d2) },                      /* BlueField integrated ConnectX-5 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},   /* BlueField integrated ConnectX-5 network controller VF */
        { PCI_VDEVICE(MELLANOX, 0xa2d6) },                      /* BlueField-2 integrated ConnectX-6 Dx network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2dc) },                      /* BlueField-3 integrated ConnectX-7 network controller */
        { PCI_VDEVICE(MELLANOX, 0xa2df) },                      /* BlueField-4 integrated ConnectX-8 network controller */
        { 0, }
};

MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);

void mlx5_disable_device(struct mlx5_core_dev *dev)
{
        mlx5_error_sw_reset(dev);
        mlx5_unload_one_devl_locked(dev, false);
}

int mlx5_recover_device(struct mlx5_core_dev *dev)
{
        if (!mlx5_core_is_sf(dev)) {
                mlx5_pci_disable_device(dev);
                if (mlx5_pci_slot_reset(dev->pdev) != PCI_ERS_RESULT_RECOVERED)
                        return -EIO;
        }

        return mlx5_load_one_devl_locked(dev, true);
}

static struct pci_driver mlx5_core_driver = {
        .name           = KBUILD_MODNAME,
        .id_table       = mlx5_core_pci_table,
        .probe          = probe_one,
        .remove         = remove_one,
        .suspend        = mlx5_suspend,
        .resume         = mlx5_resume,
        .shutdown       = shutdown,
        .err_handler    = &mlx5_err_handler,
        .sriov_configure = mlx5_core_sriov_configure,
        .sriov_get_vf_total_msix = mlx5_sriov_get_vf_total_msix,
        .sriov_set_msix_vec_count = mlx5_core_sriov_set_msix_vec_count,
};

/**
 * mlx5_vf_get_core_dev - Get the mlx5 core device from a given VF PCI device if
 *                        mlx5_core is its driver.
 * @pdev: The associated PCI device.
 *
 * Upon return the interface state lock stays held to let the caller use it safely.
 * The caller must ensure to use the returned mlx5 device for a narrow window
 * and put it back with mlx5_vf_put_core_dev() immediately once usage is over.
 *
 * Return: Pointer to the associated mlx5_core_dev or NULL.
 */
struct mlx5_core_dev *mlx5_vf_get_core_dev(struct pci_dev *pdev)
{
        struct mlx5_core_dev *mdev;

        mdev = pci_iov_get_pf_drvdata(pdev, &mlx5_core_driver);
        if (IS_ERR(mdev))
                return NULL;

        mutex_lock(&mdev->intf_state_mutex);
        if (!test_bit(MLX5_INTERFACE_STATE_UP, &mdev->intf_state)) {
                mutex_unlock(&mdev->intf_state_mutex);
                return NULL;
        }

        return mdev;
}
EXPORT_SYMBOL(mlx5_vf_get_core_dev);

/**
 * mlx5_vf_put_core_dev - Put the mlx5 core device back.
 * @mdev: The mlx5 core device.
 *
 * Upon return the interface state lock is unlocked and the caller should not
 * access the mdev any more.
 */
void mlx5_vf_put_core_dev(struct mlx5_core_dev *mdev)
{
        mutex_unlock(&mdev->intf_state_mutex);
}
EXPORT_SYMBOL(mlx5_vf_put_core_dev);
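/* Sketch of the intended get/put pairing (illustrative only; the binding
 * contract is the kernel-doc above): mlx5_vf_get_core_dev() returns with
 * intf_state_mutex held, so the critical section must stay short and must
 * not reacquire that lock.
 *
 *      struct mlx5_core_dev *mdev = mlx5_vf_get_core_dev(vf_pdev);
 *
 *      if (!mdev)
 *              return -ENODEV;
 *      ... brief use of mdev, e.g. reading capabilities ...
 *      mlx5_vf_put_core_dev(mdev);
 */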
static void mlx5_core_verify_params(void)
{
        if (prof_sel >= ARRAY_SIZE(profile)) {
                pr_warn("mlx5_core: WARNING: Invalid module parameter prof_sel %d, valid range 0-%zu, changing back to default(%d)\n",
                        prof_sel,
                        ARRAY_SIZE(profile) - 1,
                        MLX5_DEFAULT_PROF);
                prof_sel = MLX5_DEFAULT_PROF;
        }
}

static int __init mlx5_init(void)
{
        int err;

        WARN_ONCE(strcmp(MLX5_ADEV_NAME, KBUILD_MODNAME),
                  "mlx5_core name not in sync with kernel module name");

        get_random_bytes(&sw_owner_id, sizeof(sw_owner_id));

        mlx5_core_verify_params();
        mlx5_register_debugfs();

        err = mlx5e_init();
        if (err)
                goto err_debug;

        err = mlx5_sf_driver_register();
        if (err)
                goto err_sf;

        err = pci_register_driver(&mlx5_core_driver);
        if (err)
                goto err_pci;

        return 0;

err_pci:
        mlx5_sf_driver_unregister();
err_sf:
        mlx5e_cleanup();
err_debug:
        mlx5_unregister_debugfs();
        return err;
}

static void __exit mlx5_cleanup(void)
{
        pci_unregister_driver(&mlx5_core_driver);
        mlx5_sf_driver_unregister();
        mlx5e_cleanup();
        mlx5_unregister_debugfs();
}

module_init(mlx5_init);
module_exit(mlx5_cleanup);