/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS	(8192)
#define QED_ROCE_DPIS	(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
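
/* Fill @dev_info with the device's constant properties. A PF reports the
 * driver's compiled-in firmware version and reads MFW/flash details through
 * a PTT window when one can be acquired; a VF obtains the versions from
 * its PF instead.
 */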
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
				    QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
		    QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
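
/* Fastpath (SIMD) handlers are registered with a device-global vector index.
 * On CMT (two-engine) devices consecutive indices alternate between hwfns,
 * so the index is split into a hwfn selector (index % num_hwfns) and a
 * per-hwfn slot (index / num_hwfns).
 */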
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
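
/* In MSI-X mode each hwfn's slowpath vector only needs to schedule the
 * hwfn's DPC tasklet. For MSI/INTa, a single ISR demultiplexes the IGU
 * SISR status: bit 0 indicates a slowpath interrupt, while each higher
 * bit maps to one registered fastpath (SIMD) handler.
 */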
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;
	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
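
/* Inflate a zlib-compressed firmware buffer into @unzip_buf using the
 * hwfn's preallocated stream; returns the decompressed size in dwords,
 * or 0 on failure.
 */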
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
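
/* Apply qed's RDMA defaults and L2 connection limits to the protocol
 * driver's requested pf_params before committing them into each hwfn.
 */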
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
	    QED_PCI_ETH_ROCE) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN |
			       1 << QED_MODE_L2GRE_TUNN |
			       1 << QED_MODE_IPGRE_TUNN |
			       1 << QED_MODE_L2GENEVE_TUNN |
			       1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	rc = qed_hw_init(cdev, &tunn_info, true,
			 cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
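
/* Status blocks of L2 queues are spread round-robin across the engines of
 * a CMT device, so the device-global sb_id is first translated into a
 * (hwfn, relative sb_id) pair before reaching the per-hwfn IGU code.
 */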
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
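
/* Called on an MFW link-change notification - refresh the link state, pass
 * it on to the VFs and, for the leading hwfn, to the protocol driver
 * through its link_update callback.
 */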
void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}
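
/* Coalescing parameters are configured per queue; as with status blocks,
 * a device-global queue id is first mapped onto the owning hwfn of a CMT
 * device.
 */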
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};
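
/* Gather protocol-specific statistics (currently LAN and FCoE) into the
 * union format expected by the management FW.
 */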
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}