/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
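
/* For illustration: assuming the FW headers defined, say, major 8, minor 10,
 * revision 10 and engineering 0 (made-up numbers; the real values come from
 * the firmware header files), QED_FW_FILE_NAME would expand to
 * "qed/qed_init_values_zipped-8.10.10.0.bin", which request_firmware()
 * resolves relative to /lib/firmware.
 */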
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
				    QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
		    QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}
static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. The new requested
		 * number should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled msix vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate the MSI-X table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSI-X */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSIX");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
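
/* The global SIMD index interleaves the engines; e.g., with num_hwfns = 2
 * (illustrative), index 5 maps to hwfn 5 % 2 = 1, slot 5 / 2 = 2, so even
 * indices land on engine 0 and odd indices on engine 1.
 */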
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine whether the interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
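
/* The 63-per-hwfn limit above mirrors qed_single_int(): in INTa/MSI mode all
 * fastpath sources of a hwfn must fit into one 64-bit SISR status word whose
 * bit 0 is reserved for the slowpath, leaving 63 usable fastpath bits per
 * engine.
 */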
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
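
/* Worked example of the split above (illustrative numbers): with two hwfns
 * and out.num_vectors = 32, fp_msix_base = 2 and fp_msix_cnt = 30. If the
 * hwfns expose num_l2_queues = 16 in total, the surplus feeds RDMA:
 * rdma_msix_cnt = (30 - 16) / 2 = 7, rdma_msix_base = 2 + 16 = 18, and
 * fp_msix_cnt is trimmed back to 16 for L2.
 */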
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
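
/* Caller sketch for qed_unzip_data() (illustrative only; the buffer names
 * below are made up). Note the return value is in 32-bit dwords, not bytes,
 * and 0 signals any zlib failure:
 *
 *	u32 dwords = qed_unzip_data(p_hwfn, zipped_len, zipped_buf,
 *				    out_size, out_buf);
 *	if (!dwords)
 *		return -EINVAL;	// reason was already logged via stream->msg
 */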
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* Divide the MRs by 3 to avoid MF ILT overflow */
		params->rdma_pf_params.num_mrs = RDMA_MAX_TIDS;
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
	    QED_PCI_ETH_ROCE) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* The first dword is used to differentiate between various
		 * sources.
		 */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |=  1 << QED_MODE_VXLAN_TUNN |
				1 << QED_MODE_L2GRE_TUNN |
				1 << QED_MODE_IPGRE_TUNN |
				1 << QED_MODE_L2GENEVE_TUNN |
				1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	rc = qed_hw_init(cdev, &tunn_info, true,
			 cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}
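
/* The drv_version word sent above packs four u8 fields high byte first; e.g.
 * (illustrative) a 8.10.9.0 driver yields
 * (8 << 24) | (10 << 16) | (9 << 8) | 0 = 0x080a0900.
 */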
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses a single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}
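
/* Example of the sb_id fan-out above on a two-hwfn CMT device (illustrative):
 * L2 status blocks 0, 1, 2, 3 map to (hwfn 0, rel 0), (hwfn 1, rel 0),
 * (hwfn 0, rel 1), (hwfn 1, rel 1); for non-L2 users (storage/RoCE) n_hwfns
 * is 1, so every SB stays on engine 0 with rel_sb_id == sb_id.
 */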
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}
static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	if (IS_VF(cdev))
		return 0;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}
static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u8 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
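
/* qed_update_mac() and qed_update_mtu() below follow the same two-step
 * management-FW pattern as qed_update_wol() above: write the attribute via
 * its qed_mcp_ov_update_*() helper, then commit the change with
 * qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV).
 */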
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts = eth_stats.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts = eth_stats.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}