/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

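/* For illustration (hypothetical version numbers): with FW 8.15.3.0 the
 * QED_FW_FILE_NAME macro above expands to
 * "qed/qed_init_values_zipped-8.15.3.0.bin", which request_firmware()
 * looks up under the firmware search path (e.g. /lib/firmware).
 */
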
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	if (IS_PF(cdev)) {
		cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
		cdev->db_size = pci_resource_len(cdev->pdev, 2);
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = (cdev->hwfns[0].hw_info.personality ==
				    QED_PCI_ETH_ROCE);
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->mf_mode = cdev->mf_mode;
		dev_info->tx_switching = true;

		if (QED_LEADING_HWFN(cdev)->hw_info.b_wol_support ==
		    QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = QED_LEADING_HWFN(cdev)->hw_info.mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

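/* Worked example (hypothetical numbers) for the rounding above: on a CMT
 * device with two hwfns, a request for 16 vectors that the PCI core trims
 * to 9 is not a multiple of num_hwfns, so it is rounded down to 8 and
 * retried with pci_enable_msix_exact(), keeping the per-hwfn vector split
 * even.
 */
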
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		rc = IRQ_HANDLED;
		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);

			if (!(status &= ~0x1))
				continue;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

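/* Assumed SISR status layout, as decoded above: bit 0 signals the slowpath
 * tasklet, while (0x2ULL << j) selects bit j + 1 for the j-th fastpath
 * handler registered through qed_simd_handler_config().
 */
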
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

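/* Example of the limit above (hypothetical numbers): a 2-hwfn device in
 * INTA/MSI mode allows up to 2 * 63 = 126 fastpath vectors, while in MSI-X
 * mode the cap is whatever fp_msix_cnt was carved out by
 * qed_slowpath_setup_int().
 */
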
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by the protocol driver.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH_ROCE)
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

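/* Note: the value returned above is a count of unzipped dwords
 * (total_out / 4); the error paths return 0, which callers treat as
 * failure.
 */
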
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		/* Divide the MRs by 3 to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality ==
	    QED_PCI_ETH_ROCE) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_tunn_start_params tunn_info;
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

#ifdef CONFIG_RFS_ACCEL
		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
#endif
		p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (p_ptt) {
			QED_LEADING_HWFN(cdev)->p_ptp_ptt = p_ptt;
		} else {
			DP_NOTICE(cdev, "Failed to acquire PTT for PTP\n");
			goto err;
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.tunn_mode |=	1 << QED_MODE_VXLAN_TUNN |
				1 << QED_MODE_L2GRE_TUNN |
				1 << QED_MODE_IPGRE_TUNN |
				1 << QED_MODE_L2GENEVE_TUNN |
				1 << QED_MODE_IPGENEVE_TUNN;

	tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN;

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

#ifdef CONFIG_RFS_ACCEL
	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
	if (IS_PF(cdev) && QED_LEADING_HWFN(cdev)->p_ptp_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);

	qed_iov_wq_stop(cdev, false);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
#ifdef CONFIG_RFS_ACCEL
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
#endif
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_ptp_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);

		qed_nic_stop(cdev);
		qed_slowpath_irq_free(cdev);
	}

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

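/* Mapping example (hypothetical): on a CMT device (two hwfns) an L2 queue
 * with sb_id 5 lands on hwfn 1 with rel_sb_id 2; for storage/RoCE, where
 * n_hwfns is forced to 1, sb_id maps 1:1 onto engine 0.
 */
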
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (params.speed.autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
					    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
					   QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static void qed_get_coalesce(struct qed_dev *cdev, u16 *rx_coal, u16 *tx_coal)
{
	*rx_coal = cdev->rx_coalesce_usecs;
	*tx_coal = cdev->tx_coalesce_usecs;
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    u16 qid, u16 sb_id)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int hwfn_index;
	int status = 0;

	hwfn_index = qid % cdev->num_hwfns;
	hwfn = &cdev->hwfns[hwfn_index];
	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_set_rxq_coalesce(hwfn, ptt, rx_coal,
				      qid / cdev->num_hwfns, sb_id);
	if (status)
		goto out;
	status = qed_set_txq_coalesce(hwfn, ptt, tx_coal,
				      qid / cdev->num_hwfns, sb_id);
out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

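/* As with status blocks, queue ids interleave across engines: qid %
 * num_hwfns selects the hwfn and qid / num_hwfns is the engine-relative
 * queue index passed to the per-queue coalescing helpers.
 */
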
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);

	return status;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.get_coalesce = &qed_get_coalesce,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_ERR(cdev, "Invalid protocol type = %d\n", type);
		return;
	}
}