/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
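/* Note: FW_FILE_VERSION expands to the dotted firmware version built from
 * the FW_*_VERSION constants; assuming, for example, firmware 8.37.2.0,
 * the file requested from /lib/firmware would be
 * "qed/qed_init_values_zipped-8.37.2.0.bin".
 */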
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
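/* Undo qed_init_pci(): unmap the doorbell and register BARs, release the
 * PCI regions (only if this is the last enable of the device) and disable
 * the PCI device.
 */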
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
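/* Request MSI-X vectors. If the full request cannot be granted, retry with
 * the largest multiple of the hwfn count the bus will provide, since
 * fastpath vectors must be spread evenly across engines. As an illustrative
 * example, being granted 9 of 16 requested vectors on a two-hwfn device
 * leads to a retry for 8.
 */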
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
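/* Fastpath handlers are interleaved across engines: absolute vector index
 * N maps to hwfn (N % num_hwfns), per-hwfn slot (N / num_hwfns).
 */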
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}
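/* Handler for the shared (INTa/MSI) interrupt line. The per-hwfn IGU SISR
 * status is a 64-bit word: bit 0 signals the slowpath (deferred to the
 * sp_dpc tasklet), while the remaining bits signal the fastpath handlers
 * registered through qed_simd_handler_config().
 */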
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
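/* The 63-per-hwfn limit above lines up with the 64-bit SISR status word
 * consumed in qed_single_int(), where bit 0 is reserved for the slowpath.
 */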
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
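/* Illustrative partitioning for the code above: a two-hwfn RDMA device
 * granted 16 vectors keeps 2 for slowpath (fp_msix_base = 2) and 14 for
 * fastpath; with 8 L2 queues in total, RDMA receives (14 - 8) / 2 = 3
 * vectors per hwfn starting at index 2 + 8 = 10, and L2 keeps 8.
 */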
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
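/* Each hwfn carries its own zlib stream and inflate workspace; these are
 * allocated once at slowpath start and used by qed_unzip_data() when
 * decompressing the zipped firmware init values. Note that qed_unzip_data()
 * returns the unzipped length in dwords (hence the divide by 4).
 */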
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx, xdp] per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, 192);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
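/* Delayed-work handler for MFW requests. If no PTT window is currently
 * free, the work re-queues itself and retries instead of blocking.
 */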
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
	}

	return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			return rc;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
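/* Status blocks for L2 queues are striped across engines: sb_id N lands on
 * hwfn (N % n_hwfns) with relative index (N / n_hwfns). For example, on a
 * CMT device with two engines, L2 sb_id 5 maps to hwfn 1, relative SB 2.
 */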
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	int i;

	qed_fill_link(&cdev->hwfns[0], if_link);

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc = 0, j;
	u32 val;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                            |
 * 4B  | image_type     | Options        |  Number of register settings       |
 * 8B  |                       Value                                          |
 * 12B |                       Mask                                           |
 * 16B |                       Offset                                         |
 *     \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type |                   reserved                               |
 *     \----------------------------------------------------------------------/
 *     Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", **data);
	rc = qed_mcp_nvm_put_file_begin(cdev, **data);
	*data += 4;

	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 * 16B |                       Data ...                                       |
 *     \----------------------------------------------------------------------/
 *     Write data as part of a file that was previously started. Data should be
 *     of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *((u16 *)(*data)));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
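/* Parse and execute an NVM batch file: validate the general header, then
 * walk the commands (FILE_START, FILE_DATA, NVM_CHANGE) until the image is
 * exhausted, optionally checking the MFW response after each command.
 */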
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
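/* Callback tables exported to the protocol drivers (e.g. qede) through
 * qed_if.h.
 */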
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
};
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);