drivers/net/ethernet/qlogic/qed/qed_main.c
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS                    (8192)
#define QED_ROCE_DPIS                   (8)
#define QED_RDMA_SRQS                   QED_ROCE_QPS
#define QED_NVM_CFG_SET_FLAGS           0xE
#define QED_NVM_CFG_SET_PF_FLAGS        0x1E
#define QED_NVM_CFG_GET_FLAGS           0xA
#define QED_NVM_CFG_GET_PF_FLAGS        0x1A

static char version[] =
        "QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION                         \
        __stringify(FW_MAJOR_VERSION) "."       \
        __stringify(FW_MINOR_VERSION) "."       \
        __stringify(FW_REVISION_VERSION) "."    \
        __stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME        \
        "qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

static int __init qed_init(void)
{
        pr_info("%s", version);

        return 0;
}

static void __exit qed_cleanup(void)
{
        pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
        struct device *dev = &cdev->pdev->dev;

        if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
                if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
                        DP_NOTICE(cdev,
                                  "Can't request 64-bit consistent allocations\n");
                        return -EIO;
                }
        } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
                DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
                return -EIO;
        }

        return 0;
}

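/* Undo qed_init_pci(): unmap the doorbell and register BARs when they were
 * mapped, release the PCI regions only when this is the last enable
 * reference, and disable the device.
 */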
static void qed_free_pci(struct qed_dev *cdev)
{
        struct pci_dev *pdev = cdev->pdev;

        if (cdev->doorbells && cdev->db_size)
                iounmap(cdev->doorbells);
        if (cdev->regview)
                iounmap(cdev->regview);
        if (atomic_read(&pdev->enable_cnt) == 1)
                pci_release_regions(pdev);

        pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL       0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
        u8 rev_id;
        int rc;

        cdev->pdev = pdev;

        rc = pci_enable_device(pdev);
        if (rc) {
                DP_NOTICE(cdev, "Cannot enable PCI device\n");
                goto err0;
        }

        if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #0\n");
                rc = -EIO;
                goto err1;
        }

        if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
                DP_NOTICE(cdev, "No memory region found in bar #2\n");
                rc = -EIO;
                goto err1;
        }

        if (atomic_read(&pdev->enable_cnt) == 1) {
                rc = pci_request_regions(pdev, "qed");
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to request PCI memory resources\n");
                        goto err1;
                }
                pci_set_master(pdev);
                pci_save_state(pdev);
        }

        pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
        if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
                DP_NOTICE(cdev,
                          "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
                          rev_id);
                rc = -ENODEV;
                goto err2;
        }
        if (!pci_is_pcie(pdev)) {
                DP_NOTICE(cdev, "The bus is not PCI Express\n");
                rc = -EIO;
                goto err2;
        }

        cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
        if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
                DP_NOTICE(cdev, "Cannot find power management capability\n");

        rc = qed_set_coherency_mask(cdev);
        if (rc)
                goto err2;

        cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
        cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
        cdev->pci_params.irq = pdev->irq;

        cdev->regview = pci_ioremap_bar(pdev, 0);
        if (!cdev->regview) {
                DP_NOTICE(cdev, "Cannot map register space, aborting\n");
                rc = -ENOMEM;
                goto err2;
        }

        cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
        cdev->db_size = pci_resource_len(cdev->pdev, 2);
        if (!cdev->db_size) {
                if (IS_PF(cdev)) {
                        DP_NOTICE(cdev, "No Doorbell bar available\n");
                        return -EINVAL;
                } else {
                        return 0;
                }
        }

        cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

        if (!cdev->doorbells) {
                DP_NOTICE(cdev, "Cannot map doorbell space\n");
                return -ENOMEM;
        }

        return 0;

err2:
        pci_release_regions(pdev);
err1:
        pci_disable_device(pdev);
err0:
        return rc;
}

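/* Fill @dev_info for a protocol driver: tunnel modes currently classified by
 * MAC/VLAN, PCI/BAR parameters, and the FW/MFW/MBI versions - taken from the
 * compile-time constants and the MFW on a PF, or queried through
 * qed_vf_get_fw_version() on a VF.
 */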
int qed_fill_dev_info(struct qed_dev *cdev,
                      struct qed_dev_info *dev_info)
{
        struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
        struct qed_hw_info *hw_info = &p_hwfn->hw_info;
        struct qed_tunnel_info *tun = &cdev->tunnel;
        struct qed_ptt  *ptt;

        memset(dev_info, 0, sizeof(struct qed_dev_info));

        if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->vxlan.b_mode_enabled)
                dev_info->vxlan_enable = true;

        if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
            tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->gre_enable = true;

        if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
            tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
            tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
                dev_info->geneve_enable = true;

        dev_info->num_hwfns = cdev->num_hwfns;
        dev_info->pci_mem_start = cdev->pci_params.mem_start;
        dev_info->pci_mem_end = cdev->pci_params.mem_end;
        dev_info->pci_irq = cdev->pci_params.irq;
        dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
        dev_info->dev_type = cdev->type;
        ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

        if (IS_PF(cdev)) {
                dev_info->fw_major = FW_MAJOR_VERSION;
                dev_info->fw_minor = FW_MINOR_VERSION;
                dev_info->fw_rev = FW_REVISION_VERSION;
                dev_info->fw_eng = FW_ENGINEERING_VERSION;
                dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
                                                       &cdev->mf_bits);
                dev_info->tx_switching = true;

                if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
                        dev_info->wol_support = true;

                dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

                dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
        } else {
                qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
                                      &dev_info->fw_minor, &dev_info->fw_rev,
                                      &dev_info->fw_eng);
        }

        if (IS_PF(cdev)) {
                ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                if (ptt) {
                        qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mfw_rev, NULL);

                        qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
                                            &dev_info->mbi_version);

                        qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
                                               &dev_info->flash_size);

                        qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
                }
        } else {
                qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
                                    &dev_info->mfw_rev, NULL);
        }

        dev_info->mtu = hw_info->mtu;

        return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
        kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
        struct qed_dev *cdev;

        cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
        if (!cdev)
                return cdev;

        qed_init_struct(cdev);

        return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
        if (!cdev)
                return -ENODEV;

        DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
        return 0;
}

struct qed_devlink {
        struct qed_dev *cdev;
};

enum qed_devlink_param_id {
        QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
        QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

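/* devlink accessors for the "iwarp_cmt" runtime parameter; the flag seems to
 * let iWARP span both hwfns of a CMT device, and is simply mirrored into
 * cdev->iwarp_cmt here.
 */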
static int qed_dl_param_get(struct devlink *dl, u32 id,
                            struct devlink_param_gset_ctx *ctx)
{
        struct qed_devlink *qed_dl;
        struct qed_dev *cdev;

        qed_dl = devlink_priv(dl);
        cdev = qed_dl->cdev;
        ctx->val.vbool = cdev->iwarp_cmt;

        return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
                            struct devlink_param_gset_ctx *ctx)
{
        struct qed_devlink *qed_dl;
        struct qed_dev *cdev;

        qed_dl = devlink_priv(dl);
        cdev = qed_dl->cdev;
        cdev->iwarp_cmt = ctx->val.vbool;

        return 0;
}

static const struct devlink_param qed_devlink_params[] = {
        DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
                             "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
                             BIT(DEVLINK_PARAM_CMODE_RUNTIME),
                             qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;

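/* Create and register the devlink instance and its driver parameters. On
 * failure everything is unwound and cdev->dl is reset to NULL, so the
 * unregister path below can run unconditionally.
 */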
static int qed_devlink_register(struct qed_dev *cdev)
{
        union devlink_param_value value;
        struct qed_devlink *qed_dl;
        struct devlink *dl;
        int rc;

        dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
        if (!dl)
                return -ENOMEM;

        qed_dl = devlink_priv(dl);

        cdev->dl = dl;
        qed_dl->cdev = cdev;

        rc = devlink_register(dl, &cdev->pdev->dev);
        if (rc)
                goto err_free;

        rc = devlink_params_register(dl, qed_devlink_params,
                                     ARRAY_SIZE(qed_devlink_params));
        if (rc)
                goto err_unregister;

        value.vbool = false;
        devlink_param_driverinit_value_set(dl,
                                           QED_DEVLINK_PARAM_ID_IWARP_CMT,
                                           value);

        devlink_params_publish(dl);
        cdev->iwarp_cmt = false;

        return 0;

err_unregister:
        devlink_unregister(dl);

err_free:
        cdev->dl = NULL;
        devlink_free(dl);

        return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
        if (!cdev->dl)
                return;

        devlink_params_unregister(cdev->dl, qed_devlink_params,
                                  ARRAY_SIZE(qed_devlink_params));

        devlink_unregister(cdev->dl);
        devlink_free(cdev->dl);
}

/* probing */
static struct qed_dev *qed_probe(struct pci_dev *pdev,
                                 struct qed_probe_params *params)
{
        struct qed_dev *cdev;
        int rc;

        cdev = qed_alloc_cdev(pdev);
        if (!cdev)
                goto err0;

        cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
        cdev->protocol = params->protocol;

        if (params->is_vf)
                cdev->b_is_vf = true;

        qed_init_dp(cdev, params->dp_module, params->dp_level);

        cdev->recov_in_prog = params->recov_in_prog;

        rc = qed_init_pci(cdev, pdev);
        if (rc) {
                DP_ERR(cdev, "init pci failed\n");
                goto err1;
        }
        DP_INFO(cdev, "PCI init completed successfully\n");

        rc = qed_devlink_register(cdev);
        if (rc) {
                DP_INFO(cdev, "Failed to register devlink.\n");
                goto err2;
        }

        rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
        if (rc) {
                DP_ERR(cdev, "hw prepare failed\n");
                goto err2;
        }

        DP_INFO(cdev, "qed_probe completed successfully\n");

        return cdev;

err2:
        qed_free_pci(cdev);
err1:
        qed_free_cdev(cdev);
err0:
        return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
        if (!cdev)
                return;

        qed_hw_remove(cdev);

        qed_free_pci(cdev);

        qed_set_power_state(cdev, PCI_D3hot);

        qed_devlink_unregister(cdev);

        qed_free_cdev(cdev);
}

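/* Tear down whichever interrupt mode qed_set_int_mode() configured and
 * clear the resulting output parameters.
 */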
static void qed_disable_msix(struct qed_dev *cdev)
{
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                pci_disable_msix(cdev->pdev);
                kfree(cdev->int_params.msix_table);
        } else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
                pci_disable_msi(cdev->pdev);
        }

        memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

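/* Enable MSI-X with the requested vector count. If the range call grants
 * fewer vectors, retry with the largest count that is still a whole multiple
 * of the number of hwfns so that each hwfn gets an equal share.
 */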
static int qed_enable_msix(struct qed_dev *cdev,
                           struct qed_int_params *int_params)
{
        int i, rc, cnt;

        cnt = int_params->in.num_vectors;

        for (i = 0; i < cnt; i++)
                int_params->msix_table[i].entry = i;

        rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
                                   int_params->in.min_msix_cnt, cnt);
        if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
            (rc % cdev->num_hwfns)) {
                pci_disable_msix(cdev->pdev);

                /* If fastpath is initialized, we need at least one interrupt
                 * per hwfn [and the slow path interrupts]. New requested number
                 * should be a multiple of the number of hwfns.
                 */
                cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
                DP_NOTICE(cdev,
                          "Trying to enable MSI-X with fewer vectors (%d out of %d)\n",
                          cnt, int_params->in.num_vectors);
                rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
                                           cnt);
                if (!rc)
                        rc = cnt;
        }

        if (rc > 0) {
                /* MSI-X configuration was achieved */
                int_params->out.int_mode = QED_INT_MODE_MSIX;
                int_params->out.num_vectors = rc;
                rc = 0;
        } else {
                DP_NOTICE(cdev,
                          "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
                          cnt, rc);
        }

        return rc;
}

/* This function outputs the interrupt mode and the number of enabled MSI-X
 * vectors.
 */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
        struct qed_int_params *int_params = &cdev->int_params;
        struct msix_entry *tbl;
        int rc = 0, cnt;

        switch (int_params->in.int_mode) {
        case QED_INT_MODE_MSIX:
                /* Allocate MSIX table */
                cnt = int_params->in.num_vectors;
                int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
                if (!int_params->msix_table) {
                        rc = -ENOMEM;
                        goto out;
                }

                /* Enable MSIX */
                rc = qed_enable_msix(cdev, int_params);
                if (!rc)
                        goto out;

                DP_NOTICE(cdev, "Failed to enable MSI-X\n");
                kfree(int_params->msix_table);
                if (force_mode)
                        goto out;
                /* Fallthrough */

        case QED_INT_MODE_MSI:
                if (cdev->num_hwfns == 1) {
                        rc = pci_enable_msi(cdev->pdev);
                        if (!rc) {
                                int_params->out.int_mode = QED_INT_MODE_MSI;
                                goto out;
                        }

                        DP_NOTICE(cdev, "Failed to enable MSI\n");
                        if (force_mode)
                                goto out;
                }
                /* Fallthrough */

        case QED_INT_MODE_INTA:
                int_params->out.int_mode = QED_INT_MODE_INTA;
                rc = 0;
                goto out;
        default:
                DP_NOTICE(cdev, "Unknown int_mode value %d\n",
                          int_params->in.int_mode);
                rc = -EINVAL;
        }

out:
        if (!rc)
                DP_INFO(cdev, "Using %s interrupts\n",
                        int_params->out.int_mode == QED_INT_MODE_INTA ?
                        "INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
                        "MSI" : "MSIX");
        cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

        return rc;
}

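/* In single-IRQ mode, protocol drivers register one fastpath handler per
 * status block; the index is spread round-robin across the hwfns, with
 * index / num_hwfns selecting the slot inside the chosen hwfn.
 */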
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
                                    int index, void(*handler)(void *))
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        hwfn->simd_proto_handler[relative_idx].func = handler;
        hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
        struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
        int relative_idx = index / cdev->num_hwfns;

        memset(&hwfn->simd_proto_handler[relative_idx], 0,
               sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
        tasklet_schedule((struct tasklet_struct *)tasklet);
        return IRQ_HANDLED;
}

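/* Shared INTa/MSI handler: in the SISR status read below, bit 0 signals the
 * slowpath tasklet while the higher bits index the per-protocol fastpath
 * handlers (hence the 0x2ULL << j test).
 */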
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
        struct qed_dev *cdev = (struct qed_dev *)dev_instance;
        struct qed_hwfn *hwfn;
        irqreturn_t rc = IRQ_NONE;
        u64 status;
        int i, j;

        for (i = 0; i < cdev->num_hwfns; i++) {
                status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

                if (!status)
                        continue;

                hwfn = &cdev->hwfns[i];

                /* Slowpath interrupt */
                if (unlikely(status & 0x1)) {
                        tasklet_schedule(hwfn->sp_dpc);
                        status &= ~0x1;
                        rc = IRQ_HANDLED;
                }

                /* Fastpath interrupts */
                for (j = 0; j < 64; j++) {
                        if ((0x2ULL << j) & status) {
                                struct qed_simd_fp_handler *p_handler =
                                        &hwfn->simd_proto_handler[j];

                                if (p_handler->func)
                                        p_handler->func(p_handler->token);
                                else
                                        DP_NOTICE(hwfn,
                                                  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
                                                  j, status);

                                status &= ~(0x2ULL << j);
                                rc = IRQ_HANDLED;
                        }
                }

                if (unlikely(status))
                        DP_VERBOSE(hwfn, NETIF_MSG_INTR,
                                   "got an unknown interrupt status 0x%llx\n",
                                   status);
        }

        return rc;
}

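/* Request the slowpath IRQ for a hwfn: either a dedicated MSI-X vector wired
 * to the sp_dpc tasklet, or the single INTa/MSI line served by
 * qed_single_int().
 */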
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
        struct qed_dev *cdev = hwfn->cdev;
        u32 int_mode;
        int rc = 0;
        u8 id;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX) {
                id = hwfn->my_id;
                snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
                         id, cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
                rc = request_irq(cdev->int_params.msix_table[id].vector,
                                 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
        } else {
                unsigned long flags = 0;

                snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
                         cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
                         PCI_FUNC(cdev->pdev->devfn));

                if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
                        flags |= IRQF_SHARED;

                rc = request_irq(cdev->pdev->irq, qed_single_int,
                                 flags, cdev->name, cdev);
        }

        if (rc)
                DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
        else
                DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
                           "Requested slowpath %s\n",
                           (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

        return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
        /* Calling the disable function will make sure that any
         * currently-running function is completed. The following call to the
         * enable function makes this sequence a flush-like operation.
         */
        if (p_hwfn->b_sp_dpc_enabled) {
                tasklet_disable(p_hwfn->sp_dpc);
                tasklet_enable(p_hwfn->sp_dpc);
        }
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
        struct qed_dev *cdev = p_hwfn->cdev;
        u8 id = p_hwfn->my_id;
        u32 int_mode;

        int_mode = cdev->int_params.out.int_mode;
        if (int_mode == QED_INT_MODE_MSIX)
                synchronize_irq(cdev->int_params.msix_table[id].vector);
        else
                synchronize_irq(cdev->pdev->irq);

        qed_slowpath_tasklet_flush(p_hwfn);
}

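/* Release every slowpath IRQ that was actually requested; MSI-X vectors are
 * synchronized first so no handler is still running when they are freed.
 */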
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
        int i;

        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                for_each_hwfn(cdev, i) {
                        if (!cdev->hwfns[i].b_int_requested)
                                break;
                        synchronize_irq(cdev->int_params.msix_table[i].vector);
                        free_irq(cdev->int_params.msix_table[i].vector,
                                 cdev->hwfns[i].sp_dpc);
                }
        } else {
                if (QED_LEADING_HWFN(cdev)->b_int_requested)
                        free_irq(cdev->pdev->irq, cdev);
        }
        qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
        int i, rc;

        rc = qed_hw_stop(cdev);

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (p_hwfn->b_sp_dpc_enabled) {
                        tasklet_disable(p_hwfn->sp_dpc);
                        p_hwfn->b_sp_dpc_enabled = false;
                        DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
                                   "Disabled sp tasklet [hwfn %d] at %p\n",
                                   i, p_hwfn->sp_dpc);
                }
        }

        qed_dbg_pf_exit(cdev);

        return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
        int rc, i;

        /* Determine if interface is going to require LL2 */
        if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
                for (i = 0; i < cdev->num_hwfns; i++) {
                        struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                        p_hwfn->using_ll2 = true;
                }
        }

        rc = qed_resc_alloc(cdev);
        if (rc)
                return rc;

        DP_INFO(cdev, "Allocated qed resources\n");

        qed_resc_setup(cdev);

        return rc;
}

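/* Clamp the number of fastpath vectors a protocol driver may use: up to 63
 * simd handlers per hwfn in single-IRQ mode, or whatever MSI-X vectors are
 * left after the slowpath ones.
 */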
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
        int limit = 0;

        /* Mark the fastpath as free/used */
        cdev->int_params.fp_initialized = cnt ? true : false;

        if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
                limit = cdev->num_hwfns * 63;
        else if (cdev->int_params.fp_msix_cnt)
                limit = cdev->int_params.fp_msix_cnt;

        if (!limit)
                return -ENOMEM;

        return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
        memset(info, 0, sizeof(struct qed_int_info));

        if (!cdev->int_params.fp_initialized) {
                DP_INFO(cdev,
                        "Protocol driver requested interrupt information, but its support is not yet configured\n");
                return -EINVAL;
        }

        /* Need to expose only MSI-X information; Single IRQ is handled solely
         * by qed.
         */
        if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
                int msix_base = cdev->int_params.fp_msix_base;

                info->msix_cnt = cdev->int_params.fp_msix_cnt;
                info->msix = &cdev->int_params.msix_table[msix_base];
        }

        return 0;
}

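/* Compute the PF vector budget (one slowpath vector plus one per status
 * block for each hwfn), enable the best available interrupt mode and split
 * the granted MSI-X vectors between L2 queues and RDMA.
 */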
static int qed_slowpath_setup_int(struct qed_dev *cdev,
                                  enum qed_int_mode int_mode)
{
        struct qed_sb_cnt_info sb_cnt_info;
        int num_l2_queues = 0;
        int rc;
        int i;

        if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
                DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
                return -EINVAL;
        }

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = int_mode;
        for_each_hwfn(cdev, i) {
                memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
                qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
                cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
                cdev->int_params.in.num_vectors++; /* slowpath */
        }

        /* We want a minimum of one slowpath and one fastpath vector per hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

        if (is_kdump_kernel()) {
                DP_INFO(cdev,
                        "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
                        cdev->int_params.in.min_msix_cnt);
                cdev->int_params.in.num_vectors =
                        cdev->int_params.in.min_msix_cnt;
        }

        rc = qed_set_int_mode(cdev, false);
        if (rc) {
                DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
                return rc;
        }

        cdev->int_params.fp_msix_base = cdev->num_hwfns;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
                                       cdev->num_hwfns;

        if (!IS_ENABLED(CONFIG_QED_RDMA) ||
            !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
                return 0;

        for_each_hwfn(cdev, i)
                num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

        DP_VERBOSE(cdev, QED_MSG_RDMA,
                   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
                   cdev->int_params.fp_msix_cnt, num_l2_queues);

        if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
                cdev->int_params.rdma_msix_cnt =
                        (cdev->int_params.fp_msix_cnt - num_l2_queues)
                        / cdev->num_hwfns;
                cdev->int_params.rdma_msix_base =
                        cdev->int_params.fp_msix_base + num_l2_queues;
                cdev->int_params.fp_msix_cnt = num_l2_queues;
        } else {
                cdev->int_params.rdma_msix_cnt = 0;
        }

        DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
                   cdev->int_params.rdma_msix_cnt,
                   cdev->int_params.rdma_msix_base);

        return 0;
}

static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
        int rc;

        memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
        cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

        qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
                            &cdev->int_params.in.num_vectors);
        if (cdev->num_hwfns > 1) {
                u8 vectors = 0;

                qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
                cdev->int_params.in.num_vectors += vectors;
        }

        /* We want a minimum of one fastpath vector per vf hwfn */
        cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

        rc = qed_set_int_mode(cdev, true);
        if (rc)
                return rc;

        cdev->int_params.fp_msix_base = 0;
        cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

        return 0;
}

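/* Inflate zipped firmware data into @unzip_buf; returns the decompressed
 * size in dwords, or 0 on any zlib error.
 */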
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
                   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
        int rc;

        p_hwfn->stream->next_in = input_buf;
        p_hwfn->stream->avail_in = input_len;
        p_hwfn->stream->next_out = unzip_buf;
        p_hwfn->stream->avail_out = max_size;

        rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

        if (rc != Z_OK) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
                           rc);
                return 0;
        }

        rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
        zlib_inflateEnd(p_hwfn->stream);

        if (rc != Z_OK && rc != Z_STREAM_END) {
                DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
                           p_hwfn->stream->msg, rc);
                return 0;
        }

        return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
        int i;
        void *workspace;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
                if (!p_hwfn->stream)
                        return -ENOMEM;

                workspace = vzalloc(zlib_inflate_workspacesize());
                if (!workspace)
                        return -ENOMEM;
                p_hwfn->stream->workspace = workspace;
        }

        return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
        int i;

        for_each_hwfn(cdev, i) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                if (!p_hwfn->stream)
                        return;

                vfree(p_hwfn->stream->workspace);
                kfree(p_hwfn->stream);
        }
}

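/* Propagate the protocol driver's pf_params to every hwfn, after clamping
 * requests that could otherwise exhaust ILT resources when RDMA is
 * supported.
 */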
static void qed_update_pf_params(struct qed_dev *cdev,
                                 struct qed_pf_params *params)
{
        int i;

        if (IS_ENABLED(CONFIG_QED_RDMA)) {
                params->rdma_pf_params.num_qps = QED_ROCE_QPS;
                params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
                params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
                /* divide the number of MRs by 3 to avoid MF ILT overflow */
                params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
        }

        if (cdev->num_hwfns > 1 || IS_VF(cdev))
                params->eth_pf_params.num_arfs_filters = 0;

        /* In case we might support RDMA, don't allow qede to be greedy
         * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
         * per hwfn.
         */
        if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
                u16 *num_cons;

                num_cons = &params->eth_pf_params.num_cons;
                *num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
        }

        for (i = 0; i < cdev->num_hwfns; i++) {
                struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

                p_hwfn->pf_params = *params;
        }
}

#define QED_PERIODIC_DB_REC_COUNT               10
#define QED_PERIODIC_DB_REC_INTERVAL_MS         100
#define QED_PERIODIC_DB_REC_INTERVAL \
        msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
#define QED_PERIODIC_DB_REC_WAIT_COUNT          10
#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
        (QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)

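/* Queue @wq_flag work on the hwfn's slowpath workqueue after @delay jiffies,
 * unless the workqueue is already being torn down.
 */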
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
                                     enum qed_slowpath_wq_flag wq_flag,
                                     unsigned long delay)
{
        if (!hwfn->slowpath_wq_active)
                return -EINVAL;

        /* Memory barrier for setting atomic bit */
        smp_mb__before_atomic();
        set_bit(wq_flag, &hwfn->slowpath_task_flags);
        smp_mb__after_atomic();
        queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

        return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
        /* Reset periodic Doorbell Recovery counter */
        p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

        /* Don't schedule periodic Doorbell Recovery if already scheduled */
        if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                     &p_hwfn->slowpath_task_flags))
                return;

        qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
                                  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
        int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

        if (IS_VF(cdev))
                return;

        for_each_hwfn(cdev, i) {
                if (!cdev->hwfns[i].slowpath_wq)
                        continue;

                /* Stop queueing new delayed work */
                cdev->hwfns[i].slowpath_wq_active = false;

                /* Wait until the last periodic doorbell recovery is executed */
                while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                                &cdev->hwfns[i].slowpath_task_flags) &&
                       sleep_count--)
                        msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

                flush_workqueue(cdev->hwfns[i].slowpath_wq);
                destroy_workqueue(cdev->hwfns[i].slowpath_wq);
        }
}

static void qed_slowpath_task(struct work_struct *work)
{
        struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
                                             slowpath_task.work);
        struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

        if (!ptt) {
                if (hwfn->slowpath_wq_active)
                        queue_delayed_work(hwfn->slowpath_wq,
                                           &hwfn->slowpath_task, 0);

                return;
        }

        if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
                               &hwfn->slowpath_task_flags))
                qed_mfw_process_tlv_req(hwfn, ptt);

        if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
                               &hwfn->slowpath_task_flags)) {
                qed_db_rec_handler(hwfn, ptt);
                if (hwfn->periodic_db_rec_count--)
                        qed_slowpath_delayed_work(hwfn,
                                                  QED_SLOWPATH_PERIODIC_DB_REC,
                                                  QED_PERIODIC_DB_REC_INTERVAL);
        }

        qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
        struct qed_hwfn *hwfn;
        char name[NAME_SIZE];
        int i;

        if (IS_VF(cdev))
                return 0;

        for_each_hwfn(cdev, i) {
                hwfn = &cdev->hwfns[i];

                snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
                         cdev->pdev->bus->number,
                         PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

                hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
                if (!hwfn->slowpath_wq) {
                        DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
                        return -ENOMEM;
                }

                INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
                hwfn->slowpath_wq_active = true;
        }

        return 0;
}

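/* Bring the slowpath up: request firmware, set up interrupts and resources,
 * run qed_hw_init() with all tunnel classes enabled, then allocate the LL2
 * interface if needed and report the driver version to the MFW. Each error
 * label unwinds the steps completed before it.
 */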
static int qed_slowpath_start(struct qed_dev *cdev,
                              struct qed_slowpath_params *params)
{
        struct qed_drv_load_params drv_load_params;
        struct qed_hw_init_params hw_init_params;
        struct qed_mcp_drv_version drv_version;
        struct qed_tunnel_info tunn_info;
        const u8 *data = NULL;
        struct qed_hwfn *hwfn;
        struct qed_ptt *p_ptt;
        int rc = -EINVAL;

        if (qed_iov_wq_start(cdev))
                goto err;

        if (qed_slowpath_wq_start(cdev))
                goto err;

        if (IS_PF(cdev)) {
                rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
                                      &cdev->pdev->dev);
                if (rc) {
                        DP_NOTICE(cdev,
                                  "Failed to find fw file - /lib/firmware/%s\n",
                                  QED_FW_FILE_NAME);
                        goto err;
                }

                if (cdev->num_hwfns == 1) {
                        p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
                        if (p_ptt) {
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
                        } else {
                                DP_NOTICE(cdev,
                                          "Failed to acquire PTT for aRFS\n");
                                goto err;
                        }
                }
        }

        cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
        rc = qed_nic_setup(cdev);
        if (rc)
                goto err;

        if (IS_PF(cdev))
                rc = qed_slowpath_setup_int(cdev, params->int_mode);
        else
                rc = qed_slowpath_vf_setup_int(cdev);
        if (rc)
                goto err1;

        if (IS_PF(cdev)) {
                /* Allocate stream for unzipping */
                rc = qed_alloc_stream_mem(cdev);
                if (rc)
                        goto err2;

                /* First Dword used to differentiate between various sources */
                data = cdev->firmware->data + sizeof(u32);

                qed_dbg_pf_init(cdev);
        }

        /* Start the slowpath */
        memset(&hw_init_params, 0, sizeof(hw_init_params));
        memset(&tunn_info, 0, sizeof(tunn_info));
        tunn_info.vxlan.b_mode_enabled = true;
        tunn_info.l2_gre.b_mode_enabled = true;
        tunn_info.ip_gre.b_mode_enabled = true;
        tunn_info.l2_geneve.b_mode_enabled = true;
        tunn_info.ip_geneve.b_mode_enabled = true;
        tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
        hw_init_params.p_tunn = &tunn_info;
        hw_init_params.b_hw_start = true;
        hw_init_params.int_mode = cdev->int_params.out.int_mode;
        hw_init_params.allow_npar_tx_switch = true;
        hw_init_params.bin_fw_data = data;

        memset(&drv_load_params, 0, sizeof(drv_load_params));
        drv_load_params.is_crash_kernel = is_kdump_kernel();
        drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
        drv_load_params.avoid_eng_reset = false;
        drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
        hw_init_params.p_drv_load_params = &drv_load_params;

        rc = qed_hw_init(cdev, &hw_init_params);
        if (rc)
                goto err2;

        DP_INFO(cdev,
                "HW initialization and function start completed successfully\n");

        if (IS_PF(cdev)) {
                cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
                                           BIT(QED_MODE_L2GENEVE_TUNN) |
                                           BIT(QED_MODE_IPGENEVE_TUNN) |
                                           BIT(QED_MODE_L2GRE_TUNN) |
                                           BIT(QED_MODE_IPGRE_TUNN));
        }

        /* Allocate LL2 interface if needed */
        if (QED_LEADING_HWFN(cdev)->using_ll2) {
                rc = qed_ll2_alloc_if(cdev);
                if (rc)
                        goto err3;
        }
        if (IS_PF(cdev)) {
                hwfn = QED_LEADING_HWFN(cdev);
                drv_version.version = (params->drv_major << 24) |
                                      (params->drv_minor << 16) |
                                      (params->drv_rev << 8) |
                                      (params->drv_eng);
                strlcpy(drv_version.name, params->name,
                        MCP_DRV_VER_STR_SIZE - 4);
                rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
                                              &drv_version);
                if (rc) {
                        DP_NOTICE(cdev, "Failed sending drv version command\n");
                        goto err4;
                }
        }

        qed_reset_vport_stats(cdev);

        return 0;

err4:
        qed_ll2_dealloc_if(cdev);
err3:
        qed_hw_stop(cdev);
err2:
        qed_hw_timers_stop_all(cdev);
        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);
        qed_free_stream_mem(cdev);
        qed_disable_msix(cdev);
err1:
        qed_resc_free(cdev);
err:
        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
            QED_LEADING_HWFN(cdev)->p_arfs_ptt)
                qed_ptt_release(QED_LEADING_HWFN(cdev),
                                QED_LEADING_HWFN(cdev)->p_arfs_ptt);

        qed_iov_wq_stop(cdev, false);

        qed_slowpath_wq_stop(cdev);

        return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
        if (!cdev)
                return -ENODEV;

        qed_slowpath_wq_stop(cdev);

        qed_ll2_dealloc_if(cdev);

        if (IS_PF(cdev)) {
                if (cdev->num_hwfns == 1)
                        qed_ptt_release(QED_LEADING_HWFN(cdev),
                                        QED_LEADING_HWFN(cdev)->p_arfs_ptt);
                qed_free_stream_mem(cdev);
                if (IS_QED_ETH_IF(cdev))
                        qed_sriov_disable(cdev, true);
        }

        qed_nic_stop(cdev);

        if (IS_PF(cdev))
                qed_slowpath_irq_free(cdev);

        qed_disable_msix(cdev);

        qed_resc_free(cdev);

        qed_iov_wq_stop(cdev, true);

        if (IS_PF(cdev))
                release_firmware(cdev->firmware);

        return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
        int i;

        memcpy(cdev->name, name, NAME_SIZE);
        for_each_hwfn(cdev, i)
                snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
                       struct qed_sb_info *sb_info,
                       void *sb_virt_addr,
                       dma_addr_t sb_phy_addr, u16 sb_id,
                       enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        struct qed_ptt *p_ptt;
        u16 rel_sb_id;
        u32 rc;

        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
        if (type == QED_SB_TYPE_L2_QUEUE) {
                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
                rel_sb_id = sb_id / cdev->num_hwfns;
        } else {
                p_hwfn = QED_AFFIN_HWFN(cdev);
                rel_sb_id = sb_id;
        }

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

        if (IS_PF(p_hwfn->cdev)) {
                p_ptt = qed_ptt_acquire(p_hwfn);
                if (!p_ptt)
                        return -EBUSY;

                rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
                qed_ptt_release(p_hwfn, p_ptt);
        } else {
                rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
                                     sb_phy_addr, rel_sb_id);
        }

        return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
                          struct qed_sb_info *sb_info,
                          u16 sb_id,
                          enum qed_sb_type type)
{
        struct qed_hwfn *p_hwfn;
        u16 rel_sb_id;
        u32 rc;

        /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
        if (type == QED_SB_TYPE_L2_QUEUE) {
                p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
                rel_sb_id = sb_id / cdev->num_hwfns;
        } else {
                p_hwfn = QED_AFFIN_HWFN(cdev);
                rel_sb_id = sb_id;
        }

        DP_VERBOSE(cdev, NETIF_MSG_INTR,
                   "hwfn [%d] <--[release]-- SB %04x [0x%04x upper]\n",
                   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

        rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

        return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
        return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
        struct qed_hwfn *hwfn;
        struct qed_mcp_link_params *link_params;
        struct qed_ptt *ptt;
        u32 sup_caps;
        int rc;

        if (!cdev)
                return -ENODEV;

        /* The link should be set only once per PF */
        hwfn = &cdev->hwfns[0];

        /* When a VF wants to set the link, force it to read the bulletin
         * instead. This mimics the PF behavior, where a notification [both
         * immediate and possible later] would be generated when changing
         * properties.
         */
        if (IS_VF(cdev)) {
                qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
                return 0;
        }

        ptt = qed_ptt_acquire(hwfn);
        if (!ptt)
                return -EBUSY;

        link_params = qed_mcp_get_link_params(hwfn);
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
                link_params->speed.autoneg = params->autoneg;
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
                link_params->speed.advertised_speeds = 0;
                sup_caps = QED_LM_1000baseT_Full_BIT |
                           QED_LM_1000baseKX_Full_BIT |
                           QED_LM_1000baseX_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
                sup_caps = QED_LM_10000baseT_Full_BIT |
                           QED_LM_10000baseKR_Full_BIT |
                           QED_LM_10000baseKX4_Full_BIT |
                           QED_LM_10000baseR_FEC_BIT |
                           QED_LM_10000baseCR_Full_BIT |
                           QED_LM_10000baseSR_Full_BIT |
                           QED_LM_10000baseLR_Full_BIT |
                           QED_LM_10000baseLRM_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
                if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
                        link_params->speed.advertised_speeds |=
                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
                sup_caps = QED_LM_25000baseKR_Full_BIT |
                           QED_LM_25000baseCR_Full_BIT |
                           QED_LM_25000baseSR_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
                sup_caps = QED_LM_40000baseLR4_Full_BIT |
                           QED_LM_40000baseKR4_Full_BIT |
                           QED_LM_40000baseCR4_Full_BIT |
                           QED_LM_40000baseSR4_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                                NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
                sup_caps = QED_LM_50000baseKR2_Full_BIT |
                           QED_LM_50000baseCR2_Full_BIT |
                           QED_LM_50000baseSR2_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
                sup_caps = QED_LM_100000baseKR4_Full_BIT |
                           QED_LM_100000baseSR4_Full_BIT |
                           QED_LM_100000baseCR4_Full_BIT |
                           QED_LM_100000baseLR4_ER4_Full_BIT;
                if (params->adv_speeds & sup_caps)
                        link_params->speed.advertised_speeds |=
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
        }
        if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
                link_params->speed.forced_speed = params->forced_speed;
        if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
                if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
                        link_params->pause.autoneg = true;
                else
                        link_params->pause.autoneg = false;
                if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
                        link_params->pause.forced_rx = true;
                else
                        link_params->pause.forced_rx = false;
                if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
                        link_params->pause.forced_tx = true;
                else
                        link_params->pause.forced_tx = false;
        }
        if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
                switch (params->loopback_mode) {
                case QED_LINK_LOOPBACK_INT_PHY:
                        link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
                        break;
                case QED_LINK_LOOPBACK_EXT_PHY:
                        link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
                        break;
                case QED_LINK_LOOPBACK_EXT:
                        link_params->loopback_mode = ETH_LOOPBACK_EXT;
                        break;
                case QED_LINK_LOOPBACK_MAC:
                        link_params->loopback_mode = ETH_LOOPBACK_MAC;
1592                         break;
1593                 default:
1594                         link_params->loopback_mode = ETH_LOOPBACK_NONE;
1595                         break;
1596                 }
1597         }
1598
1599         if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
1600                 memcpy(&link_params->eee, &params->eee,
1601                        sizeof(link_params->eee));
1602
1603         rc = qed_mcp_set_link(hwfn, ptt, params->link_up);
1604
1605         qed_ptt_release(hwfn, ptt);
1606
1607         return rc;
1608 }
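
/* Usage sketch (hypothetical caller, for illustration only; qede-style
 * protocol drivers reach this through their qed_common_ops pointer):
 *
 *      struct qed_link_params params;
 *
 *      memset(&params, 0, sizeof(params));
 *      params.override_flags = QED_LINK_OVERRIDE_SPEED_AUTONEG |
 *                              QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
 *      params.autoneg = false;
 *      params.forced_speed = 25000;
 *      params.link_up = true;
 *      rc = edev->ops->common->set_link(edev->cdev, &params);
 */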
1609
1610 static int qed_get_port_type(u32 media_type)
1611 {
1612         int port_type;
1613
1614         switch (media_type) {
1615         case MEDIA_SFPP_10G_FIBER:
1616         case MEDIA_SFP_1G_FIBER:
1617         case MEDIA_XFP_FIBER:
1618         case MEDIA_MODULE_FIBER:
1619         case MEDIA_KR:
1620                 port_type = PORT_FIBRE;
1621                 break;
1622         case MEDIA_DA_TWINAX:
1623                 port_type = PORT_DA;
1624                 break;
1625         case MEDIA_BASE_T:
1626                 port_type = PORT_TP;
1627                 break;
1628         case MEDIA_NOT_PRESENT:
1629                 port_type = PORT_NONE;
1630                 break;
1631         case MEDIA_UNSPECIFIED:
1632         default:
1633                 port_type = PORT_OTHER;
1634                 break;
1635         }
1636         return port_type;
1637 }
1638
1639 static int qed_get_link_data(struct qed_hwfn *hwfn,
1640                              struct qed_mcp_link_params *params,
1641                              struct qed_mcp_link_state *link,
1642                              struct qed_mcp_link_capabilities *link_caps)
1643 {
1644         void *p;
1645
1646         if (!IS_PF(hwfn->cdev)) {
1647                 qed_vf_get_link_params(hwfn, params);
1648                 qed_vf_get_link_state(hwfn, link);
1649                 qed_vf_get_link_caps(hwfn, link_caps);
1650
1651                 return 0;
1652         }
1653
1654         p = qed_mcp_get_link_params(hwfn);
1655         if (!p)
1656                 return -ENXIO;
1657         memcpy(params, p, sizeof(*params));
1658
1659         p = qed_mcp_get_link_state(hwfn);
1660         if (!p)
1661                 return -ENXIO;
1662         memcpy(link, p, sizeof(*link));
1663
1664         p = qed_mcp_get_link_capabilities(hwfn);
1665         if (!p)
1666                 return -ENXIO;
1667         memcpy(link_caps, p, sizeof(*link_caps));
1668
1669         return 0;
1670 }
1671
1672 static void qed_fill_link_capability(struct qed_hwfn *hwfn,
1673                                      struct qed_ptt *ptt, u32 capability,
1674                                      u32 *if_capability)
1675 {
1676         u32 media_type, tcvr_state, tcvr_type;
1677         u32 speed_mask, board_cfg;
1678
1679         if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
1680                 media_type = MEDIA_UNSPECIFIED;
1681
1682         if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
1683                 tcvr_state = tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;
1684
1685         if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
1686                 speed_mask = 0xFFFFFFFF;
1687
1688         if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
1689                 board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
1690
1691         DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
1692                    "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1693                    media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);
1694
1695         switch (media_type) {
1696         case MEDIA_DA_TWINAX:
1697                 *if_capability |= QED_LM_FIBRE_BIT;
1698                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1699                         *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1700                 /* For DAC media, multiple speed capabilities are supported */
1701                 capability = capability & speed_mask;
1702                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1703                         *if_capability |= QED_LM_1000baseKX_Full_BIT;
1704                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1705                         *if_capability |= QED_LM_10000baseCR_Full_BIT;
1706                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1707                         *if_capability |= QED_LM_40000baseCR4_Full_BIT;
1708                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1709                         *if_capability |= QED_LM_25000baseCR_Full_BIT;
1710                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1711                         *if_capability |= QED_LM_50000baseCR2_Full_BIT;
1712                 if (capability &
1713                         NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1714                         *if_capability |= QED_LM_100000baseCR4_Full_BIT;
1715                 break;
1716         case MEDIA_BASE_T:
1717                 *if_capability |= QED_LM_TP_BIT;
1718                 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
1719                         if (capability &
1720                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1721                                 *if_capability |= QED_LM_1000baseT_Full_BIT;
1722                         }
1723                         if (capability &
1724                             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1725                                 *if_capability |= QED_LM_10000baseT_Full_BIT;
1726                         }
1727                 }
1728                 if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
1729                         *if_capability |= QED_LM_FIBRE_BIT;
1730                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
1731                                 *if_capability |= QED_LM_1000baseT_Full_BIT;
1732                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
1733                                 *if_capability |= QED_LM_10000baseT_Full_BIT;
1734                 }
1735                 break;
1736         case MEDIA_SFP_1G_FIBER:
1737         case MEDIA_SFPP_10G_FIBER:
1738         case MEDIA_XFP_FIBER:
1739         case MEDIA_MODULE_FIBER:
1740                 *if_capability |= QED_LM_FIBRE_BIT;
1741                 if (capability &
1742                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1743                         if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
1744                             (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
1745                                 *if_capability |= QED_LM_1000baseKX_Full_BIT;
1746                 }
1747                 if (capability &
1748                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1749                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
1750                                 *if_capability |= QED_LM_10000baseSR_Full_BIT;
1751                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
1752                                 *if_capability |= QED_LM_10000baseLR_Full_BIT;
1753                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
1754                                 *if_capability |= QED_LM_10000baseLRM_Full_BIT;
1755                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
1756                                 *if_capability |= QED_LM_10000baseR_FEC_BIT;
1757                 }
1758                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1759                         *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1760                 if (capability &
1761                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
1762                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
1763                                 *if_capability |= QED_LM_25000baseSR_Full_BIT;
1764                 }
1765                 if (capability &
1766                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
1767                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
1768                                 *if_capability |= QED_LM_40000baseLR4_Full_BIT;
1769                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
1770                                 *if_capability |= QED_LM_40000baseSR4_Full_BIT;
1771                 }
1772                 if (capability &
1773                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1774                         *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1775                 if (capability &
1776                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
1777                         if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
1778                                 *if_capability |= QED_LM_100000baseSR4_Full_BIT;
1779                 }
1780
1781                 break;
1782         case MEDIA_KR:
1783                 *if_capability |= QED_LM_Backplane_BIT;
1784                 if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
1785                         *if_capability |= QED_LM_20000baseKR2_Full_BIT;
1786                 if (capability &
1787                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
1788                         *if_capability |= QED_LM_1000baseKX_Full_BIT;
1789                 if (capability &
1790                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
1791                         *if_capability |= QED_LM_10000baseKR_Full_BIT;
1792                 if (capability &
1793                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
1794                         *if_capability |= QED_LM_25000baseKR_Full_BIT;
1795                 if (capability &
1796                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
1797                         *if_capability |= QED_LM_40000baseKR4_Full_BIT;
1798                 if (capability &
1799                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
1800                         *if_capability |= QED_LM_50000baseKR2_Full_BIT;
1801                 if (capability &
1802                     NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
1803                         *if_capability |= QED_LM_100000baseKR4_Full_BIT;
1804                 break;
1805         case MEDIA_UNSPECIFIED:
1806         case MEDIA_NOT_PRESENT:
1807                 DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
1808                            "Unknown media and transceiver type;\n");
1809                 break;
1810         }
1811 }
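
/* Worked example (illustrative): a DAC cable (MEDIA_DA_TWINAX) whose NVM
 * capability word advertises MASK_1G | MASK_25G, with both speeds allowed
 * by the transceiver's speed_mask, yields
 *
 *      QED_LM_FIBRE_BIT | QED_LM_1000baseKX_Full_BIT |
 *      QED_LM_25000baseCR_Full_BIT
 *
 * in *if_capability.
 */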
1812
1813 static void qed_fill_link(struct qed_hwfn *hwfn,
1814                           struct qed_ptt *ptt,
1815                           struct qed_link_output *if_link)
1816 {
1817         struct qed_mcp_link_capabilities link_caps;
1818         struct qed_mcp_link_params params;
1819         struct qed_mcp_link_state link;
1820         u32 media_type = MEDIA_UNSPECIFIED;
1821
1822         memset(if_link, 0, sizeof(*if_link));
1823
1824         /* Prepare source inputs */
1825         if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
1826                 dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
1827                 return;
1828         }
1829
1830         /* Set the link parameters to pass to protocol driver */
1831         if (link.link_up)
1832                 if_link->link_up = true;
1833
1834         /* TODO - at the moment assume supported and advertised speed equal */
1835         if (link_caps.default_speed_autoneg)
1836                 if_link->supported_caps |= QED_LM_Autoneg_BIT;
1837         if (params.pause.autoneg ||
1838             (params.pause.forced_rx && params.pause.forced_tx))
1839                 if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
1840         if (params.pause.autoneg || params.pause.forced_rx ||
1841             params.pause.forced_tx)
1842                 if_link->supported_caps |= QED_LM_Pause_BIT;
1843
1844         if_link->advertised_caps = if_link->supported_caps;
1845         if (params.speed.autoneg)
1846                 if_link->advertised_caps |= QED_LM_Autoneg_BIT;
1847         else
1848                 if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
1849
1850         /* Fill link advertised capabilities */
1851         qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
1852                                  &if_link->advertised_caps);
1853         /* Fill link supported capabilities */
1854         qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
1855                                  &if_link->supported_caps);
1856
1857         if (link.link_up)
1858                 if_link->speed = link.speed;
1859
1860         /* TODO - fill duplex properly */
1861         if_link->duplex = DUPLEX_FULL;
1862         qed_mcp_get_media_type(hwfn, ptt, &media_type);
1863         if_link->port = qed_get_port_type(media_type);
1864
1865         if_link->autoneg = params.speed.autoneg;
1866
1867         if (params.pause.autoneg)
1868                 if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
1869         if (params.pause.forced_rx)
1870                 if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
1871         if (params.pause.forced_tx)
1872                 if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;
1873
1874         /* Link partner capabilities */
1875         if (link.partner_adv_speed &
1876             QED_LINK_PARTNER_SPEED_1G_FD)
1877                 if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
1878         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
1879                 if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
1880         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
1881                 if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
1882         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
1883                 if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
1884         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
1885                 if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
1886         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
1887                 if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
1888         if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
1889                 if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;
1890
1891         if (link.an_complete)
1892                 if_link->lp_caps |= QED_LM_Autoneg_BIT;
1893
1894         if (link.partner_adv_pause)
1895                 if_link->lp_caps |= QED_LM_Pause_BIT;
1896         if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1897             link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
1898                 if_link->lp_caps |= QED_LM_Asym_Pause_BIT;
1899
1900         if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
1901                 if_link->eee_supported = false;
1902         } else {
1903                 if_link->eee_supported = true;
1904                 if_link->eee_active = link.eee_active;
1905                 if_link->sup_caps = link_caps.eee_speed_caps;
1906                 /* MFW clears adv_caps on eee disable; use configured value */
1907                 if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
1908                                         params.eee.adv_caps;
1909                 if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
1910                 if_link->eee.enable = params.eee.enable;
1911                 if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
1912                 if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
1913         }
1914 }
1915
1916 static void qed_get_current_link(struct qed_dev *cdev,
1917                                  struct qed_link_output *if_link)
1918 {
1919         struct qed_hwfn *hwfn;
1920         struct qed_ptt *ptt;
1921         int i;
1922
1923         hwfn = &cdev->hwfns[0];
1924         if (IS_PF(cdev)) {
1925                 ptt = qed_ptt_acquire(hwfn);
1926                 if (ptt) {
1927                         qed_fill_link(hwfn, ptt, if_link);
1928                         qed_ptt_release(hwfn, ptt);
1929                 } else {
1930                         DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
1931                 }
1932         } else {
1933                 qed_fill_link(hwfn, NULL, if_link);
1934         }
1935
1936         for_each_hwfn(cdev, i)
1937                 qed_inform_vf_link_state(&cdev->hwfns[i]);
1938 }
1939
1940 void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
1941 {
1942         void *cookie = hwfn->cdev->ops_cookie;
1943         struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
1944         struct qed_link_output if_link;
1945
1946         qed_fill_link(hwfn, ptt, &if_link);
1947         qed_inform_vf_link_state(hwfn);
1948
1949         if (IS_LEAD_HWFN(hwfn) && cookie)
1950                 op->link_update(cookie, &if_link);
1951 }
1952
1953 static int qed_drain(struct qed_dev *cdev)
1954 {
1955         struct qed_hwfn *hwfn;
1956         struct qed_ptt *ptt;
1957         int i, rc;
1958
1959         if (IS_VF(cdev))
1960                 return 0;
1961
1962         for_each_hwfn(cdev, i) {
1963                 hwfn = &cdev->hwfns[i];
1964                 ptt = qed_ptt_acquire(hwfn);
1965                 if (!ptt) {
1966                         DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
1967                         return -EBUSY;
1968                 }
1969                 rc = qed_mcp_drain(hwfn, ptt);
1970                 qed_ptt_release(hwfn, ptt);
1971                 if (rc)
1972                         return rc;
1973         }
1974
1975         return 0;
1976 }
1977
1978 static int qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
1979                                           struct qed_nvm_image_att *nvm_image,
1980                                           u32 *crc)
1981 {
1982         u8 *buf = NULL;
1983         int rc, j;
1984         u32 val;
1985
1986         /* Allocate a buffer for holding the nvram image */
1987         buf = kzalloc(nvm_image->length, GFP_KERNEL);
1988         if (!buf)
1989                 return -ENOMEM;
1990
1991         /* Read image into buffer */
1992         rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
1993                               buf, nvm_image->length);
1994         if (rc) {
1995                 DP_ERR(cdev, "Failed reading image from nvm\n");
1996                 goto out;
1997         }
1998
1999         /* Convert the buffer into big-endian format (excluding the
2000          * trailing 4 bytes of CRC).
2001          */
2002         for (j = 0; j < nvm_image->length - 4; j += 4) {
2003                 val = cpu_to_be32(*(u32 *)&buf[j]);
2004                 *(u32 *)&buf[j] = val;
2005         }
2006
2007         /* Calc CRC for the "actual" image buffer, i.e. not including
2008          * the last 4 CRC bytes.
2009          */
2010         *crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));
2011
2012 out:
2013         kfree(buf);
2014
2015         return rc;
2016 }
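
/* Equivalent standalone computation (sketch, not driver code): for an
 * n-byte image whose last four bytes hold the CRC, the stored value is the
 * inverted CRC32 of the first n - 4 bytes after per-word big-endian
 * conversion:
 *
 *      for (j = 0; j < n - 4; j += 4)
 *              *(__be32 *)&buf[j] = cpu_to_be32(*(u32 *)&buf[j]);
 *      crc = ~cpu_to_be32(crc32(0xffffffff, buf, n - 4));
 */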
2017
2018 /* Binary file format -
2019  *     /----------------------------------------------------------------------\
2020  * 0B  |                       0x4 [command index]                            |
2021  * 4B  | image_type     | Options        |  Number of register settings       |
2022  * 8B  |                       Value                                          |
2023  * 12B |                       Mask                                           |
2024  * 16B |                       Offset                                         |
2025  *     \----------------------------------------------------------------------/
2026  * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
2027  * Options - 0'b - Calculate & Update CRC for image
2028  */
2029 static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
2030                                       bool *check_resp)
2031 {
2032         struct qed_nvm_image_att nvm_image;
2033         struct qed_hwfn *p_hwfn;
2034         bool is_crc = false;
2035         u32 image_type;
2036         int rc = 0, i;
2037         u16 len;
2038
2039         *data += 4;
2040         image_type = **data;
2041         p_hwfn = QED_LEADING_HWFN(cdev);
2042         for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
2043                 if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
2044                         break;
2045         if (i == p_hwfn->nvm_info.num_images) {
2046                 DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
2047                        image_type);
2048                 return -ENOENT;
2049         }
2050
2051         nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
2052         nvm_image.length = p_hwfn->nvm_info.image_att[i].len;
2053
2054         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2055                    "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
2056                    **data, image_type, nvm_image.start_addr,
2057                    nvm_image.start_addr + nvm_image.length - 1);
2058         (*data)++;
2059         is_crc = !!(**data & BIT(0));
2060         (*data)++;
2061         len = *((u16 *)*data);
2062         *data += 2;
2063         if (is_crc) {
2064                 u32 crc = 0;
2065
2066                 rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
2067                 if (rc) {
2068                         DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
2069                         goto exit;
2070                 }
2071
2072                 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2073                                        (nvm_image.start_addr +
2074                                         nvm_image.length - 4), (u8 *)&crc, 4);
2075                 if (rc)
2076                         DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
2077                                nvm_image.start_addr + nvm_image.length - 4, rc);
2078                 goto exit;
2079         }
2080
2081         /* Iterate over the values for setting */
2082         while (len) {
2083                 u32 offset, mask, value, cur_value;
2084                 u8 buf[4];
2085
2086                 value = *((u32 *)*data);
2087                 *data += 4;
2088                 mask = *((u32 *)*data);
2089                 *data += 4;
2090                 offset = *((u32 *)*data);
2091                 *data += 4;
2092
2093                 rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
2094                                       4);
2095                 if (rc) {
2096                         DP_ERR(cdev, "Failed reading from %08x\n",
2097                                nvm_image.start_addr + offset);
2098                         goto exit;
2099                 }
2100
2101                 cur_value = le32_to_cpu(*((__le32 *)buf));
2102                 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2103                            "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
2104                            nvm_image.start_addr + offset, cur_value,
2105                            (cur_value & ~mask) | (value & mask), value, mask);
2106                 value = (value & mask) | (cur_value & ~mask);
2107                 rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
2108                                        nvm_image.start_addr + offset,
2109                                        (u8 *)&value, 4);
2110                 if (rc) {
2111                         DP_ERR(cdev, "Failed writing to %08x\n",
2112                                nvm_image.start_addr + offset);
2113                         goto exit;
2114                 }
2115
2116                 len--;
2117         }
2118 exit:
2119         return rc;
2120 }
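
/* Example command stream (illustrative; the image_type and addresses are
 * hypothetical). A single Value-Mask-Offset set updating the low byte at
 * image offset 0x10, no CRC recalculation, bytes in little-endian order:
 *
 *      04 00 00 00     command index 0x4
 *      21 00 01 00     image_type 0x21, Options 0, one register setting
 *      5a 00 00 00     Value
 *      ff 00 00 00     Mask (touch only the low byte)
 *      10 00 00 00     Offset within the image
 */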
2121
2122 /* Binary file format -
2123  *     /----------------------------------------------------------------------\
2124  * 0B  |                       0x3 [command index]                            |
2125  * 4B  | b'0: check_response?   | b'1-31  reserved                            |
2126  * 8B  | File-type |                   reserved                               |
2127  * 12B |                    Image length in bytes                             |
2128  *     \----------------------------------------------------------------------/
2129  *     Start a new file of the provided type
2130  */
2131 static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
2132                                           const u8 **data, bool *check_resp)
2133 {
2134         u32 file_type, file_size = 0;
2135         int rc;
2136
2137         *data += 4;
2138         *check_resp = !!(**data & BIT(0));
2139         *data += 4;
2140         file_type = **data;
2141
2142         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2143                    "About to start a new file of type %02x\n", file_type);
2144         if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
2145                 *data += 4;
2146                 file_size = *((u32 *)(*data));
2147         }
2148
2149         rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
2150                                (u8 *)(&file_size), 4);
2151         *data += 4;
2152
2153         return rc;
2154 }
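
/* Example (illustrative): beginning an MBI image of 0x1000 bytes with a
 * response check requested via bit 0 of the second word:
 *
 *      0x00000003 | 0x00000001 | DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI |
 *      0x00001000
 *
 * For other file types the trailing length word is absent and file_size
 * stays 0.
 */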
2155
2156 /* Binary file format -
2157  *     /----------------------------------------------------------------------\
2158  * 0B  |                       0x2 [command index]                            |
2159  * 4B  |                       Length in bytes                                |
2160  * 8B  | b'0: check_response?   | b'1-31  reserved                            |
2161  * 12B |                       Offset in bytes                                |
2162  * 16B |                       Data ...                                       |
2163  *     \----------------------------------------------------------------------/
2164  *     Write data as part of a file that was previously started. Data should be
2165  *     of length equal to that provided in the message
2166  */
2167 static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
2168                                          const u8 **data, bool *check_resp)
2169 {
2170         u32 offset, len;
2171         int rc;
2172
2173         *data += 4;
2174         len = *((u32 *)(*data));
2175         *data += 4;
2176         *check_resp = !!(**data & BIT(0));
2177         *data += 4;
2178         offset = *((u32 *)(*data));
2179         *data += 4;
2180
2181         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2182                    "About to write File-data: %08x bytes to offset %08x\n",
2183                    len, offset);
2184
2185         rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
2186                                (char *)(*data), len);
2187         *data += len;
2188
2189         return rc;
2190 }
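
/* Example (illustrative): writing eight payload bytes at file offset 0x100
 * with no response check:
 *
 *      0x00000002 | 0x00000008 | 0x00000000 | 0x00000100 | <8 data bytes>
 */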
2191
2192 /* Binary file format [General header] -
2193  *     /----------------------------------------------------------------------\
2194  * 0B  |                       QED_NVM_SIGNATURE                              |
2195  * 4B  |                       Length in bytes                                |
2196  * 8B  | Highest command in this batchfile |          Reserved                |
2197  *     \----------------------------------------------------------------------/
2198  */
2199 static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
2200                                         const struct firmware *image,
2201                                         const u8 **data)
2202 {
2203         u32 signature, len;
2204
2205         /* Check minimum size */
2206         if (image->size < 12) {
2207                 DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
2208                 return -EINVAL;
2209         }
2210
2211         /* Check signature */
2212         signature = *((u32 *)(*data));
2213         if (signature != QED_NVM_SIGNATURE) {
2214                 DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
2215                 return -EINVAL;
2216         }
2217
2218         *data += 4;
2219         /* Validate internal size equals the image-size */
2220         len = *((u32 *)(*data));
2221         if (len != image->size) {
2222                 DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
2223                        len, (u32)image->size);
2224                 return -EINVAL;
2225         }
2226
2227         *data += 4;
2228         /* Make sure driver familiar with all commands necessary for this */
2229         if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
2230                 DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
2231                        *((u16 *)(*data)));
2232                 return -EINVAL;
2233         }
2234
2235         *data += 4;
2236
2237         return 0;
2238 }
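
/* Example header (illustrative): a 0x40-byte batchfile whose highest
 * command index is 0x4 would begin with
 *
 *      QED_NVM_SIGNATURE | 0x00000040 | 0x00000004
 *
 * where the upper 16 bits of the third word are reserved.
 */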
2239
2240 /* Binary file format -
2241  *     /----------------------------------------------------------------------\
2242  * 0B  |                       0x5 [command index]                            |
2243  * 4B  | Number of config attributes     |          Reserved                  |
2244  * 8B  | Config ID                       | Entity ID      | Length            |
2245  * 12B |                       Value                                          |
2246  *     |                                                                      |
2247  *     \----------------------------------------------------------------------/
2248  * There can be several cfg_id-entity_id-Length-Value sets as specified by
2249  * 'Number of config attributes'.
2250  *
2251  * The API parses config attributes from the user-provided buffer and flashes
2252  * them to the respective NVM path using the Management FW interface.
2253  */
2254 static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
2255 {
2256         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2257         u8 entity_id, len, buf[32];
2258         struct qed_ptt *ptt;
2259         u16 cfg_id, count;
2260         int rc = 0, i;
2261         u32 flags;
2262
2263         ptt = qed_ptt_acquire(hwfn);
2264         if (!ptt)
2265                 return -EAGAIN;
2266
2267         /* NVM CFG ID attribute header */
2268         *data += 4;
2269         count = *((u16 *)*data);
2270         *data += 4;
2271
2272         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2273                    "Read config ids: num_attrs = %0d\n", count);
2274         /* NVM CFG ID attributes */
2275         for (i = 0; i < count; i++) {
2276                 cfg_id = *((u16 *)*data);
2277                 *data += 2;
2278                 entity_id = **data;
2279                 (*data)++;
2280                 len = **data;
2281                 (*data)++;
2282                 memcpy(buf, *data, min_t(u32, len, sizeof(buf)));
2283                 *data += len;
2284
2285                 flags = entity_id ? QED_NVM_CFG_SET_PF_FLAGS :
2286                         QED_NVM_CFG_SET_FLAGS;
2287
2288                 DP_VERBOSE(cdev, NETIF_MSG_DRV,
2289                            "cfg_id = %d entity = %d len = %d\n", cfg_id,
2290                            entity_id, len);
2291                 rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
2292                                          buf, len);
2293                 if (rc) {
2294                         DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
2295                         break;
2296                 }
2297         }
2298
2299         qed_ptt_release(hwfn, ptt);
2300
2301         return rc;
2302 }
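
/* Example attribute encoding (illustrative; the cfg_id value is
 * hypothetical): one attribute, cfg_id 0x101 for entity 0, carrying a
 * 4-byte value:
 *
 *      05 00 00 00     command index 0x5
 *      01 00 00 00     Number of config attributes = 1
 *      01 01           cfg_id (u16)
 *      00              entity_id (0 -> QED_NVM_CFG_SET_FLAGS)
 *      04              len
 *      v0 v1 v2 v3     value bytes handed to qed_mcp_nvm_set_cfg()
 */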
2303
2304 #define QED_MAX_NVM_BUF_LEN     32
2305 static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
2306 {
2307         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2308         u8 buf[QED_MAX_NVM_BUF_LEN];
2309         struct qed_ptt *ptt;
2310         u32 len;
2311         int rc;
2312
2313         ptt = qed_ptt_acquire(hwfn);
2314         if (!ptt)
2315                 return QED_MAX_NVM_BUF_LEN;
2316
2317         rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
2318                                  &len);
2319         if (rc || !len) {
2320                 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2321                 len = QED_MAX_NVM_BUF_LEN;
2322         }
2323
2324         qed_ptt_release(hwfn, ptt);
2325
2326         return len;
2327 }
2328
2329 static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
2330                                   u32 cmd, u32 entity_id)
2331 {
2332         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2333         struct qed_ptt *ptt;
2334         u32 flags, len;
2335         int rc = 0;
2336
2337         ptt = qed_ptt_acquire(hwfn);
2338         if (!ptt)
2339                 return -EAGAIN;
2340
2341         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2342                    "Read config cmd = %d entity id %d\n", cmd, entity_id);
2343         flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
2344         rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
2345         if (rc)
2346                 DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
2347
2348         qed_ptt_release(hwfn, ptt);
2349
2350         return rc;
2351 }
2352
2353 static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
2354 {
2355         const struct firmware *image;
2356         const u8 *data, *data_end;
2357         u32 cmd_type;
2358         int rc;
2359
2360         rc = request_firmware(&image, name, &cdev->pdev->dev);
2361         if (rc) {
2362                 DP_ERR(cdev, "Failed to find '%s'\n", name);
2363                 return rc;
2364         }
2365
2366         DP_VERBOSE(cdev, NETIF_MSG_DRV,
2367                    "Flashing '%s' - firmware's data at %p, size is %08x\n",
2368                    name, image->data, (u32)image->size);
2369         data = image->data;
2370         data_end = data + image->size;
2371
2372         rc = qed_nvm_flash_image_validate(cdev, image, &data);
2373         if (rc)
2374                 goto exit;
2375
2376         while (data < data_end) {
2377                 bool check_resp = false;
2378
2379                 /* Parse the actual command */
2380                 cmd_type = *((u32 *)data);
2381                 switch (cmd_type) {
2382                 case QED_NVM_FLASH_CMD_FILE_DATA:
2383                         rc = qed_nvm_flash_image_file_data(cdev, &data,
2384                                                            &check_resp);
2385                         break;
2386                 case QED_NVM_FLASH_CMD_FILE_START:
2387                         rc = qed_nvm_flash_image_file_start(cdev, &data,
2388                                                             &check_resp);
2389                         break;
2390                 case QED_NVM_FLASH_CMD_NVM_CHANGE:
2391                         rc = qed_nvm_flash_image_access(cdev, &data,
2392                                                         &check_resp);
2393                         break;
2394                 case QED_NVM_FLASH_CMD_NVM_CFG_ID:
2395                         rc = qed_nvm_flash_cfg_write(cdev, &data);
2396                         break;
2397                 default:
2398                         DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
2399                         rc = -EINVAL;
2400                         goto exit;
2401                 }
2402
2403                 if (rc) {
2404                         DP_ERR(cdev, "Command %08x failed\n", cmd_type);
2405                         goto exit;
2406                 }
2407
2408                 /* Check response if needed */
2409                 if (check_resp) {
2410                         u32 mcp_response = 0;
2411
2412                         if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
2413                                 DP_ERR(cdev, "Failed getting MCP response\n");
2414                                 rc = -EINVAL;
2415                                 goto exit;
2416                         }
2417
2418                         switch (mcp_response & FW_MSG_CODE_MASK) {
2419                         case FW_MSG_CODE_OK:
2420                         case FW_MSG_CODE_NVM_OK:
2421                         case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
2422                         case FW_MSG_CODE_PHY_OK:
2423                                 break;
2424                         default:
2425                                 DP_ERR(cdev, "MFW returns error: %08x\n",
2426                                        mcp_response);
2427                                 rc = -EINVAL;
2428                                 goto exit;
2429                         }
2430                 }
2431         }
2432
2433 exit:
2434         release_firmware(image);
2435
2436         return rc;
2437 }
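
/* Note: this entry point is exported through qed_common_ops.nvm_flash
 * below; protocol drivers typically invoke it from their ethtool
 * flash-device hook, so 'name' is resolved by request_firmware() against
 * the kernel's firmware search path (e.g. /lib/firmware).
 */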
2438
2439 static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
2440                              u8 *buf, u16 len)
2441 {
2442         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2443
2444         return qed_mcp_get_nvm_image(hwfn, type, buf, len);
2445 }
2446
2447 void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
2448 {
2449         struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
2450         void *cookie = p_hwfn->cdev->ops_cookie;
2451
2452         if (ops && ops->schedule_recovery_handler)
2453                 ops->schedule_recovery_handler(cookie);
2454 }
2455
2456 static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
2457                             void *handle)
2458 {
2459         return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
2460 }
2461
2462 static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
2463 {
2464         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2465         struct qed_ptt *ptt;
2466         int status = 0;
2467
2468         ptt = qed_ptt_acquire(hwfn);
2469         if (!ptt)
2470                 return -EAGAIN;
2471
2472         status = qed_mcp_set_led(hwfn, ptt, mode);
2473
2474         qed_ptt_release(hwfn, ptt);
2475
2476         return status;
2477 }
2478
2479 static int qed_recovery_process(struct qed_dev *cdev)
2480 {
2481         struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
2482         struct qed_ptt *p_ptt;
2483         int rc = 0;
2484
2485         p_ptt = qed_ptt_acquire(p_hwfn);
2486         if (!p_ptt)
2487                 return -EAGAIN;
2488
2489         rc = qed_start_recovery_process(p_hwfn, p_ptt);
2490
2491         qed_ptt_release(p_hwfn, p_ptt);
2492
2493         return rc;
2494 }
2495
2496 static int qed_update_wol(struct qed_dev *cdev, bool enabled)
2497 {
2498         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2499         struct qed_ptt *ptt;
2500         int rc = 0;
2501
2502         if (IS_VF(cdev))
2503                 return 0;
2504
2505         ptt = qed_ptt_acquire(hwfn);
2506         if (!ptt)
2507                 return -EAGAIN;
2508
2509         rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
2510                                    : QED_OV_WOL_DISABLED);
2511         if (rc)
2512                 goto out;
2513         rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2514
2515 out:
2516         qed_ptt_release(hwfn, ptt);
2517         return rc;
2518 }
2519
2520 static int qed_update_drv_state(struct qed_dev *cdev, bool active)
2521 {
2522         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2523         struct qed_ptt *ptt;
2524         int status = 0;
2525
2526         if (IS_VF(cdev))
2527                 return 0;
2528
2529         ptt = qed_ptt_acquire(hwfn);
2530         if (!ptt)
2531                 return -EAGAIN;
2532
2533         status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
2534                                                 QED_OV_DRIVER_STATE_ACTIVE :
2535                                                 QED_OV_DRIVER_STATE_DISABLED);
2536
2537         qed_ptt_release(hwfn, ptt);
2538
2539         return status;
2540 }
2541
2542 static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
2543 {
2544         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2545         struct qed_ptt *ptt;
2546         int status = 0;
2547
2548         if (IS_VF(cdev))
2549                 return 0;
2550
2551         ptt = qed_ptt_acquire(hwfn);
2552         if (!ptt)
2553                 return -EAGAIN;
2554
2555         status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
2556         if (status)
2557                 goto out;
2558
2559         status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2560
2561 out:
2562         qed_ptt_release(hwfn, ptt);
2563         return status;
2564 }
2565
2566 static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
2567 {
2568         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2569         struct qed_ptt *ptt;
2570         int status = 0;
2571
2572         if (IS_VF(cdev))
2573                 return 0;
2574
2575         ptt = qed_ptt_acquire(hwfn);
2576         if (!ptt)
2577                 return -EAGAIN;
2578
2579         status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
2580         if (status)
2581                 goto out;
2582
2583         status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);
2584
2585 out:
2586         qed_ptt_release(hwfn, ptt);
2587         return status;
2588 }
2589
2590 static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
2591                                   u8 dev_addr, u32 offset, u32 len)
2592 {
2593         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2594         struct qed_ptt *ptt;
2595         int rc = 0;
2596
2597         if (IS_VF(cdev))
2598                 return 0;
2599
2600         ptt = qed_ptt_acquire(hwfn);
2601         if (!ptt)
2602                 return -EAGAIN;
2603
2604         rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
2605                                   offset, len, buf);
2606
2607         qed_ptt_release(hwfn, ptt);
2608
2609         return rc;
2610 }
2611
2612 static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
2613 {
2614         struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
2615         struct qed_ptt *ptt;
2616         int rc = 0;
2617
2618         if (IS_VF(cdev))
2619                 return 0;
2620
2621         ptt = qed_ptt_acquire(hwfn);
2622         if (!ptt)
2623                 return -EAGAIN;
2624
2625         rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
2626
2627         qed_ptt_release(hwfn, ptt);
2628
2629         return rc;
2630 }
2631
2632 static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
2633 {
2634         return QED_AFFIN_HWFN_IDX(cdev);
2635 }
2636
2637 static struct qed_selftest_ops qed_selftest_ops_pass = {
2638         .selftest_memory = &qed_selftest_memory,
2639         .selftest_interrupt = &qed_selftest_interrupt,
2640         .selftest_register = &qed_selftest_register,
2641         .selftest_clock = &qed_selftest_clock,
2642         .selftest_nvram = &qed_selftest_nvram,
2643 };
2644
2645 const struct qed_common_ops qed_common_ops_pass = {
2646         .selftest = &qed_selftest_ops_pass,
2647         .probe = &qed_probe,
2648         .remove = &qed_remove,
2649         .set_power_state = &qed_set_power_state,
2650         .set_name = &qed_set_name,
2651         .update_pf_params = &qed_update_pf_params,
2652         .slowpath_start = &qed_slowpath_start,
2653         .slowpath_stop = &qed_slowpath_stop,
2654         .set_fp_int = &qed_set_int_fp,
2655         .get_fp_int = &qed_get_int_fp,
2656         .sb_init = &qed_sb_init,
2657         .sb_release = &qed_sb_release,
2658         .simd_handler_config = &qed_simd_handler_config,
2659         .simd_handler_clean = &qed_simd_handler_clean,
2660         .dbg_grc = &qed_dbg_grc,
2661         .dbg_grc_size = &qed_dbg_grc_size,
2662         .can_link_change = &qed_can_link_change,
2663         .set_link = &qed_set_link,
2664         .get_link = &qed_get_current_link,
2665         .drain = &qed_drain,
2666         .update_msglvl = &qed_init_dp,
2667         .dbg_all_data = &qed_dbg_all_data,
2668         .dbg_all_data_size = &qed_dbg_all_data_size,
2669         .chain_alloc = &qed_chain_alloc,
2670         .chain_free = &qed_chain_free,
2671         .nvm_flash = &qed_nvm_flash,
2672         .nvm_get_image = &qed_nvm_get_image,
2673         .set_coalesce = &qed_set_coalesce,
2674         .set_led = &qed_set_led,
2675         .recovery_process = &qed_recovery_process,
2676         .recovery_prolog = &qed_recovery_prolog,
2677         .update_drv_state = &qed_update_drv_state,
2678         .update_mac = &qed_update_mac,
2679         .update_mtu = &qed_update_mtu,
2680         .update_wol = &qed_update_wol,
2681         .db_recovery_add = &qed_db_recovery_add,
2682         .db_recovery_del = &qed_db_recovery_del,
2683         .read_module_eeprom = &qed_read_module_eeprom,
2684         .get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
2685         .read_nvm_cfg = &qed_nvm_flash_cfg_read,
2686         .read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
2687         .set_grc_config = &qed_set_grc_config,
2688 };
2689
2690 void qed_get_protocol_stats(struct qed_dev *cdev,
2691                             enum qed_mcp_protocol_type type,
2692                             union qed_mcp_protocol_stats *stats)
2693 {
2694         struct qed_eth_stats eth_stats;
2695
2696         memset(stats, 0, sizeof(*stats));
2697
2698         switch (type) {
2699         case QED_MCP_LAN_STATS:
2700                 qed_get_vport_stats(cdev, &eth_stats);
2701                 stats->lan_stats.ucast_rx_pkts =
2702                                         eth_stats.common.rx_ucast_pkts;
2703                 stats->lan_stats.ucast_tx_pkts =
2704                                         eth_stats.common.tx_ucast_pkts;
2705                 stats->lan_stats.fcs_err = -1;
2706                 break;
2707         case QED_MCP_FCOE_STATS:
2708                 qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
2709                 break;
2710         case QED_MCP_ISCSI_STATS:
2711                 qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
2712                 break;
2713         default:
2714                 DP_VERBOSE(cdev, QED_MSG_SP,
2715                            "Invalid protocol type = %d\n", type);
2716                 return;
2717         }
2718 }
2719
2720 int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
2721 {
2722         DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
2723                    "Scheduling slowpath task [Flag: %d]\n",
2724                    QED_SLOWPATH_MFW_TLV_REQ);
2725         smp_mb__before_atomic();
2726         set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
2727         smp_mb__after_atomic();
2728         queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
2729
2730         return 0;
2731 }
2732
2733 static void
2734 qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
2735 {
2736         struct qed_common_cb_ops *op = cdev->protocol_ops.common;
2737         struct qed_eth_stats_common *p_common;
2738         struct qed_generic_tlvs gen_tlvs;
2739         struct qed_eth_stats stats;
2740         int i;
2741
2742         memset(&gen_tlvs, 0, sizeof(gen_tlvs));
2743         op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
2744
2745         if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
2746                 tlv->flags.ipv4_csum_offload = true;
2747         if (gen_tlvs.feat_flags & QED_TLV_LSO)
2748                 tlv->flags.lso_supported = true;
2749         tlv->flags.b_set = true;
2750
2751         for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
2752                 if (is_valid_ether_addr(gen_tlvs.mac[i])) {
2753                         ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
2754                         tlv->mac_set[i] = true;
2755                 }
2756         }
2757
2758         qed_get_vport_stats(cdev, &stats);
2759         p_common = &stats.common;
2760         tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
2761                          p_common->rx_bcast_pkts;
2762         tlv->rx_frames_set = true;
2763         tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
2764                         p_common->rx_bcast_bytes;
2765         tlv->rx_bytes_set = true;
2766         tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
2767                          p_common->tx_bcast_pkts;
2768         tlv->tx_frames_set = true;
2769         tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
2770                         p_common->tx_bcast_bytes;
2771         tlv->tx_bytes_set = true;
2772 }
2773
2774 int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
2775                           union qed_mfw_tlv_data *tlv_buf)
2776 {
2777         struct qed_dev *cdev = hwfn->cdev;
2778         struct qed_common_cb_ops *ops;
2779
2780         ops = cdev->protocol_ops.common;
2781         if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
2782                 DP_NOTICE(hwfn, "Can't collect TLV management info\n");
2783                 return -EINVAL;
2784         }
2785
2786         switch (type) {
2787         case QED_MFW_TLV_GENERIC:
2788                 qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
2789                 break;
2790         case QED_MFW_TLV_ETH:
2791                 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
2792                 break;
2793         case QED_MFW_TLV_FCOE:
2794                 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
2795                 break;
2796         case QED_MFW_TLV_ISCSI:
2797                 ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
2798                 break;
2799         default:
2800                 break;
2801         }
2802
2803         return 0;
2804 }