bnx2x: Protect a SM state change
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION      "1.52.53-1"
#define DRV_MODULE_RELDATE      "2010/18/04"
#define BNX2X_BC_VER            0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}
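
/*
 * Note: both helpers above reach GRC registers through the
 * PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA window in PCI config space rather
 * than through an MMIO mapping.  The trailing write of
 * PCICFG_VENDOR_ID_OFFSET parks the window on a harmless offset,
 * presumably so a later stray config-space access cannot hit a stale
 * GRC address.
 */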

static const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
                            int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}
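
/*
 * The DMAE block keeps one command slot per channel in DMAE_REG_CMD_MEM;
 * the loop above copies the command into channel 'idx's slot dword by
 * dword, and the final write to dmae_reg_go_c[idx] kicks the engine on
 * that channel.
 */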

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}
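
/*
 * Completion above is detected by polling the write-back word that the
 * DMAE engine sets to DMAE_COMP_VAL once the transfer is done; on
 * timeout only an error is logged, nothing is rolled back.
 */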

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
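
/*
 * Note: 'len' above is counted in 32-bit words while the addresses
 * advance in bytes, hence the "* 4" when updating 'offset'.  As a
 * hypothetical example, with dmae_wr_max = 64 a 200-dword write is
 * issued as chunks of 64, 64, 64 and a final 8, the addresses
 * advancing 256 bytes per full chunk.
 */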

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}
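
/*
 * Each of the four STORM processors (X/T/C/U) keeps its own assert
 * list in internal memory; the scans above stop at the first entry
 * whose opcode reads as COMMON_ASM_INVALID_ASSERT_OPCODE and the
 * function returns the total number of asserts found across all four
 * lists.
 */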

static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}
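
/*
 * Interrupt-mode summary for the HC programming above: MSI-X clears
 * single-ISR and the INTx line enable, MSI keeps single-ISR but drops
 * INTx, and plain INTx enables everything.  In the INTx case the
 * config is first written with the MSI/MSI-X enable bit set and then
 * rewritten with that bit cleared, presumably an ordering the HC
 * requires when falling back to INTx.
 */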

static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Returns true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}
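
/*
 * The HW lock lives in per-function MISC_REG_DRIVER_CONTROL registers:
 * writing the resource bit to hw_lock_control_reg + 4 requests the
 * lock, and reading the control register back shows whether this
 * function now owns the bit.  Unlike the blocking acquire path, this
 * variant makes a single attempt and never retries.
 */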

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
                                u8 storm, u16 index, u8 op, u8 update)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_INT_ACK);
        struct igu_ack_register igu_ack;

        igu_ack.status_block_index = index;
        igu_ack.sb_id_and_flags =
                        ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
                         (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
                         (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
                         (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

        DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
           (*(u32 *)&igu_ack), hc_addr);
        REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

        /* Make sure that ACK is written */
        mmiowb();
        barrier();
}

static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
        struct host_status_block *fpsb = fp->status_blk;

        barrier(); /* status block is written to by the chip */
        fp->fp_c_idx = fpsb->c_status_block.status_block_index;
        fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
        u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
                       COMMAND_REG_SIMD_MASK);
        u32 result = REG_RD(bp, hc_addr);

        DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
           result, hc_addr);

        return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
        /* Tell compiler that consumer and producer can change */
        barrier();
        return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                             u16 idx)
{
        struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_bd *tx_data_bd;
        struct sk_buff *skb = tx_buf->skb;
        u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
        int nbd;

        /* prefetch skb end pointer to speedup dev_kfree_skb() */
        prefetch(&skb->end);

        DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
           idx, tx_buf, skb);

        /* unmap first bd */
        DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
        tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
        dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
                         BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

        nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
        if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
                BNX2X_ERR("BAD nbd!\n");
                bnx2x_panic();
        }
#endif
        new_cons = nbd + tx_buf->first_bd;

        /* Get the next bd */
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* Skip a parse bd... */
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

        /* ...and the TSO split header bd since they have no mapping */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                --nbd;
                bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* now free frags */
        while (nbd > 0) {

                DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
                tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
                dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
                               BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
                if (--nbd)
                        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
        }

        /* release skb */
        WARN_ON(!skb);
        dev_kfree_skb(skb);
        tx_buf->first_bd = 0;
        tx_buf->skb = NULL;

        return new_cons;
}
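
/*
 * BD accounting above: the 'nbd' taken from the start BD covers the
 * start BD, the parse BD, an optional TSO split-header BD and the data
 * BDs.  Only the start BD and the data BDs carry DMA mappings, which
 * is why the parse/split BDs are stepped over without an unmap.
 */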

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
        s16 used;
        u16 prod;
        u16 cons;

        prod = fp->tx_bd_prod;
        cons = fp->tx_bd_cons;

        /* NUM_TX_RINGS = number of "next-page" entries
           It will be used as a threshold */
        used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
        WARN_ON(used < 0);
        WARN_ON(used > fp->bp->tx_ring_size);
        WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

        return (s16)(fp->bp->tx_ring_size) - used;
}
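
/*
 * Worked example of the calculation above: with prod = 100, cons = 90
 * and the NUM_TX_RINGS "next-page" BDs counted as permanently used,
 * used = 10 + NUM_TX_RINGS, leaving tx_ring_size - used BDs available.
 * SUB_S16() keeps the subtraction correct across the 16-bit wrap.
 */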

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
        u16 hw_cons;

        /* Tell compiler that status block fields can change */
        barrier();
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        return hw_cons != fp->tx_pkt_cons;
}

static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
        struct bnx2x *bp = fp->bp;
        struct netdev_queue *txq;
        u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -1;
#endif

        txq = netdev_get_tx_queue(bp->dev, fp->index);
        hw_cons = le16_to_cpu(*fp->tx_cons_sb);
        sw_cons = fp->tx_pkt_cons;

        while (sw_cons != hw_cons) {
                u16 pkt_cons;

                pkt_cons = TX_BD(sw_cons);

                /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

                DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
                   hw_cons, sw_cons, pkt_cons);

/*              if (NEXT_TX_IDX(sw_cons) != hw_cons) {
                        rmb();
                        prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
                }
*/
                bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
                sw_cons++;
        }

        fp->tx_pkt_cons = sw_cons;
        fp->tx_bd_cons = bd_cons;

        /* Need to make the tx_bd_cons update visible to start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that
         * start_xmit() will miss it and cause the queue to be stopped
         * forever.
         */
        smp_mb();

        /* TBD need a thresh? */
        if (unlikely(netif_tx_queue_stopped(txq))) {
                /* Taking the tx lock is needed to prevent re-enabling the
                 * queue while it's empty. This could happen if rx_action()
                 * gets suspended in bnx2x_tx_int() after the condition
                 * before netif_tx_wake_queue(), while tx_action
                 * (bnx2x_start_xmit()):
                 *
                 * stops the queue->sees fresh tx_bd_cons->releases the queue->
                 * sends some packets consuming the whole queue again->
                 * stops the queue
                 */

                __netif_tx_lock(txq, smp_processor_id());

                if ((netif_tx_queue_stopped(txq)) &&
                    (bp->state == BNX2X_STATE_OPEN) &&
                    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
                        netif_tx_wake_queue(txq);

                __netif_tx_unlock(txq);
        }
        return 0;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct page *page = sw_buf->page;
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

        /* Skip "next page" elements */
        if (!page)
                return;

        dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
                       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        __free_pages(page, PAGES_PER_SGE_SHIFT);

        sw_buf->page = NULL;
        sge->addr_hi = 0;
        sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
                                           struct bnx2x_fastpath *fp, int last)
{
        int i;

        for (i = 0; i < last; i++)
                bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
        struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
        struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
        dma_addr_t mapping;

        if (unlikely(page == NULL))
                return -ENOMEM;

        mapping = dma_map_page(&bp->pdev->dev, page, 0,
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }

        sw_buf->page = page;
        dma_unmap_addr_set(sw_buf, mapping, mapping);

        sge->addr_hi = cpu_to_le32(U64_HI(mapping));
        sge->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
                                     struct bnx2x_fastpath *fp, u16 index)
{
        struct sk_buff *skb;
        struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
        struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
        dma_addr_t mapping;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (unlikely(skb == NULL))
                return -ENOMEM;

        mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
                                 DMA_FROM_DEVICE);
        if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }

        rx_buf->skb = skb;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

        return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
                               struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

        dma_sync_single_for_device(&bp->pdev->dev,
                                   dma_unmap_addr(cons_rx_buf, mapping),
                                   RX_COPY_THRESH, DMA_FROM_DEVICE);

        prod_rx_buf->skb = cons_rx_buf->skb;
        dma_unmap_addr_set(prod_rx_buf, mapping,
                           dma_unmap_addr(cons_rx_buf, mapping));
        *prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
                                             u16 idx)
{
        u16 last_max = fp->last_max_sge;

        if (SUB_S16(idx, last_max) > 0)
                fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
        int i, j;

        for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
                int idx = RX_SGE_CNT * i - 1;

                for (j = 0; j < 2; j++) {
                        SGE_MASK_CLEAR_BIT(fp, idx);
                        idx--;
                }
        }
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
                                  struct eth_fast_path_rx_cqe *fp_cqe)
{
        struct bnx2x *bp = fp->bp;
        u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
                                     le16_to_cpu(fp_cqe->len_on_bd)) >>
                      SGE_PAGE_SHIFT;
        u16 last_max, last_elem, first_elem;
        u16 delta = 0;
        u16 i;

        if (!sge_len)
                return;

        /* First mark all used pages */
        for (i = 0; i < sge_len; i++)
                SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

        DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
           sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        /* Here we assume that the last SGE index is the biggest */
        prefetch((void *)(fp->sge_mask));
        bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

        last_max = RX_SGE(fp->last_max_sge);
        last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
        first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

        /* If ring is not full */
        if (last_elem + 1 != first_elem)
                last_elem++;

        /* Now update the prod */
        for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
                if (likely(fp->sge_mask[i]))
                        break;

                fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
                delta += RX_SGE_MASK_ELEM_SZ;
        }

        if (delta > 0) {
                fp->rx_sge_prod += delta;
                /* clear page-end entries */
                bnx2x_clear_sge_mask_next_elems(fp);
        }

        DP(NETIF_MSG_RX_STATUS,
           "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
           fp->last_max_sge, fp->rx_sge_prod);
}
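
/*
 * The sge_mask bookkeeping above: bits are cleared as the FW consumes
 * SGEs, and whole mask elements that have gone to zero are re-armed to
 * RX_SGE_MASK_ELEM_ONE_MASK while rx_sge_prod advances past them, so
 * the producer only ever moves over fully completed elements.
 */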

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
        /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
        memset(fp->sge_mask, 0xff,
               (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

        /* Clear the two last indices in the page to 1:
           these are the indices that correspond to the "next" element,
           hence will never be indicated and should be removed from
           the calculations. */
        bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
                            struct sk_buff *skb, u16 cons, u16 prod)
{
        struct bnx2x *bp = fp->bp;
        struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
        struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
        struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
        dma_addr_t mapping;

        /* move empty skb from pool to prod and map it */
        prod_rx_buf->skb = fp->tpa_pool[queue].skb;
        mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
                                 bp->rx_buf_size, DMA_FROM_DEVICE);
        dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

        /* move partial skb from cons to pool (don't unmap yet) */
        fp->tpa_pool[queue] = *cons_rx_buf;

        /* mark bin state as start - print error if current state != stop */
        if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
                BNX2X_ERR("start of bin not in stop [%d]\n", queue);

        fp->tpa_state[queue] = BNX2X_TPA_START;

        /* point prod_bd to new skb */
        prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
        prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
        fp->tpa_queue_used |= (1 << queue);
#ifdef _ASM_GENERIC_INT_L64_H
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
        DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
           fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
                               struct sk_buff *skb,
                               struct eth_fast_path_rx_cqe *fp_cqe,
                               u16 cqe_idx)
{
        struct sw_rx_page *rx_pg, old_rx_pg;
        u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
        u32 i, frag_len, frag_size, pages;
        int err;
        int j;

        frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
        pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

        /* This is needed in order to enable forwarding support */
        if (frag_size)
                skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
                                               max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
        if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
                BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
                          pages, cqe_idx);
                BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
                          fp_cqe->pkt_len, len_on_bd);
                bnx2x_panic();
                return -EINVAL;
        }
#endif

        /* Run through the SGL and compose the fragmented skb */
        for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
                u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

                /* FW gives the indices of the SGE as if the ring is an array
                   (meaning that "next" element will consume 2 indices) */
                frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
                rx_pg = &fp->rx_page_ring[sge_idx];
                old_rx_pg = *rx_pg;

                /* If we fail to allocate a substitute page, we simply stop
                   where we are and drop the whole packet */
                err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
                if (unlikely(err)) {
                        fp->eth_q_stats.rx_skb_alloc_failed++;
                        return err;
                }
                /* Unmap the page as we are going to pass it to the stack */
                dma_unmap_page(&bp->pdev->dev,
                               dma_unmap_addr(&old_rx_pg, mapping),
                               SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

                /* Add one frag and update the appropriate fields in the skb */
                skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

                skb->data_len += frag_len;
                skb->truesize += frag_len;
                skb->len += frag_len;

                frag_size -= frag_len;
        }

        return 0;
}
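
/*
 * Note on the gso_size setting at the top of this function: an
 * aggregated (TPA) packet that gets forwarded must be re-segmented, so
 * gso_size is set to the per-frame payload size, capped at one SGE
 * page, presumably to let the stack perform that re-segmentation.
 */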
1422
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424                            u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1425                            u16 cqe_idx)
1426 {
1427         struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428         struct sk_buff *skb = rx_buf->skb;
1429         /* alloc new skb */
1430         struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1431
1432         /* Unmap skb in the pool anyway, as we are going to change
1433            pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434            fails. */
1435         dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436                          bp->rx_buf_size, DMA_FROM_DEVICE);
1437
1438         if (likely(new_skb)) {
1439                 /* fix ip xsum and give it to the stack */
1440                 /* (no need to map the new skb) */
1441 #ifdef BCM_VLAN
1442                 int is_vlan_cqe =
1443                         (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444                          PARSING_FLAGS_VLAN);
1445                 int is_not_hwaccel_vlan_cqe =
1446                         (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1447 #endif
1448
1449                 prefetch(skb);
1450                 prefetch(((char *)(skb)) + 128);
1451
1452 #ifdef BNX2X_STOP_ON_ERROR
1453                 if (pad + len > bp->rx_buf_size) {
1454                         BNX2X_ERR("skb_put is about to fail...  "
1455                                   "pad %d  len %d  rx_buf_size %d\n",
1456                                   pad, len, bp->rx_buf_size);
1457                         bnx2x_panic();
1458                         return;
1459                 }
1460 #endif
1461
1462                 skb_reserve(skb, pad);
1463                 skb_put(skb, len);
1464
1465                 skb->protocol = eth_type_trans(skb, bp->dev);
1466                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1467
1468                 {
1469                         struct iphdr *iph;
1470
1471                         iph = (struct iphdr *)skb->data;
1472 #ifdef BCM_VLAN
1473                         /* If there is no Rx VLAN offloading -
1474                            take the VLAN tag into account */
1475                         if (unlikely(is_not_hwaccel_vlan_cqe))
1476                                 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1477 #endif
1478                         iph->check = 0;
1479                         iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1480                 }
1481
1482                 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483                                          &cqe->fast_path_cqe, cqe_idx)) {
1484 #ifdef BCM_VLAN
1485                         if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486                             (!is_not_hwaccel_vlan_cqe))
1487                                 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488                                                  le16_to_cpu(cqe->fast_path_cqe.
1489                                                              vlan_tag), skb);
1490                         else
1491 #endif
1492                                 napi_gro_receive(&fp->napi, skb);
1493                 } else {
1494                         DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495                            " - dropping packet!\n");
1496                         dev_kfree_skb(skb);
1497                 }
1498
1499
1500                 /* put new skb in bin */
1501                 fp->tpa_pool[queue].skb = new_skb;
1502
1503         } else {
1504                 /* else drop the packet and keep the buffer in the bin */
1505                 DP(NETIF_MSG_RX_STATUS,
1506                    "Failed to allocate new skb - dropping packet!\n");
1507                 fp->eth_q_stats.rx_skb_alloc_failed++;
1508         }
1509
1510         fp->tpa_state[queue] = BNX2X_TPA_STOP;
1511 }
1512
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514                                         struct bnx2x_fastpath *fp,
1515                                         u16 bd_prod, u16 rx_comp_prod,
1516                                         u16 rx_sge_prod)
1517 {
1518         struct ustorm_eth_rx_producers rx_prods = {0};
1519         int i;
1520
1521         /* Update producers */
1522         rx_prods.bd_prod = bd_prod;
1523         rx_prods.cqe_prod = rx_comp_prod;
1524         rx_prods.sge_prod = rx_sge_prod;
1525
1526         /*
1527          * Make sure that the BD and SGE data is updated before updating the
1528          * producers since FW might read the BD/SGE right after the producer
1529          * is updated.
1530          * This is only applicable for weak-ordered memory model archs such
1531          * as IA-64. The following barrier is also mandatory since the FW
1532          * assumes BDs must have buffers.
1533          */
1534         wmb();
1535
1536         for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537                 REG_WR(bp, BAR_USTRORM_INTMEM +
1538                        USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539                        ((u32 *)&rx_prods)[i]);
1540
1541         mmiowb(); /* keep prod updates ordered */
1542
1543         DP(NETIF_MSG_RX_STATUS,
1544            "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
1545            fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1546 }
1547
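/* Main Rx completion loop: consume up to @budget packets from the RCQ,
 * dispatching slowpath events, TPA start/stop aggregations and regular
 * packets, then publish the updated BD/CQE/SGE producers to the FW.
 */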
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1549 {
1550         struct bnx2x *bp = fp->bp;
1551         u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552         u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1553         int rx_pkt = 0;
1554
1555 #ifdef BNX2X_STOP_ON_ERROR
1556         if (unlikely(bp->panic))
1557                 return 0;
1558 #endif
1559
1560         /* The CQ "next element" is the same size as a regular element,
1561            so it is safe to treat it like one here */
1562         hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563         if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1564                 hw_comp_cons++;
1565
1566         bd_cons = fp->rx_bd_cons;
1567         bd_prod = fp->rx_bd_prod;
1568         bd_prod_fw = bd_prod;
1569         sw_comp_cons = fp->rx_comp_cons;
1570         sw_comp_prod = fp->rx_comp_prod;
1571
1572         /* Memory barrier necessary as speculative reads of the rx
1573          * buffer can be ahead of the index in the status block
1574          */
1575         rmb();
1576
1577         DP(NETIF_MSG_RX_STATUS,
1578            "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
1579            fp->index, hw_comp_cons, sw_comp_cons);
1580
1581         while (sw_comp_cons != hw_comp_cons) {
1582                 struct sw_rx_bd *rx_buf = NULL;
1583                 struct sk_buff *skb;
1584                 union eth_rx_cqe *cqe;
1585                 u8 cqe_fp_flags, cqe_fp_status_flags;
1586                 u16 len, pad;
1587
1588                 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589                 bd_prod = RX_BD(bd_prod);
1590                 bd_cons = RX_BD(bd_cons);
1591
1592                 /* Prefetch the page containing the BD descriptor
1593                    at the producer's index; it will be needed when a new
1594                    skb is allocated */
1595                 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596                                              (&fp->rx_desc_ring[bd_prod])) -
1597                                   PAGE_SIZE + 1));
1598
1599                 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600                 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601                 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1602
1603                 DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
1604                    "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
1605                    cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606                    le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607                    le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608                    le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1609
1610                 /* is this a slowpath msg? */
1611                 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612                         bnx2x_sp_event(fp, cqe);
1613                         goto next_cqe;
1614
1615                 /* this is an rx packet */
1616                 } else {
1617                         rx_buf = &fp->rx_buf_ring[bd_cons];
1618                         skb = rx_buf->skb;
1619                         prefetch(skb);
1620                         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621                         pad = cqe->fast_path_cqe.placement_offset;
1622
1623                         /* If CQE is marked both TPA_START and TPA_END
1624                            it is a non-TPA CQE */
1625                         if ((!fp->disable_tpa) &&
1626                             (TPA_TYPE(cqe_fp_flags) !=
1627                                         (TPA_TYPE_START | TPA_TYPE_END))) {
1628                                 u16 queue = cqe->fast_path_cqe.queue_index;
1629
1630                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631                                         DP(NETIF_MSG_RX_STATUS,
1632                                            "calling tpa_start on queue %d\n",
1633                                            queue);
1634
1635                                         bnx2x_tpa_start(fp, queue, skb,
1636                                                         bd_cons, bd_prod);
1637                                         goto next_rx;
1638                                 }
1639
1640                                 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641                                         DP(NETIF_MSG_RX_STATUS,
1642                                            "calling tpa_stop on queue %d\n",
1643                                            queue);
1644
1645                                         if (!BNX2X_RX_SUM_FIX(cqe))
1646                                                 BNX2X_ERR("STOP on non-TCP "
1647                                                           "data\n");
1648
1649                                         /* This is the size of the linear
1650                                            data on this skb */
1651                                         len = le16_to_cpu(cqe->fast_path_cqe.
1652                                                                 len_on_bd);
1653                                         bnx2x_tpa_stop(bp, fp, queue, pad,
1654                                                     len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1656                                         if (bp->panic)
1657                                                 return 0;
1658 #endif
1659
1660                                         bnx2x_update_sge_prod(fp,
1661                                                         &cqe->fast_path_cqe);
1662                                         goto next_cqe;
1663                                 }
1664                         }
1665
1666                         dma_sync_single_for_device(&bp->pdev->dev,
1667                                         dma_unmap_addr(rx_buf, mapping),
1668                                                    pad + RX_COPY_THRESH,
1669                                                    DMA_FROM_DEVICE);
1670                         prefetch(((char *)(skb)) + 128);
1671
1672                         /* is this an error packet? */
1673                         if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674                                 DP(NETIF_MSG_RX_ERR,
1675                                    "ERROR  flags %x  rx packet %u\n",
1676                                    cqe_fp_flags, sw_comp_cons);
1677                                 fp->eth_q_stats.rx_err_discard_pkt++;
1678                                 goto reuse_rx;
1679                         }
1680
1681                         /* Since we don't have a jumbo ring
1682                          * copy small packets if mtu > 1500
1683                          */
1684                         if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685                             (len <= RX_COPY_THRESH)) {
1686                                 struct sk_buff *new_skb;
1687
1688                                 new_skb = netdev_alloc_skb(bp->dev,
1689                                                            len + pad);
1690                                 if (new_skb == NULL) {
1691                                         DP(NETIF_MSG_RX_ERR,
1692                                            "ERROR  packet dropped "
1693                                            "because of alloc failure\n");
1694                                         fp->eth_q_stats.rx_skb_alloc_failed++;
1695                                         goto reuse_rx;
1696                                 }
1697
1698                                 /* aligned copy */
1699                                 skb_copy_from_linear_data_offset(skb, pad,
1700                                                     new_skb->data + pad, len);
1701                                 skb_reserve(new_skb, pad);
1702                                 skb_put(new_skb, len);
1703
1704                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1705
1706                                 skb = new_skb;
1707
1708                         } else
1709                         if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710                                 dma_unmap_single(&bp->pdev->dev,
1711                                         dma_unmap_addr(rx_buf, mapping),
1712                                                  bp->rx_buf_size,
1713                                                  DMA_FROM_DEVICE);
1714                                 skb_reserve(skb, pad);
1715                                 skb_put(skb, len);
1716
1717                         } else {
1718                                 DP(NETIF_MSG_RX_ERR,
1719                                    "ERROR  packet dropped because "
1720                                    "of alloc failure\n");
1721                                 fp->eth_q_stats.rx_skb_alloc_failed++;
1722 reuse_rx:
1723                                 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1724                                 goto next_rx;
1725                         }
1726
1727                         skb->protocol = eth_type_trans(skb, bp->dev);
1728
1729                         if ((bp->dev->features & NETIF_F_RXHASH) &&
1730                             (cqe_fp_status_flags &
1731                              ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732                                 skb->rxhash = le32_to_cpu(
1733                                     cqe->fast_path_cqe.rss_hash_result);
1734
1735                         skb->ip_summed = CHECKSUM_NONE;
1736                         if (bp->rx_csum) {
1737                                 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738                                         skb->ip_summed = CHECKSUM_UNNECESSARY;
1739                                 else
1740                                         fp->eth_q_stats.hw_csum_err++;
1741                         }
1742                 }
1743
1744                 skb_record_rx_queue(skb, fp->index);
1745
1746 #ifdef BCM_VLAN
1747                 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748                     (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749                      PARSING_FLAGS_VLAN))
1750                         vlan_gro_receive(&fp->napi, bp->vlgrp,
1751                                 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1752                 else
1753 #endif
1754                         napi_gro_receive(&fp->napi, skb);
1755
1756
1757 next_rx:
1758                 rx_buf->skb = NULL;
1759
1760                 bd_cons = NEXT_RX_IDX(bd_cons);
1761                 bd_prod = NEXT_RX_IDX(bd_prod);
1762                 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1763                 rx_pkt++;
1764 next_cqe:
1765                 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766                 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1767
1768                 if (rx_pkt == budget)
1769                         break;
1770         } /* while */
1771
1772         fp->rx_bd_cons = bd_cons;
1773         fp->rx_bd_prod = bd_prod_fw;
1774         fp->rx_comp_cons = sw_comp_cons;
1775         fp->rx_comp_prod = sw_comp_prod;
1776
1777         /* Update producers */
1778         bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1779                              fp->rx_sge_prod);
1780
1781         fp->rx_pkt += rx_pkt;
1782         fp->rx_calls++;
1783
1784         return rx_pkt;
1785 }
1786
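/* MSI-X fastpath vector handler: ack the status block with the IGU
 * interrupt disabled and defer the actual Rx/Tx work to NAPI.
 */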
1787 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1788 {
1789         struct bnx2x_fastpath *fp = fp_cookie;
1790         struct bnx2x *bp = fp->bp;
1791
1792         /* Return here if interrupt is disabled */
1793         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1795                 return IRQ_HANDLED;
1796         }
1797
1798         DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799            fp->index, fp->sb_id);
1800         bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1801
1802 #ifdef BNX2X_STOP_ON_ERROR
1803         if (unlikely(bp->panic))
1804                 return IRQ_HANDLED;
1805 #endif
1806
1807         /* Handle Rx and Tx according to MSI-X vector */
1808         prefetch(fp->rx_cons_sb);
1809         prefetch(fp->tx_cons_sb);
1810         prefetch(&fp->status_blk->u_status_block.status_block_index);
1811         prefetch(&fp->status_blk->c_status_block.status_block_index);
1812         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1813
1814         return IRQ_HANDLED;
1815 }
1816
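/* INTA/MSI handler: demultiplex the status word returned by
 * bnx2x_ack_int() - each fastpath SB contributes the (0x2 << sb_id)
 * bit, while bit 0 indicates a slowpath event for the sp_task.
 */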
1817 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1818 {
1819         struct bnx2x *bp = netdev_priv(dev_instance);
1820         u16 status = bnx2x_ack_int(bp);
1821         u16 mask;
1822         int i;
1823
1824         /* Return here if interrupt is shared and it's not for us */
1825         if (unlikely(status == 0)) {
1826                 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1827                 return IRQ_NONE;
1828         }
1829         DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);
1830
1831         /* Return here if interrupt is disabled */
1832         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1834                 return IRQ_HANDLED;
1835         }
1836
1837 #ifdef BNX2X_STOP_ON_ERROR
1838         if (unlikely(bp->panic))
1839                 return IRQ_HANDLED;
1840 #endif
1841
1842         for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843                 struct bnx2x_fastpath *fp = &bp->fp[i];
1844
1845                 mask = 0x2 << fp->sb_id;
1846                 if (status & mask) {
1847                         /* Handle Rx and Tx according to SB id */
1848                         prefetch(fp->rx_cons_sb);
1849                         prefetch(&fp->status_blk->u_status_block.
1850                                                 status_block_index);
1851                         prefetch(fp->tx_cons_sb);
1852                         prefetch(&fp->status_blk->c_status_block.
1853                                                 status_block_index);
1854                         napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1855                         status &= ~mask;
1856                 }
1857         }
1858
1859 #ifdef BCM_CNIC
1860         mask = 0x2 << CNIC_SB_ID(bp);
1861         if (status & (mask | 0x1)) {
1862                 struct cnic_ops *c_ops = NULL;
1863
1864                 rcu_read_lock();
1865                 c_ops = rcu_dereference(bp->cnic_ops);
1866                 if (c_ops)
1867                         c_ops->cnic_handler(bp->cnic_data, NULL);
1868                 rcu_read_unlock();
1869
1870                 status &= ~mask;
1871         }
1872 #endif
1873
1874         if (unlikely(status & 0x1)) {
1875                 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1876
1877                 status &= ~0x1;
1878                 if (!status)
1879                         return IRQ_HANDLED;
1880         }
1881
1882         if (unlikely(status))
1883                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1884                    status);
1885
1886         return IRQ_HANDLED;
1887 }
1888
1889 /* end of fast path */
1890
1891 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1892
1893 /* Link */
1894
1895 /*
1896  * General service functions
1897  */
1898
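/* Take one of the shared HW resource locks by writing its bit to the
 * per-function DRIVER_CONTROL register; polled every 5 ms for up to
 * 5 seconds, returning -EAGAIN on timeout.
 */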
1899 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1900 {
1901         u32 lock_status;
1902         u32 resource_bit = (1 << resource);
1903         int func = BP_FUNC(bp);
1904         u32 hw_lock_control_reg;
1905         int cnt;
1906
1907         /* Validating that the resource is within range */
1908         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1909                 DP(NETIF_MSG_HW,
1910                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1912                 return -EINVAL;
1913         }
1914
1915         if (func <= 5) {
1916                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1917         } else {
1918                 hw_lock_control_reg =
1919                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1920         }
1921
1922         /* Validating that the resource is not already taken */
1923         lock_status = REG_RD(bp, hw_lock_control_reg);
1924         if (lock_status & resource_bit) {
1925                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1926                    lock_status, resource_bit);
1927                 return -EEXIST;
1928         }
1929
1930         /* Try for 5 seconds every 5ms */
1931         for (cnt = 0; cnt < 1000; cnt++) {
1932                 /* Try to acquire the lock */
1933                 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934                 lock_status = REG_RD(bp, hw_lock_control_reg);
1935                 if (lock_status & resource_bit)
1936                         return 0;
1937
1938                 msleep(5);
1939         }
1940         DP(NETIF_MSG_HW, "Timeout\n");
1941         return -EAGAIN;
1942 }
1943
1944 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1945 {
1946         u32 lock_status;
1947         u32 resource_bit = (1 << resource);
1948         int func = BP_FUNC(bp);
1949         u32 hw_lock_control_reg;
1950
1951         DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1952
1953         /* Validating that the resource is within range */
1954         if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1955                 DP(NETIF_MSG_HW,
1956                    "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957                    resource, HW_LOCK_MAX_RESOURCE_VALUE);
1958                 return -EINVAL;
1959         }
1960
1961         if (func <= 5) {
1962                 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1963         } else {
1964                 hw_lock_control_reg =
1965                                 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1966         }
1967
1968         /* Validating that the resource is currently taken */
1969         lock_status = REG_RD(bp, hw_lock_control_reg);
1970         if (!(lock_status & resource_bit)) {
1971                 DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
1972                    lock_status, resource_bit);
1973                 return -EFAULT;
1974         }
1975
1976         REG_WR(bp, hw_lock_control_reg, resource_bit);
1977         return 0;
1978 }
1979
1980 /* HW Lock for shared dual port PHYs */
1981 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1982 {
1983         mutex_lock(&bp->port.phy_mutex);
1984
1985         if (bp->port.need_hw_lock)
1986                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1987 }
1988
1989 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1990 {
1991         if (bp->port.need_hw_lock)
1992                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1993
1994         mutex_unlock(&bp->port.phy_mutex);
1995 }
1996
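
/* GPIO helpers. A pin is addressed by (gpio_num, port); when the NIG
 * port-swap straps are active the port is inverted, presumably so that
 * the same physical pin is reached on swapped boards.
 */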
1997 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1998 {
1999         /* The GPIO should be swapped if swap register is set and active */
2000         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002         int gpio_shift = gpio_num +
2003                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004         u32 gpio_mask = (1 << gpio_shift);
2005         u32 gpio_reg;
2006         int value;
2007
2008         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2010                 return -EINVAL;
2011         }
2012
2013         /* read GPIO value */
2014         gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2015
2016         /* get the requested pin value */
2017         if ((gpio_reg & gpio_mask) == gpio_mask)
2018                 value = 1;
2019         else
2020                 value = 0;
2021
2022         DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);
2023
2024         return value;
2025 }
2026
2027 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2028 {
2029         /* The GPIO should be swapped if swap register is set and active */
2030         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2031                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2032         int gpio_shift = gpio_num +
2033                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034         u32 gpio_mask = (1 << gpio_shift);
2035         u32 gpio_reg;
2036
2037         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2039                 return -EINVAL;
2040         }
2041
2042         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2043         /* read GPIO and mask out all but the float bits */
2044         gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2045
2046         switch (mode) {
2047         case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049                    gpio_num, gpio_shift);
2050                 /* clear FLOAT and set CLR */
2051                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2053                 break;
2054
2055         case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057                    gpio_num, gpio_shift);
2058                 /* clear FLOAT and set SET */
2059                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061                 break;
2062
2063         case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2064                 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065                    gpio_num, gpio_shift);
2066                 /* set FLOAT */
2067                 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2068                 break;
2069
2070         default:
2071                 break;
2072         }
2073
2074         REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2075         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2076
2077         return 0;
2078 }
2079
2080 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2081 {
2082         /* The GPIO should be swapped if swap register is set and active */
2083         int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084                          REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085         int gpio_shift = gpio_num +
2086                         (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087         u32 gpio_mask = (1 << gpio_shift);
2088         u32 gpio_reg;
2089
2090         if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091                 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2092                 return -EINVAL;
2093         }
2094
2095         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2096         /* read GPIO int */
2097         gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2098
2099         switch (mode) {
2100         case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101                 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102                                    "output low\n", gpio_num, gpio_shift);
2103                 /* clear SET and set CLR */
2104                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2106                 break;
2107
2108         case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109                 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110                                    "output high\n", gpio_num, gpio_shift);
2111                 /* clear CLR and set SET */
2112                 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113                 gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2114                 break;
2115
2116         default:
2117                 break;
2118         }
2119
2120         REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2122
2123         return 0;
2124 }
2125
2126 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2127 {
2128         u32 spio_mask = (1 << spio_num);
2129         u32 spio_reg;
2130
2131         if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132             (spio_num > MISC_REGISTERS_SPIO_7)) {
2133                 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2134                 return -EINVAL;
2135         }
2136
2137         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2138         /* read SPIO and mask out all but the float bits */
2139         spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2140
2141         switch (mode) {
2142         case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2143                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144                 /* clear FLOAT and set CLR */
2145                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2147                 break;
2148
2149         case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2150                 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151                 /* clear FLOAT and set SET */
2152                 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153                 spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2154                 break;
2155
2156         case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157                 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2158                 /* set FLOAT */
2159                 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2160                 break;
2161
2162         default:
2163                 break;
2164         }
2165
2166         REG_WR(bp, MISC_REG_SPIO, spio_reg);
2167         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2168
2169         return 0;
2170 }
2171
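/* Translate the IEEE pause advertisement in link_vars.ieee_fc into the
 * ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause bits kept in
 * bp->port.advertising.
 */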
2172 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2173 {
2174         switch (bp->link_vars.ieee_fc &
2175                 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2176         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2177                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2178                                           ADVERTISED_Pause);
2179                 break;
2180
2181         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2182                 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2183                                          ADVERTISED_Pause);
2184                 break;
2185
2186         case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2187                 bp->port.advertising |= ADVERTISED_Asym_Pause;
2188                 break;
2189
2190         default:
2191                 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2192                                           ADVERTISED_Pause);
2193                 break;
2194         }
2195 }
2196
2197 static void bnx2x_link_report(struct bnx2x *bp)
2198 {
2199         if (bp->flags & MF_FUNC_DIS) {
2200                 netif_carrier_off(bp->dev);
2201                 netdev_err(bp->dev, "NIC Link is Down\n");
2202                 return;
2203         }
2204
2205         if (bp->link_vars.link_up) {
2206                 u16 line_speed;
2207
2208                 if (bp->state == BNX2X_STATE_OPEN)
2209                         netif_carrier_on(bp->dev);
2210                 netdev_info(bp->dev, "NIC Link is Up, ");
2211
2212                 line_speed = bp->link_vars.line_speed;
2213                 if (IS_E1HMF(bp)) {
2214                         u16 vn_max_rate;
2215
2216                         vn_max_rate =
2217                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218                                  FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219                         if (vn_max_rate < line_speed)
2220                                 line_speed = vn_max_rate;
2221                 }
2222                 pr_cont("%d Mbps ", line_speed);
2223
2224                 if (bp->link_vars.duplex == DUPLEX_FULL)
2225                         pr_cont("full duplex");
2226                 else
2227                         pr_cont("half duplex");
2228
2229                 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231                                 pr_cont(", receive ");
2232                                 if (bp->link_vars.flow_ctrl &
2233                                     BNX2X_FLOW_CTRL_TX)
2234                                         pr_cont("& transmit ");
2235                         } else {
2236                                 pr_cont(", transmit ");
2237                         }
2238                         pr_cont("flow control ON");
2239                 }
2240                 pr_cont("\n");
2241
2242         } else { /* link_down */
2243                 netif_carrier_off(bp->dev);
2244                 netdev_err(bp->dev, "NIC Link is Down\n");
2245         }
2246 }
2247
2248 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2249 {
2250         if (!BP_NOMCP(bp)) {
2251                 u8 rc;
2252
2253                 /* Initialize link parameters structure variables */
2254                 /* It is recommended to turn off RX FC for jumbo frames
2255                    for better performance */
2256                 if (bp->dev->mtu > 5000)
2257                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2258                 else
2259                         bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2260
2261                 bnx2x_acquire_phy_lock(bp);
2262
2263                 if (load_mode == LOAD_DIAG)
2264                         bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2265
2266                 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2267
2268                 bnx2x_release_phy_lock(bp);
2269
2270                 bnx2x_calc_fc_adv(bp);
2271
2272                 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2274                         bnx2x_link_report(bp);
2275                 }
2276
2277                 return rc;
2278         }
2279         BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2280         return -EINVAL;
2281 }
2282
2283 static void bnx2x_link_set(struct bnx2x *bp)
2284 {
2285         if (!BP_NOMCP(bp)) {
2286                 bnx2x_acquire_phy_lock(bp);
2287                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2288                 bnx2x_release_phy_lock(bp);
2289
2290                 bnx2x_calc_fc_adv(bp);
2291         } else
2292                 BNX2X_ERR("Bootcode is missing - can not set link\n");
2293 }
2294
2295 static void bnx2x__link_reset(struct bnx2x *bp)
2296 {
2297         if (!BP_NOMCP(bp)) {
2298                 bnx2x_acquire_phy_lock(bp);
2299                 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2300                 bnx2x_release_phy_lock(bp);
2301         } else
2302                 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2303 }
2304
2305 static u8 bnx2x_link_test(struct bnx2x *bp)
2306 {
2307         u8 rc = 0;
2308
2309         if (!BP_NOMCP(bp)) {
2310                 bnx2x_acquire_phy_lock(bp);
2311                 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312                 bnx2x_release_phy_lock(bp);
2313         } else
2314                 BNX2X_ERR("Bootcode is missing - can not test link\n");
2315
2316         return rc;
2317 }
2318
2319 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2320 {
2321         u32 r_param = bp->link_vars.line_speed / 8;
2322         u32 fair_periodic_timeout_usec;
2323         u32 t_fair;
2324
2325         memset(&(bp->cmng.rs_vars), 0,
2326                sizeof(struct rate_shaping_vars_per_port));
2327         memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2328
2329         /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330         bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2331
2332         /* this is the threshold below which no timer arming will occur;
2333            the 1.25 coefficient makes the threshold a little bigger
2334            than the real time, to compensate for timer inaccuracy */
2335         bp->cmng.rs_vars.rs_threshold =
2336                                 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2337
2338         /* resolution of fairness timer */
2339         fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340         /* for 10G it is 1000usec. for 1G it is 10000usec. */
2341         t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2342
2343         /* this is the threshold below which we won't arm the timer anymore */
2344         bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2345
2346         /* we multiply by 1e3/8 to get bytes/msec.
2347            We don't want the credits to exceed a credit
2348            of t_fair*FAIR_MEM (the algorithm resolution) */
2349         bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350         /* since each tick is 4 usec */
2351         bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2352 }
2353
2354 /* Calculates the sum of vn_min_rates and stores it in bp->vn_weight_sum.
2355    It's needed for further normalizing of the min_rates.
2356    The stored value is:
2357      the sum of vn_min_rates,
2358        or
2359      0 - if all the min_rates are 0.
2360      In the latter case the fairness algorithm should be deactivated.
2361      If not all min_rates are zero then those that are zero will be set to 1.
2362  */
2363 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2364 {
2365         int all_zero = 1;
2366         int port = BP_PORT(bp);
2367         int vn;
2368
2369         bp->vn_weight_sum = 0;
2370         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371                 int func = 2*vn + port;
2372                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2375
2376                 /* Skip hidden vns */
2377                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2378                         continue;
2379
2380                 /* If min rate is zero - set it to 1 */
2381                 if (!vn_min_rate)
2382                         vn_min_rate = DEF_MIN_RATE;
2383                 else
2384                         all_zero = 0;
2385
2386                 bp->vn_weight_sum += vn_min_rate;
2387         }
2388
2389         /* ... only if all min rates are zeros - disable fairness */
2390         if (all_zero) {
2391                 bp->cmng.flags.cmng_enables &=
2392                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2394                    "  fairness will be disabled\n");
2395         } else
2396                 bp->cmng.flags.cmng_enables |=
2397                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2398 }
2399
2400 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2401 {
2402         struct rate_shaping_vars_per_vn m_rs_vn;
2403         struct fairness_vars_per_vn m_fair_vn;
2404         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405         u16 vn_min_rate, vn_max_rate;
2406         int i;
2407
2408         /* If function is hidden - set min and max to zeroes */
2409         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2410                 vn_min_rate = 0;
2411                 vn_max_rate = 0;
2412
2413         } else {
2414                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2416                 /* If min rate is zero - set it to 1 */
2417                 if (!vn_min_rate)
2418                         vn_min_rate = DEF_MIN_RATE;
2419                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2421         }
2422         DP(NETIF_MSG_IFUP,
2423            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
2424            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2425
2426         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2428
2429         /* global vn counter - maximal Mbps for this vn */
2430         m_rs_vn.vn_counter.rate = vn_max_rate;
2431
2432         /* quota - number of bytes transmitted in this period */
2433         m_rs_vn.vn_counter.quota =
2434                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2435
2436         if (bp->vn_weight_sum) {
2437                 /* credit for each period of the fairness algorithm:
2438                    number of bytes in T_FAIR (the vns share the port rate).
2439                    vn_weight_sum should not be larger than 10000, thus
2440                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2441                    than zero */
2442                 m_fair_vn.vn_credit_delta =
2443                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444                                                    (8 * bp->vn_weight_sum))),
2445                               (bp->cmng.fair_vars.fair_threshold * 2));
2446                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2447                    m_fair_vn.vn_credit_delta);
2448         }
2449
2450         /* Store it to internal memory */
2451         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454                        ((u32 *)(&m_rs_vn))[i]);
2455
2456         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459                        ((u32 *)(&m_fair_vn))[i]);
2460 }
2461
2462
2463 /* This function is called upon link interrupt */
2464 static void bnx2x_link_attn(struct bnx2x *bp)
2465 {
2466         u32 prev_link_status = bp->link_vars.link_status;
2467         /* Make sure that we are synced with the current statistics */
2468         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2469
2470         bnx2x_link_update(&bp->link_params, &bp->link_vars);
2471
2472         if (bp->link_vars.link_up) {
2473
2474                 /* dropless flow control */
2475                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2476                         int port = BP_PORT(bp);
2477                         u32 pause_enabled = 0;
2478
2479                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2480                                 pause_enabled = 1;
2481
2482                         REG_WR(bp, BAR_USTRORM_INTMEM +
2483                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2484                                pause_enabled);
2485                 }
2486
2487                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488                         struct host_port_stats *pstats;
2489
2490                         pstats = bnx2x_sp(bp, port_stats);
2491                         /* reset old bmac stats */
2492                         memset(&(pstats->mac_stx[0]), 0,
2493                                sizeof(struct mac_stx));
2494                 }
2495                 if (bp->state == BNX2X_STATE_OPEN)
2496                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2497         }
2498
2499         /* indicate link status only if link status actually changed */
2500         if (prev_link_status != bp->link_vars.link_status)
2501                 bnx2x_link_report(bp);
2502
2503         if (IS_E1HMF(bp)) {
2504                 int port = BP_PORT(bp);
2505                 int func;
2506                 int vn;
2507
2508                 /* Set the attention towards other drivers on the same port */
2509                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510                         if (vn == BP_E1HVN(bp))
2511                                 continue;
2512
2513                         func = ((vn << 1) | port);
2514                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2516                 }
2517
2518                 if (bp->link_vars.link_up) {
2519                         int i;
2520
2521                         /* Init rate shaping and fairness contexts */
2522                         bnx2x_init_port_minmax(bp);
2523
2524                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2525                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2526
2527                         /* Store it to internal memory */
2528                         for (i = 0;
2529                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532                                        ((u32 *)(&bp->cmng))[i]);
2533                 }
2534         }
2535 }
2536
2537 static void bnx2x__link_status_update(struct bnx2x *bp)
2538 {
2539         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2540                 return;
2541
2542         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2543
2544         if (bp->link_vars.link_up)
2545                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2546         else
2547                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2548
2549         bnx2x_calc_vn_weight_sum(bp);
2550
2551         /* indicate link status */
2552         bnx2x_link_report(bp);
2553 }
2554
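/* Called when this function becomes the port management function (PMF):
 * open the NIG attention for this vn and kick the statistics state
 * machine with a PMF event.
 */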
2555 static void bnx2x_pmf_update(struct bnx2x *bp)
2556 {
2557         int port = BP_PORT(bp);
2558         u32 val;
2559
2560         bp->port.pmf = 1;
2561         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2562
2563         /* enable nig attention */
2564         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2567
2568         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2569 }
2570
2571 /* end of Link */
2572
2573 /* slow path */
2574
2575 /*
2576  * General service functions
2577  */
2578
2579 /* send the MCP a request, block until there is a reply */
2580 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2581 {
2582         int func = BP_FUNC(bp);
2583         u32 seq = ++bp->fw_seq;
2584         u32 rc = 0;
2585         u32 cnt = 1;
2586         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2587
2588         mutex_lock(&bp->fw_mb_mutex);
2589         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2591
2592         do {
2593                 /* let the FW do its magic ... */
2594                 msleep(delay);
2595
2596                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2597
2598                 /* Give the FW up to 5 seconds (500*10ms) */
2599         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2600
2601         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602            cnt*delay, rc, seq);
2603
2604         /* is this a reply to our command? */
2605         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606                 rc &= FW_MSG_CODE_MASK;
2607         else {
2608                 /* FW BUG! */
2609                 BNX2X_ERR("FW failed to respond!\n");
2610                 bnx2x_fw_dump(bp);
2611                 rc = 0;
2612         }
2613         mutex_unlock(&bp->fw_mb_mutex);
2614
2615         return rc;
2616 }
2617
2618 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619 static void bnx2x_set_rx_mode(struct net_device *dev);
2620
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2622 {
2623         int port = BP_PORT(bp);
2624
2625         netif_tx_disable(bp->dev);
2626
2627         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2628
2629         netif_carrier_off(bp->dev);
2630 }
2631
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2633 {
2634         int port = BP_PORT(bp);
2635
2636         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2637
2638         /* Tx queues only need to be re-enabled */
2639         netif_tx_wake_all_queues(bp->dev);
2640
2641         /*
2642          * Do not call netif_carrier_on here - it will be called when the
2643          * link state is checked, if the link is indeed up
2644          */
2645 }
2646
2647 static void bnx2x_update_min_max(struct bnx2x *bp)
2648 {
2649         int port = BP_PORT(bp);
2650         int vn, i;
2651
2652         /* Init rate shaping and fairness contexts */
2653         bnx2x_init_port_minmax(bp);
2654
2655         bnx2x_calc_vn_weight_sum(bp);
2656
2657         for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658                 bnx2x_init_vn_minmax(bp, 2*vn + port);
2659
2660         if (bp->port.pmf) {
2661                 int func;
2662
2663                 /* Set the attention towards other drivers on the same port */
2664                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665                         if (vn == BP_E1HVN(bp))
2666                                 continue;
2667
2668                         func = ((vn << 1) | port);
2669                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2671                 }
2672
2673                 /* Store it to internal memory */
2674                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675                         REG_WR(bp, BAR_XSTRORM_INTMEM +
2676                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677                                ((u32 *)(&bp->cmng))[i]);
2678         }
2679 }
2680
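/* Handle a DCC event from the MCP: disable/enable this PF and/or
 * refresh the bandwidth allocation, then report the outcome back to
 * the MCP.
 */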
2681 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2682 {
2683         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2684
2685         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2686
2687                 /*
2688                  * This is the only place besides the function initialization
2689                  * where the bp->flags can change so it is done without any
2690                  * locks
2691                  */
2692                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2694                         bp->flags |= MF_FUNC_DIS;
2695
2696                         bnx2x_e1h_disable(bp);
2697                 } else {
2698                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2699                         bp->flags &= ~MF_FUNC_DIS;
2700
2701                         bnx2x_e1h_enable(bp);
2702                 }
2703                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2704         }
2705         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2706
2707                 bnx2x_update_min_max(bp);
2708                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2709         }
2710
2711         /* Report results to MCP */
2712         if (dcc_event)
2713                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2714         else
2715                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2716 }
2717
2718 /* must be called under the spq lock */
2719 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2720 {
2721         struct eth_spe *next_spe = bp->spq_prod_bd;
2722
2723         if (bp->spq_prod_bd == bp->spq_last_bd) {
2724                 bp->spq_prod_bd = bp->spq;
2725                 bp->spq_prod_idx = 0;
2726                 DP(NETIF_MSG_TIMER, "end of spq\n");
2727         } else {
2728                 bp->spq_prod_bd++;
2729                 bp->spq_prod_idx++;
2730         }
2731         return next_spe;
2732 }
2733
2734 /* must be called under the spq lock */
2735 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2736 {
2737         int func = BP_FUNC(bp);
2738
2739         /* Make sure that BD data is updated before writing the producer */
2740         wmb();
2741
2742         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2743                bp->spq_prod_idx);
2744         mmiowb();
2745 }
2746
2747 /* the slow path queue is odd since completions arrive on the fastpath ring */
2748 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749                          u32 data_hi, u32 data_lo, int common)
2750 {
2751         struct eth_spe *spe;
2752
2753 #ifdef BNX2X_STOP_ON_ERROR
2754         if (unlikely(bp->panic))
2755                 return -EIO;
2756 #endif
2757
2758         spin_lock_bh(&bp->spq_lock);
2759
2760         if (!bp->spq_left) {
2761                 BNX2X_ERR("BUG! SPQ ring full!\n");
2762                 spin_unlock_bh(&bp->spq_lock);
2763                 bnx2x_panic();
2764                 return -EBUSY;
2765         }
2766
2767         spe = bnx2x_sp_get_next(bp);
2768
2769         /* CID needs the port number to be encoded in it */
2770         spe->hdr.conn_and_cmd_data =
2771                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2772                                     HW_CID(bp, cid));
2773         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2774         if (common)
2775                 spe->hdr.type |=
2776                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2777
2778         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2780
2781         bp->spq_left--;
2782
2783         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
2785            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786            (u32)(U64_LO(bp->spq_mapping) +
2787            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2789
2790         bnx2x_sp_prod_update(bp);
2791         spin_unlock_bh(&bp->spq_lock);
2792         return 0;
2793 }
2794
2795 /* acquire split MCP access lock register */
2796 static int bnx2x_acquire_alr(struct bnx2x *bp)
2797 {
2798         u32 j, val;
2799         int rc = 0;
2800
2801         might_sleep();
2802         for (j = 0; j < 1000; j++) {
2803                 val = (1UL << 31);
2804                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806                 if (val & (1L << 31))
2807                         break;
2808
2809                 msleep(5);
2810         }
2811         if (!(val & (1L << 31))) {
2812                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2813                 rc = -EBUSY;
2814         }
2815
2816         return rc;
2817 }
2818
2819 /* release split MCP access lock register */
2820 static void bnx2x_release_alr(struct bnx2x *bp)
2821 {
2822         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
2823 }
2824
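/* Refresh the cached default status block indices; the return value is
 * a bitmask of what changed: 1 - attention bits, 2 - cstorm, 4 - ustorm,
 * 8 - xstorm, 16 - tstorm.
 */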
2825 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2826 {
2827         struct host_def_status_block *def_sb = bp->def_status_blk;
2828         u16 rc = 0;
2829
2830         barrier(); /* status block is written to by the chip */
2831         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2833                 rc |= 1;
2834         }
2835         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2837                 rc |= 2;
2838         }
2839         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2841                 rc |= 4;
2842         }
2843         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2845                 rc |= 8;
2846         }
2847         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2849                 rc |= 16;
2850         }
2851         return rc;
2852 }
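
/*
 * Illustrative decode (a sketch) of the bnx2x_update_dsb_idx() return
 * value, following the bit assignments above: bit 0 - attention bits
 * index changed, bits 1/2/3/4 - CSTORM/USTORM/XSTORM/TSTORM default
 * status block index changed.  bnx2x_sp_task() below consumes it as:
 *
 *	u16 status = bnx2x_update_dsb_idx(bp);
 *
 *	if (status & 0x1)
 *		... handle HW attentions ...
 *	if (status & 0x2)
 *		... handle CSTORM events ...
 */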
2853
2854 /*
2855  * slow path service functions
2856  */
2857
2858 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2859 {
2860         int port = BP_PORT(bp);
2861         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862                        COMMAND_REG_ATTN_BITS_SET);
2863         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
2865         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866                                        NIG_REG_MASK_INTERRUPT_PORT0;
2867         u32 aeu_mask;
2868         u32 nig_mask = 0;
2869
2870         if (bp->attn_state & asserted)
2871                 BNX2X_ERR("IGU ERROR\n");
2872
2873         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874         aeu_mask = REG_RD(bp, aeu_addr);
2875
2876         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
2877            aeu_mask, asserted);
2878         aeu_mask &= ~(asserted & 0x3ff);
2879         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2880
2881         REG_WR(bp, aeu_addr, aeu_mask);
2882         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2883
2884         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885         bp->attn_state |= asserted;
2886         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2887
2888         if (asserted & ATTN_HARD_WIRED_MASK) {
2889                 if (asserted & ATTN_NIG_FOR_FUNC) {
2890
2891                         bnx2x_acquire_phy_lock(bp);
2892
2893                         /* save nig interrupt mask */
2894                         nig_mask = REG_RD(bp, nig_int_mask_addr);
2895                         REG_WR(bp, nig_int_mask_addr, 0);
2896
2897                         bnx2x_link_attn(bp);
2898
2899                         /* handle unicore attn? */
2900                 }
2901                 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2903
2904                 if (asserted & GPIO_2_FUNC)
2905                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2906
2907                 if (asserted & GPIO_3_FUNC)
2908                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2909
2910                 if (asserted & GPIO_4_FUNC)
2911                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2912
2913                 if (port == 0) {
2914                         if (asserted & ATTN_GENERAL_ATTN_1) {
2915                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2917                         }
2918                         if (asserted & ATTN_GENERAL_ATTN_2) {
2919                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2921                         }
2922                         if (asserted & ATTN_GENERAL_ATTN_3) {
2923                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2925                         }
2926                 } else {
2927                         if (asserted & ATTN_GENERAL_ATTN_4) {
2928                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2930                         }
2931                         if (asserted & ATTN_GENERAL_ATTN_5) {
2932                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2934                         }
2935                         if (asserted & ATTN_GENERAL_ATTN_6) {
2936                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2938                         }
2939                 }
2940
2941         } /* if hardwired */
2942
2943         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2944            asserted, hc_addr);
2945         REG_WR(bp, hc_addr, asserted);
2946
2947         /* now set back the mask */
2948         if (asserted & ATTN_NIG_FOR_FUNC) {
2949                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2950                 bnx2x_release_phy_lock(bp);
2951         }
2952 }
2953
2954 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2955 {
2956         int port = BP_PORT(bp);
2957
2958         /* mark the failure */
2959         bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960         bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962                  bp->link_params.ext_phy_config);
2963
2964         /* log the failure */
2965         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966                " the driver to shut down the card to prevent permanent"
2967                " damage.  Please contact OEM Support for assistance\n");
2968 }
2969
2970 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2971 {
2972         int port = BP_PORT(bp);
2973         int reg_offset;
2974         u32 val, swap_val, swap_override;
2975
2976         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2978
2979         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2980
2981                 val = REG_RD(bp, reg_offset);
2982                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983                 REG_WR(bp, reg_offset, val);
2984
2985                 BNX2X_ERR("SPIO5 hw attention\n");
2986
2987                 /* Fan failure attention */
2988                 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2990                         /* Low power mode is controlled by GPIO 2 */
2991                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2992                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993                         /* The PHY reset is controlled by GPIO 1 */
2994                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2996                         break;
2997
2998                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999                         /* The PHY reset is controlled by GPIO 1 */
3000                         /* fake the port number to cancel the swap done in
3001                            set_gpio() */
3002                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003                         swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004                         port = (swap_val && swap_override) ^ 1;
3005                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006                                        MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3007                         break;
3008
3009                 default:
3010                         break;
3011                 }
3012                 bnx2x_fan_failure(bp);
3013         }
3014
3015         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017                 bnx2x_acquire_phy_lock(bp);
3018                 bnx2x_handle_module_detect_int(&bp->link_params);
3019                 bnx2x_release_phy_lock(bp);
3020         }
3021
3022         if (attn & HW_INTERRUT_ASSERT_SET_0) {
3023
3024                 val = REG_RD(bp, reg_offset);
3025                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026                 REG_WR(bp, reg_offset, val);
3027
3028                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3030                 bnx2x_panic();
3031         }
3032 }
3033
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3035 {
3036         u32 val;
3037
3038         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3039
3040                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041                 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042                 /* DORQ discard attention */
3043                 if (val & 0x2)
3044                         BNX2X_ERR("FATAL error from DORQ\n");
3045         }
3046
3047         if (attn & HW_INTERRUT_ASSERT_SET_1) {
3048
3049                 int port = BP_PORT(bp);
3050                 int reg_offset;
3051
3052                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3054
3055                 val = REG_RD(bp, reg_offset);
3056                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057                 REG_WR(bp, reg_offset, val);
3058
3059                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3061                 bnx2x_panic();
3062         }
3063 }
3064
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3066 {
3067         u32 val;
3068
3069         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3070
3071                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073                 /* CFC error attention */
3074                 if (val & 0x2)
3075                         BNX2X_ERR("FATAL error from CFC\n");
3076         }
3077
3078         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3079
3080                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082                 /* RQ_USDMDP_FIFO_OVERFLOW */
3083                 if (val & 0x18000)
3084                         BNX2X_ERR("FATAL error from PXP\n");
3085         }
3086
3087         if (attn & HW_INTERRUT_ASSERT_SET_2) {
3088
3089                 int port = BP_PORT(bp);
3090                 int reg_offset;
3091
3092                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3094
3095                 val = REG_RD(bp, reg_offset);
3096                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097                 REG_WR(bp, reg_offset, val);
3098
3099                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3100                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3101                 bnx2x_panic();
3102         }
3103 }
3104
3105 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3106 {
3107         u32 val;
3108
3109         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3110
3111                 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112                         int func = BP_FUNC(bp);
3113
3114                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3115                         bp->mf_config = SHMEM_RD(bp,
3116                                            mf_cfg.func_mf_config[func].config);
3117                         val = SHMEM_RD(bp, func_mb[func].drv_status);
3118                         if (val & DRV_STATUS_DCC_EVENT_MASK)
3119                                 bnx2x_dcc_event(bp,
3120                                             (val & DRV_STATUS_DCC_EVENT_MASK));
3121                         bnx2x__link_status_update(bp);
3122                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3123                                 bnx2x_pmf_update(bp);
3124
3125                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3126
3127                         BNX2X_ERR("MC assert!\n");
3128                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3132                         bnx2x_panic();
3133
3134                 } else if (attn & BNX2X_MCP_ASSERT) {
3135
3136                         BNX2X_ERR("MCP assert!\n");
3137                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3138                         bnx2x_fw_dump(bp);
3139
3140                 } else
3141                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3142         }
3143
3144         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3145                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146                 if (attn & BNX2X_GRC_TIMEOUT) {
3147                         val = CHIP_IS_E1H(bp) ?
3148                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
3150                 }
3151                 if (attn & BNX2X_GRC_RSV) {
3152                         val = CHIP_IS_E1H(bp) ?
3153                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
3155                 }
3156                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3157         }
3158 }
3159
3160 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3162
3163
3164 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
3169 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3170 /*
3171  * should be run under rtnl lock
3172  */
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3174 {
3175         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3178         barrier();
3179         mmiowb();
3180 }
3181
3182 /*
3183  * should be run under rtnl lock
3184  */
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3186 {
3187         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188         val |= (1 << RESET_DONE_FLAG_SHIFT);
3189         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3190         barrier();
3191         mmiowb();
3192 }
3193
3194 /*
3195  * should be run under rtnl lock
3196  */
3197 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3198 {
3199         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201         return !(val & RESET_DONE_FLAG_MASK);
3202 }
3203
3204 /*
3205  * should be run under rtnl lock
3206  */
3207 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3208 {
3209         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3210
3211         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3212
3213         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3215         barrier();
3216         mmiowb();
3217 }
3218
3219 /*
3220  * should be run under rtnl lock
3221  */
3222 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3223 {
3224         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3225
3226         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3227
3228         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3230         barrier();
3231         mmiowb();
3232
3233         return val1;
3234 }
3235
3236 /*
3237  * should be run under rtnl lock
3238  */
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3240 {
3241         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3242 }
3243
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3245 {
3246         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3248 }
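
/*
 * Illustrative layout (a sketch) of BNX2X_MISC_GEN_REG as used by the
 * helpers above, derived from the masks defined earlier:
 *
 *	bits 15:0  - global load counter, one count per loaded function
 *	bit  16    - "reset in progress" flag (RESET_DONE_FLAG_SHIFT)
 *
 * A hypothetical value of 0x00010002 therefore means: recovery in
 * progress while two functions are still counted as loaded:
 *
 *	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
 *	u32 load_cnt = val & LOAD_COUNTER_MASK;
 *	bool in_recovery = !!(val & RESET_DONE_FLAG_MASK);
 */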
3249
3250 static inline void _print_next_block(int idx, const char *blk)
3251 {
3252         if (idx)
3253                 pr_cont(", ");
3254         pr_cont("%s", blk);
3255 }
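
/*
 * Example (a sketch): successive calls with an increasing index build one
 * comma separated list on the console, continuing the KERN_ERR line
 * opened by bnx2x_parity_attn() below:
 *
 *	_print_next_block(0, "BRB");	prints "BRB"
 *	_print_next_block(1, "QM");	prints ", QM"
 *	_print_next_block(2, "CFC");	prints ", CFC"
 */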
3256
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3258 {
3259         int i = 0;
3260         u32 cur_bit = 0;
3261         for (i = 0; sig; i++) {
3262                 cur_bit = ((u32)0x1 << i);
3263                 if (sig & cur_bit) {
3264                         switch (cur_bit) {
3265                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266                                 _print_next_block(par_num++, "BRB");
3267                                 break;
3268                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269                                 _print_next_block(par_num++, "PARSER");
3270                                 break;
3271                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272                                 _print_next_block(par_num++, "TSDM");
3273                                 break;
3274                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275                                 _print_next_block(par_num++, "SEARCHER");
3276                                 break;
3277                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278                                 _print_next_block(par_num++, "TSEMI");
3279                                 break;
3280                         }
3281
3282                         /* Clear the bit */
3283                         sig &= ~cur_bit;
3284                 }
3285         }
3286
3287         return par_num;
3288 }
3289
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3291 {
3292         int i = 0;
3293         u32 cur_bit = 0;
3294         for (i = 0; sig; i++) {
3295                 cur_bit = ((u32)0x1 << i);
3296                 if (sig & cur_bit) {
3297                         switch (cur_bit) {
3298                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299                                 _print_next_block(par_num++, "PBCLIENT");
3300                                 break;
3301                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302                                 _print_next_block(par_num++, "QM");
3303                                 break;
3304                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305                                 _print_next_block(par_num++, "XSDM");
3306                                 break;
3307                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308                                 _print_next_block(par_num++, "XSEMI");
3309                                 break;
3310                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311                                 _print_next_block(par_num++, "DOORBELLQ");
3312                                 break;
3313                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314                                 _print_next_block(par_num++, "VAUX PCI CORE");
3315                                 break;
3316                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317                                 _print_next_block(par_num++, "DEBUG");
3318                                 break;
3319                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320                                 _print_next_block(par_num++, "USDM");
3321                                 break;
3322                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323                                 _print_next_block(par_num++, "USEMI");
3324                                 break;
3325                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326                                 _print_next_block(par_num++, "UPB");
3327                                 break;
3328                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329                                 _print_next_block(par_num++, "CSDM");
3330                                 break;
3331                         }
3332
3333                         /* Clear the bit */
3334                         sig &= ~cur_bit;
3335                 }
3336         }
3337
3338         return par_num;
3339 }
3340
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3342 {
3343         int i = 0;
3344         u32 cur_bit = 0;
3345         for (i = 0; sig; i++) {
3346                 cur_bit = ((u32)0x1 << i);
3347                 if (sig & cur_bit) {
3348                         switch (cur_bit) {
3349                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350                                 _print_next_block(par_num++, "CSEMI");
3351                                 break;
3352                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353                                 _print_next_block(par_num++, "PXP");
3354                                 break;
3355                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356                                 _print_next_block(par_num++,
3357                                         "PXPPCICLOCKCLIENT");
3358                                 break;
3359                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360                                 _print_next_block(par_num++, "CFC");
3361                                 break;
3362                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363                                 _print_next_block(par_num++, "CDU");
3364                                 break;
3365                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366                                 _print_next_block(par_num++, "IGU");
3367                                 break;
3368                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369                                 _print_next_block(par_num++, "MISC");
3370                                 break;
3371                         }
3372
3373                         /* Clear the bit */
3374                         sig &= ~cur_bit;
3375                 }
3376         }
3377
3378         return par_num;
3379 }
3380
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3382 {
3383         int i = 0;
3384         u32 cur_bit = 0;
3385         for (i = 0; sig; i++) {
3386                 cur_bit = ((u32)0x1 << i);
3387                 if (sig & cur_bit) {
3388                         switch (cur_bit) {
3389                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390                                 _print_next_block(par_num++, "MCP ROM");
3391                                 break;
3392                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393                                 _print_next_block(par_num++, "MCP UMP RX");
3394                                 break;
3395                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396                                 _print_next_block(par_num++, "MCP UMP TX");
3397                                 break;
3398                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399                                 _print_next_block(par_num++, "MCP SCPAD");
3400                                 break;
3401                         }
3402
3403                         /* Clear the bit */
3404                         sig &= ~cur_bit;
3405                 }
3406         }
3407
3408         return par_num;
3409 }
3410
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3412                                      u32 sig2, u32 sig3)
3413 {
3414         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3416                 int par_num = 0;
3417                 DP(NETIF_MSG_HW, "Parity error detected: HW block parity attention: "
3418                         "[0]:0x%08x [1]:0x%08x "
3419                         "[2]:0x%08x [3]:0x%08x\n",
3420                           sig0 & HW_PRTY_ASSERT_SET_0,
3421                           sig1 & HW_PRTY_ASSERT_SET_1,
3422                           sig2 & HW_PRTY_ASSERT_SET_2,
3423                           sig3 & HW_PRTY_ASSERT_SET_3);
3424                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
3425                        bp->dev->name);
3426                 par_num = bnx2x_print_blocks_with_parity0(
3427                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428                 par_num = bnx2x_print_blocks_with_parity1(
3429                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430                 par_num = bnx2x_print_blocks_with_parity2(
3431                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432                 par_num = bnx2x_print_blocks_with_parity3(
3433                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3434                 pr_cont("\n");
3435                 return true;
3436         } else
3437                 return false;
3438 }
3439
3440 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3441 {
3442         struct attn_route attn;
3443         int port = BP_PORT(bp);
3444
3445         attn.sig[0] = REG_RD(bp,
3446                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3447                              port*4);
3448         attn.sig[1] = REG_RD(bp,
3449                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3450                              port*4);
3451         attn.sig[2] = REG_RD(bp,
3452                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3453                              port*4);
3454         attn.sig[3] = REG_RD(bp,
3455                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3456                              port*4);
3457
3458         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3459                                         attn.sig[3]);
3460 }
3461
3462 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3463 {
3464         struct attn_route attn, *group_mask;
3465         int port = BP_PORT(bp);
3466         int index;
3467         u32 reg_addr;
3468         u32 val;
3469         u32 aeu_mask;
3470
3471         /* need to take HW lock because MCP or other port might also
3472            try to handle this event */
3473         bnx2x_acquire_alr(bp);
3474
3475         if (bnx2x_chk_parity_attn(bp)) {
3476                 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477                 bnx2x_set_reset_in_progress(bp);
3478                 schedule_delayed_work(&bp->reset_task, 0);
3479                 /* Disable HW interrupts */
3480                 bnx2x_int_disable(bp);
3481                 bnx2x_release_alr(bp);
3482                 /* In case of parity errors don't handle attentions so that
3483                  * the other function can also "see" the parity errors.
3484                  */
3485                 return;
3486         }
3487
3488         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3494
3495         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496                 if (deasserted & (1 << index)) {
3497                         group_mask = &bp->attn_group[index];
3498
3499                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500                            index, group_mask->sig[0], group_mask->sig[1],
3501                            group_mask->sig[2], group_mask->sig[3]);
3502
3503                         bnx2x_attn_int_deasserted3(bp,
3504                                         attn.sig[3] & group_mask->sig[3]);
3505                         bnx2x_attn_int_deasserted1(bp,
3506                                         attn.sig[1] & group_mask->sig[1]);
3507                         bnx2x_attn_int_deasserted2(bp,
3508                                         attn.sig[2] & group_mask->sig[2]);
3509                         bnx2x_attn_int_deasserted0(bp,
3510                                         attn.sig[0] & group_mask->sig[0]);
3511                 }
3512         }
3513
3514         bnx2x_release_alr(bp);
3515
3516         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3517
3518         val = ~deasserted;
3519         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3520            val, reg_addr);
3521         REG_WR(bp, reg_addr, val);
3522
3523         if (~bp->attn_state & deasserted)
3524                 BNX2X_ERR("IGU ERROR\n");
3525
3526         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
3528
3529         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530         aeu_mask = REG_RD(bp, reg_addr);
3531
3532         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
3533            aeu_mask, deasserted);
3534         aeu_mask |= (deasserted & 0x3ff);
3535         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3536
3537         REG_WR(bp, reg_addr, aeu_mask);
3538         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3539
3540         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541         bp->attn_state &= ~deasserted;
3542         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3543 }
3544
3545 static void bnx2x_attn_int(struct bnx2x *bp)
3546 {
3547         /* read local copy of bits */
3548         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3549                                                                 attn_bits);
3550         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3551                                                                 attn_bits_ack);
3552         u32 attn_state = bp->attn_state;
3553
3554         /* look for changed bits */
3555         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
3556         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
3557
3558         DP(NETIF_MSG_HW,
3559            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
3560            attn_bits, attn_ack, asserted, deasserted);
3561
3562         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3563                 BNX2X_ERR("BAD attention state\n");
3564
3565         /* handle bits that were raised */
3566         if (asserted)
3567                 bnx2x_attn_int_asserted(bp, asserted);
3568
3569         if (deasserted)
3570                 bnx2x_attn_int_deasserted(bp, deasserted);
3571 }
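
/*
 * Worked example (a sketch) of the assert/deassert arithmetic above for a
 * single hypothetical attention line, bit 0:
 *
 *	attn_bits = 1, attn_ack = 0, attn_state = 0
 *		asserted   = 1 & ~0 & ~0 = 1	(newly raised)
 *	attn_bits = 0, attn_ack = 1, attn_state = 1
 *		deasserted = ~0 & 1 & 1  = 1	(just dropped)
 *
 * A bit where attn_bits equals attn_ack but differs from attn_state is
 * inconsistent and triggers the "BAD attention state" message above.
 */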
3572
3573 static void bnx2x_sp_task(struct work_struct *work)
3574 {
3575         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3576         u16 status;
3577
3578         /* Return here if interrupt is disabled */
3579         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3580                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3581                 return;
3582         }
3583
3584         status = bnx2x_update_dsb_idx(bp);
3585 /*      if (status == 0)                                     */
3586 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
3587
3588         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3589
3590         /* HW attentions */
3591         if (status & 0x1) {
3592                 bnx2x_attn_int(bp);
3593                 status &= ~0x1;
3594         }
3595
3596         /* CStorm events: STAT_QUERY */
3597         if (status & 0x2) {
3598                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3599                 status &= ~0x2;
3600         }
3601
3602         if (unlikely(status))
3603                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3604                    status);
3605
3606         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3607                      IGU_INT_NOP, 1);
3608         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3609                      IGU_INT_NOP, 1);
3610         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3611                      IGU_INT_NOP, 1);
3612         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3613                      IGU_INT_NOP, 1);
3614         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3615                      IGU_INT_ENABLE, 1);
3616 }
3617
3618 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3619 {
3620         struct net_device *dev = dev_instance;
3621         struct bnx2x *bp = netdev_priv(dev);
3622
3623         /* Return here if interrupt is disabled */
3624         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3625                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3626                 return IRQ_HANDLED;
3627         }
3628
3629         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3630
3631 #ifdef BNX2X_STOP_ON_ERROR
3632         if (unlikely(bp->panic))
3633                 return IRQ_HANDLED;
3634 #endif
3635
3636 #ifdef BCM_CNIC
3637         {
3638                 struct cnic_ops *c_ops;
3639
3640                 rcu_read_lock();
3641                 c_ops = rcu_dereference(bp->cnic_ops);
3642                 if (c_ops)
3643                         c_ops->cnic_handler(bp->cnic_data, NULL);
3644                 rcu_read_unlock();
3645         }
3646 #endif
3647         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3648
3649         return IRQ_HANDLED;
3650 }
3651
3652 /* end of slow path */
3653
3654 /* Statistics */
3655
3656 /****************************************************************************
3657 * Macros
3658 ****************************************************************************/
3659
3660 /* sum[hi:lo] += add[hi:lo] */
3661 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3662         do { \
3663                 s_lo += a_lo; \
3664                 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3665         } while (0)
3666
3667 /* difference = minuend - subtrahend */
3668 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3669         do { \
3670                 if (m_lo < s_lo) { \
3671                         /* underflow */ \
3672                         d_hi = m_hi - s_hi; \
3673                         if (d_hi > 0) { \
3674                                 /* we can 'loan' 1 */ \
3675                                 d_hi--; \
3676                                 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3677                         } else { \
3678                                 /* m_hi <= s_hi */ \
3679                                 d_hi = 0; \
3680                                 d_lo = 0; \
3681                         } \
3682                 } else { \
3683                         /* m_lo >= s_lo */ \
3684                         if (m_hi < s_hi) { \
3685                                 d_hi = 0; \
3686                                 d_lo = 0; \
3687                         } else { \
3688                                 /* m_hi >= s_hi */ \
3689                                 d_hi = m_hi - s_hi; \
3690                                 d_lo = m_lo - s_lo; \
3691                         } \
3692                 } \
3693         } while (0)
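
/*
 * Worked example (a sketch) of the split 64-bit arithmetic above, with
 * hypothetical values.  ADD_64 propagates the carry out of the low word:
 *
 *	s = 0x00000000:ffffffff, a = 0x00000000:00000001
 *	ADD_64(s_hi, a_hi, s_lo, a_lo) yields
 *		s_lo = 0x00000000	(wrapped, so s_lo < a_lo)
 *		s_hi = 0x00000001	(carry added)
 *
 * DIFF_64 borrows from the high word the same way and clamps the result
 * to 0 when the subtrahend exceeds the minuend.
 */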
3694
3695 #define UPDATE_STAT64(s, t) \
3696         do { \
3697                 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698                         diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699                 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700                 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701                 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702                        pstats->mac_stx[1].t##_lo, diff.lo); \
3703         } while (0)
3704
3705 #define UPDATE_STAT64_NIG(s, t) \
3706         do { \
3707                 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708                         diff.lo, new->s##_lo, old->s##_lo); \
3709                 ADD_64(estats->t##_hi, diff.hi, \
3710                        estats->t##_lo, diff.lo); \
3711         } while (0)
3712
3713 /* sum[hi:lo] += add */
3714 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3715         do { \
3716                 s_lo += a; \
3717                 s_hi += (s_lo < a) ? 1 : 0; \
3718         } while (0)
3719
3720 #define UPDATE_EXTEND_STAT(s) \
3721         do { \
3722                 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723                               pstats->mac_stx[1].s##_lo, \
3724                               new->s); \
3725         } while (0)
3726
3727 #define UPDATE_EXTEND_TSTAT(s, t) \
3728         do { \
3729                 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730                 old_tclient->s = tclient->s; \
3731                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3732         } while (0)
3733
3734 #define UPDATE_EXTEND_USTAT(s, t) \
3735         do { \
3736                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737                 old_uclient->s = uclient->s; \
3738                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3739         } while (0)
3740
3741 #define UPDATE_EXTEND_XSTAT(s, t) \
3742         do { \
3743                 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744                 old_xclient->s = xclient->s; \
3745                 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3746         } while (0)
3747
3748 /* minuend -= subtrahend */
3749 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3750         do { \
3751                 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3752         } while (0)
3753
3754 /* minuend[hi:lo] -= subtrahend */
3755 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3756         do { \
3757                 SUB_64(m_hi, 0, m_lo, s); \
3758         } while (0)
3759
3760 #define SUB_EXTEND_USTAT(s, t) \
3761         do { \
3762                 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763                 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3764         } while (0)
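
/*
 * Illustrative expansion (a sketch) of UPDATE_EXTEND_TSTAT(s, t) for a
 * hypothetical storm counter "s" folded into qstats field "t".  The storm
 * counters are 32 bit and wrap, so only the delta since the previous
 * snapshot is accumulated; unsigned subtraction gives the correct delta
 * even across a wrap:
 *
 *	diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s);
 *	old_tclient->s = tclient->s;
 *	ADD_EXTEND_64(qstats->t_hi, qstats->t_lo, diff);
 */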
3765
3766 /*
3767  * General service functions
3768  */
3769
3770 static inline long bnx2x_hilo(u32 *hiref)
3771 {
3772         u32 lo = *(hiref + 1);
3773 #if (BITS_PER_LONG == 64)
3774         u32 hi = *hiref;
3775
3776         return HILO_U64(hi, lo);
3777 #else
3778         return lo;
3779 #endif
3780 }
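
/*
 * Example (a sketch): statistics are kept as {hi, lo} u32 pairs and
 * bnx2x_hilo() folds such a pair into a long for reporting.  On a 64-bit
 * kernel the full value is returned, on 32-bit only the low word:
 *
 *	u32 pair[2] = { 0x00000001, 0x00000002 };	hypothetical {hi, lo}
 *
 *	bnx2x_hilo(pair) == 0x100000002 on 64-bit, 0x00000002 on 32-bit
 */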
3781
3782 /*
3783  * Init service functions
3784  */
3785
3786 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3787 {
3788         if (!bp->stats_pending) {
3789                 struct eth_query_ramrod_data ramrod_data = {0};
3790                 int i, rc;
3791
3792                 ramrod_data.drv_counter = bp->stats_counter++;
3793                 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794                 for_each_queue(bp, i)
3795                         ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3796
3797                 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3798                                    ((u32 *)&ramrod_data)[1],
3799                                    ((u32 *)&ramrod_data)[0], 0);
3800                 if (rc == 0) {
3801                 /* stats ramrod has its own slot on the spq */
3802                         bp->spq_left++;
3803                         bp->stats_pending = 1;
3804                 }
3805         }
3806 }
3807
3808 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3809 {
3810         struct dmae_command *dmae = &bp->stats_dmae;
3811         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3812
3813         *stats_comp = DMAE_COMP_VAL;
3814         if (CHIP_REV_IS_SLOW(bp))
3815                 return;
3816
3817         /* loader */
3818         if (bp->executer_idx) {
3819                 int loader_idx = PMF_DMAE_C(bp);
3820
3821                 memset(dmae, 0, sizeof(struct dmae_command));
3822
3823                 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3824                                 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3825                                 DMAE_CMD_DST_RESET |
3826 #ifdef __BIG_ENDIAN
3827                                 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3828 #else
3829                                 DMAE_CMD_ENDIANITY_DW_SWAP |
3830 #endif
3831                                 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3832                                                DMAE_CMD_PORT_0) |
3833                                 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3834                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3835                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3836                 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3837                                      sizeof(struct dmae_command) *
3838                                      (loader_idx + 1)) >> 2;
3839                 dmae->dst_addr_hi = 0;
3840                 dmae->len = sizeof(struct dmae_command) >> 2;
3841                 if (CHIP_IS_E1(bp))
3842                         dmae->len--;
3843                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3844                 dmae->comp_addr_hi = 0;
3845                 dmae->comp_val = 1;
3846
3847                 *stats_comp = 0;
3848                 bnx2x_post_dmae(bp, dmae, loader_idx);
3849
3850         } else if (bp->func_stx) {
3851                 *stats_comp = 0;
3852                 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3853         }
3854 }
3855
3856 static int bnx2x_stats_comp(struct bnx2x *bp)
3857 {
3858         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3859         int cnt = 10;
3860
3861         might_sleep();
3862         while (*stats_comp != DMAE_COMP_VAL) {
3863                 if (!cnt) {
3864                         BNX2X_ERR("timeout waiting for stats to finish\n");
3865                         break;
3866                 }
3867                 cnt--;
3868                 msleep(1);
3869         }
3870         return 1;
3871 }
3872
3873 /*
3874  * Statistics service functions
3875  */
3876
3877 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3878 {
3879         struct dmae_command *dmae;
3880         u32 opcode;
3881         int loader_idx = PMF_DMAE_C(bp);
3882         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3883
3884         /* sanity */
3885         if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3886                 BNX2X_ERR("BUG!\n");
3887                 return;
3888         }
3889
3890         bp->executer_idx = 0;
3891
3892         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3893                   DMAE_CMD_C_ENABLE |
3894                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3895 #ifdef __BIG_ENDIAN
3896                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3897 #else
3898                   DMAE_CMD_ENDIANITY_DW_SWAP |
3899 #endif
3900                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3901                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3902
3903         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3904         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3905         dmae->src_addr_lo = bp->port.port_stx >> 2;
3906         dmae->src_addr_hi = 0;
3907         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3908         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3909         dmae->len = DMAE_LEN32_RD_MAX;
3910         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3911         dmae->comp_addr_hi = 0;
3912         dmae->comp_val = 1;
3913
3914         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3915         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3916         dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3917         dmae->src_addr_hi = 0;
3918         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3919                                    DMAE_LEN32_RD_MAX * 4);
3920         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3921                                    DMAE_LEN32_RD_MAX * 4);
3922         dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3923         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3924         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3925         dmae->comp_val = DMAE_COMP_VAL;
3926
3927         *stats_comp = 0;
3928         bnx2x_hw_stats_post(bp);
3929         bnx2x_stats_comp(bp);
3930 }
3931
3932 static void bnx2x_port_stats_init(struct bnx2x *bp)
3933 {
3934         struct dmae_command *dmae;
3935         int port = BP_PORT(bp);
3936         int vn = BP_E1HVN(bp);
3937         u32 opcode;
3938         int loader_idx = PMF_DMAE_C(bp);
3939         u32 mac_addr;
3940         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3941
3942         /* sanity */
3943         if (!bp->link_vars.link_up || !bp->port.pmf) {
3944                 BNX2X_ERR("BUG!\n");
3945                 return;
3946         }
3947
3948         bp->executer_idx = 0;
3949
3950         /* MCP */
3951         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3952                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3953                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3954 #ifdef __BIG_ENDIAN
3955                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3956 #else
3957                   DMAE_CMD_ENDIANITY_DW_SWAP |
3958 #endif
3959                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960                   (vn << DMAE_CMD_E1HVN_SHIFT));
3961
3962         if (bp->port.port_stx) {
3963
3964                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3965                 dmae->opcode = opcode;
3966                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3967                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3968                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3969                 dmae->dst_addr_hi = 0;
3970                 dmae->len = sizeof(struct host_port_stats) >> 2;
3971                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3972                 dmae->comp_addr_hi = 0;
3973                 dmae->comp_val = 1;
3974         }
3975
3976         if (bp->func_stx) {
3977
3978                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979                 dmae->opcode = opcode;
3980                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982                 dmae->dst_addr_lo = bp->func_stx >> 2;
3983                 dmae->dst_addr_hi = 0;
3984                 dmae->len = sizeof(struct host_func_stats) >> 2;
3985                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986                 dmae->comp_addr_hi = 0;
3987                 dmae->comp_val = 1;
3988         }
3989
3990         /* MAC */
3991         opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3992                   DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3993                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3994 #ifdef __BIG_ENDIAN
3995                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
3996 #else
3997                   DMAE_CMD_ENDIANITY_DW_SWAP |
3998 #endif
3999                   (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4000                   (vn << DMAE_CMD_E1HVN_SHIFT));
4001
4002         if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4003
4004                 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4005                                    NIG_REG_INGRESS_BMAC0_MEM);
4006
4007                 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4008                    BIGMAC_REGISTER_TX_STAT_GTBYT */
4009                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4010                 dmae->opcode = opcode;
4011                 dmae->src_addr_lo = (mac_addr +
4012                                      BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013                 dmae->src_addr_hi = 0;
4014                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4015                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4016                 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4017                              BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4018                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4019                 dmae->comp_addr_hi = 0;
4020                 dmae->comp_val = 1;
4021
4022                 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4023                    BIGMAC_REGISTER_RX_STAT_GRIPJ */
4024                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4025                 dmae->opcode = opcode;
4026                 dmae->src_addr_lo = (mac_addr +
4027                                      BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028                 dmae->src_addr_hi = 0;
4029                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4030                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4031                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4032                                 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4033                 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4034                              BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4035                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4036                 dmae->comp_addr_hi = 0;
4037                 dmae->comp_val = 1;
4038
4039         } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4040
4041                 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4042
4043                 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4044                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045                 dmae->opcode = opcode;
4046                 dmae->src_addr_lo = (mac_addr +
4047                                      EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4048                 dmae->src_addr_hi = 0;
4049                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4050                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4051                 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4052                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4053                 dmae->comp_addr_hi = 0;
4054                 dmae->comp_val = 1;
4055
4056                 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4057                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4058                 dmae->opcode = opcode;
4059                 dmae->src_addr_lo = (mac_addr +
4060                                      EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4061                 dmae->src_addr_hi = 0;
4062                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4063                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4064                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4065                      offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4066                 dmae->len = 1;
4067                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068                 dmae->comp_addr_hi = 0;
4069                 dmae->comp_val = 1;
4070
4071                 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4072                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073                 dmae->opcode = opcode;
4074                 dmae->src_addr_lo = (mac_addr +
4075                                      EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4076                 dmae->src_addr_hi = 0;
4077                 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4078                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4079                 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4080                         offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4081                 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4082                 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083                 dmae->comp_addr_hi = 0;
4084                 dmae->comp_val = 1;
4085         }
4086
4087         /* NIG */
4088         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4089         dmae->opcode = opcode;
4090         dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4091                                     NIG_REG_STAT0_BRB_DISCARD) >> 2;
4092         dmae->src_addr_hi = 0;
4093         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4094         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4095         dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4096         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097         dmae->comp_addr_hi = 0;
4098         dmae->comp_val = 1;
4099
4100         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101         dmae->opcode = opcode;
4102         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4103                                     NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4104         dmae->src_addr_hi = 0;
4105         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4106                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4107         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4108                         offsetof(struct nig_stats, egress_mac_pkt0_lo));
4109         dmae->len = (2*sizeof(u32)) >> 2;
4110         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111         dmae->comp_addr_hi = 0;
4112         dmae->comp_val = 1;
4113
4114         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4116                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4117                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4118 #ifdef __BIG_ENDIAN
4119                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4120 #else
4121                         DMAE_CMD_ENDIANITY_DW_SWAP |
4122 #endif
4123                         (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4124                         (vn << DMAE_CMD_E1HVN_SHIFT));
4125         dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4126                                     NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4127         dmae->src_addr_hi = 0;
4128         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4129                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4130         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4131                         offsetof(struct nig_stats, egress_mac_pkt1_lo));
4132         dmae->len = (2*sizeof(u32)) >> 2;
4133         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4134         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4135         dmae->comp_val = DMAE_COMP_VAL;
4136
4137         *stats_comp = 0;
4138 }
4139
4140 static void bnx2x_func_stats_init(struct bnx2x *bp)
4141 {
4142         struct dmae_command *dmae = &bp->stats_dmae;
4143         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4144
4145         /* sanity */
4146         if (!bp->func_stx) {
4147                 BNX2X_ERR("BUG!\n");
4148                 return;
4149         }
4150
4151         bp->executer_idx = 0;
4152         memset(dmae, 0, sizeof(struct dmae_command));
4153
4154         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4155                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4156                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4157 #ifdef __BIG_ENDIAN
4158                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4159 #else
4160                         DMAE_CMD_ENDIANITY_DW_SWAP |
4161 #endif
4162                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4163                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4164         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4165         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4166         dmae->dst_addr_lo = bp->func_stx >> 2;
4167         dmae->dst_addr_hi = 0;
4168         dmae->len = sizeof(struct host_func_stats) >> 2;
4169         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4170         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4171         dmae->comp_val = DMAE_COMP_VAL;
4172
4173         *stats_comp = 0;
4174 }
4175
4176 static void bnx2x_stats_start(struct bnx2x *bp)
4177 {
4178         if (bp->port.pmf)
4179                 bnx2x_port_stats_init(bp);
4180
4181         else if (bp->func_stx)
4182                 bnx2x_func_stats_init(bp);
4183
4184         bnx2x_hw_stats_post(bp);
4185         bnx2x_storm_stats_post(bp);
4186 }
4187
4188 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4189 {
4190         bnx2x_stats_comp(bp);
4191         bnx2x_stats_pmf_update(bp);
4192         bnx2x_stats_start(bp);
4193 }
4194
4195 static void bnx2x_stats_restart(struct bnx2x *bp)
4196 {
4197         bnx2x_stats_comp(bp);
4198         bnx2x_stats_start(bp);
4199 }
4200
4201 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4202 {
4203         struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4204         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4205         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4206         struct {
4207                 u32 lo;
4208                 u32 hi;
4209         } diff;
4210
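        /* UPDATE_STAT64(s, t) (defined in bnx2x.h) does roughly the
         * following, using the 'diff' scratch pair declared above:
         *
         *      DIFF_64(diff, new->s, pstats->mac_stx[0].t);    raw delta
         *      pstats->mac_stx[0].t = new->s;                  new snapshot
         *      ADD_64(pstats->mac_stx[1].t, diff);             accumulate
         *
         * so mac_stx[0] holds the last raw BMAC readout while mac_stx[1]
         * carries the running 64-bit totals consumed further below.
         */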
4211         UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4212         UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4213         UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4214         UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4215         UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4216         UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4217         UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4218         UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4219         UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4220         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4221         UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4222         UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4223         UPDATE_STAT64(tx_stat_gt127,
4224                                 tx_stat_etherstatspkts65octetsto127octets);
4225         UPDATE_STAT64(tx_stat_gt255,
4226                                 tx_stat_etherstatspkts128octetsto255octets);
4227         UPDATE_STAT64(tx_stat_gt511,
4228                                 tx_stat_etherstatspkts256octetsto511octets);
4229         UPDATE_STAT64(tx_stat_gt1023,
4230                                 tx_stat_etherstatspkts512octetsto1023octets);
4231         UPDATE_STAT64(tx_stat_gt1518,
4232                                 tx_stat_etherstatspkts1024octetsto1522octets);
4233         UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4234         UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4235         UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4236         UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4237         UPDATE_STAT64(tx_stat_gterr,
4238                                 tx_stat_dot3statsinternalmactransmiterrors);
4239         UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4240
4241         estats->pause_frames_received_hi =
4242                                 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4243         estats->pause_frames_received_lo =
4244                                 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4245
4246         estats->pause_frames_sent_hi =
4247                                 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4248         estats->pause_frames_sent_lo =
4249                                 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4250 }
4251
4252 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4253 {
4254         struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4255         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4256         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4257
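        /* UPDATE_EXTEND_STAT(s) widens a 32-bit EMAC counter into the
         * 64-bit mac_stx[1] accumulator, detecting carry by unsigned
         * wrap-around, roughly:
         *
         *      lo += new->s;
         *      hi += (lo < new->s) ? 1 : 0;
         */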
4258         UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4259         UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4260         UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4261         UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4262         UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4263         UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4264         UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4265         UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4266         UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4267         UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4268         UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4269         UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4270         UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4271         UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4272         UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4273         UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4274         UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4275         UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4276         UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4277         UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4278         UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4279         UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4280         UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4281         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4282         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4283         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4284         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4285         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4286         UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4287         UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4288         UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4289
4290         estats->pause_frames_received_hi =
4291                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4292         estats->pause_frames_received_lo =
4293                         pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4294         ADD_64(estats->pause_frames_received_hi,
4295                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4296                estats->pause_frames_received_lo,
4297                pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4298
4299         estats->pause_frames_sent_hi =
4300                         pstats->mac_stx[1].tx_stat_outxonsent_hi;
4301         estats->pause_frames_sent_lo =
4302                         pstats->mac_stx[1].tx_stat_outxonsent_lo;
4303         ADD_64(estats->pause_frames_sent_hi,
4304                pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4305                estats->pause_frames_sent_lo,
4306                pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4307 }
4308
4309 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4310 {
4311         struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4312         struct nig_stats *old = &(bp->port.old_nig_stats);
4313         struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4314         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4315         struct {
4316                 u32 lo;
4317                 u32 hi;
4318         } diff;
4319
4320         if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4321                 bnx2x_bmac_stats_update(bp);
4322
4323         else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4324                 bnx2x_emac_stats_update(bp);
4325
4326         else { /* unreached */
4327                 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4328                 return -1;
4329         }
4330
4331         ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4332                       new->brb_discard - old->brb_discard);
4333         ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4334                       new->brb_truncate - old->brb_truncate);
4335
4336         UPDATE_STAT64_NIG(egress_mac_pkt0,
4337                                         etherstatspkts1024octetsto1522octets);
4338         UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4339
4340         memcpy(old, new, sizeof(struct nig_stats));
4341
4342         memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4343                sizeof(struct mac_stx));
4344         estats->brb_drop_hi = pstats->brb_drop_hi;
4345         estats->brb_drop_lo = pstats->brb_drop_lo;
4346
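        /* host_port_stats_start/end appear to act as a generation tag:
         * both are advanced together only after the block above is fully
         * written, so a reader that samples start == end presumably knows
         * it did not race with an update in progress.
         */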
4347         pstats->host_port_stats_start = ++pstats->host_port_stats_end;
4348
4349         if (!BP_NOMCP(bp)) {
4350                 u32 nig_timer_max =
4351                         SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4352                 if (nig_timer_max != estats->nig_timer_max) {
4353                         estats->nig_timer_max = nig_timer_max;
4354                         BNX2X_ERR("NIG timer max (%u)\n",
4355                                   estats->nig_timer_max);
4356                 }
4357         }
4358
4359         return 0;
4360 }
4361
4362 static int bnx2x_storm_stats_update(struct bnx2x *bp)
4363 {
4364         struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4365         struct tstorm_per_port_stats *tport =
4366                                         &stats->tstorm_common.port_statistics;
4367         struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4368         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4369         int i;
4370
4371         memcpy(&(fstats->total_bytes_received_hi),
4372                &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4373                sizeof(struct host_func_stats) - 2*sizeof(u32));
4374         estats->error_bytes_received_hi = 0;
4375         estats->error_bytes_received_lo = 0;
4376         estats->etherstatsoverrsizepkts_hi = 0;
4377         estats->etherstatsoverrsizepkts_lo = 0;
4378         estats->no_buff_discard_hi = 0;
4379         estats->no_buff_discard_lo = 0;
4380
4381         for_each_queue(bp, i) {
4382                 struct bnx2x_fastpath *fp = &bp->fp[i];
4383                 int cl_id = fp->cl_id;
4384                 struct tstorm_per_client_stats *tclient =
4385                                 &stats->tstorm_common.client_statistics[cl_id];
4386                 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4387                 struct ustorm_per_client_stats *uclient =
4388                                 &stats->ustorm_common.client_statistics[cl_id];
4389                 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4390                 struct xstorm_per_client_stats *xclient =
4391                                 &stats->xstorm_common.client_statistics[cl_id];
4392                 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4393                 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4394                 u32 diff;
4395
4396                 /* are storm stats valid? */
4397                 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
4398                                                         bp->stats_counter) {
4399                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4400                            "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
4401                            i, xclient->stats_counter, bp->stats_counter);
4402                         return -1;
4403                 }
4404                 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
4405                                                         bp->stats_counter) {
4406                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4407                            "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
4408                            i, tclient->stats_counter, bp->stats_counter);
4409                         return -2;
4410                 }
4411                 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4412                                                         bp->stats_counter) {
4413                         DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4414                            "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
4415                            i, uclient->stats_counter, bp->stats_counter);
4416                         return -4;
4417                 }
4418
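                /* byte accounting: total received bytes = tstorm's
                 * bcast + mcast + ucast byte counts, minus the bytes ustorm
                 * dropped for lack of buffers; that result is recorded as
                 * "valid" bytes, and tstorm's error bytes are then added
                 * back so "total" covers errored frames as well.
                 */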
4419                 qstats->total_bytes_received_hi =
4420                         le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4421                 qstats->total_bytes_received_lo =
4422                         le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4423
4424                 ADD_64(qstats->total_bytes_received_hi,
4425                        le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4426                        qstats->total_bytes_received_lo,
4427                        le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4428
4429                 ADD_64(qstats->total_bytes_received_hi,
4430                        le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4431                        qstats->total_bytes_received_lo,
4432                        le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4433
4434                 SUB_64(qstats->total_bytes_received_hi,
4435                        le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
4436                        qstats->total_bytes_received_lo,
4437                        le32_to_cpu(uclient->bcast_no_buff_bytes.lo));
4438
4439                 SUB_64(qstats->total_bytes_received_hi,
4440                        le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
4441                        qstats->total_bytes_received_lo,
4442                        le32_to_cpu(uclient->mcast_no_buff_bytes.lo));
4443
4444                 SUB_64(qstats->total_bytes_received_hi,
4445                        le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
4446                        qstats->total_bytes_received_lo,
4447                        le32_to_cpu(uclient->ucast_no_buff_bytes.lo));
4448
4449                 qstats->valid_bytes_received_hi =
4450                                         qstats->total_bytes_received_hi;
4451                 qstats->valid_bytes_received_lo =
4452                                         qstats->total_bytes_received_lo;
4453
4454                 qstats->error_bytes_received_hi =
4455                                 le32_to_cpu(tclient->rcv_error_bytes.hi);
4456                 qstats->error_bytes_received_lo =
4457                                 le32_to_cpu(tclient->rcv_error_bytes.lo);
4458
4459                 ADD_64(qstats->total_bytes_received_hi,
4460                        qstats->error_bytes_received_hi,
4461                        qstats->total_bytes_received_lo,
4462                        qstats->error_bytes_received_lo);
4463
4464                 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4465                                         total_unicast_packets_received);
4466                 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4467                                         total_multicast_packets_received);
4468                 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4469                                         total_broadcast_packets_received);
4470                 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4471                                         etherstatsoverrsizepkts);
4472                 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4473
4474                 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4475                                         total_unicast_packets_received);
4476                 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4477                                         total_multicast_packets_received);
4478                 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4479                                         total_broadcast_packets_received);
4480                 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4481                 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4482                 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4483
4484                 qstats->total_bytes_transmitted_hi =
4485                                 le32_to_cpu(xclient->unicast_bytes_sent.hi);
4486                 qstats->total_bytes_transmitted_lo =
4487                                 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4488
4489                 ADD_64(qstats->total_bytes_transmitted_hi,
4490                        le32_to_cpu(xclient->multicast_bytes_sent.hi),
4491                        qstats->total_bytes_transmitted_lo,
4492                        le32_to_cpu(xclient->multicast_bytes_sent.lo));
4493
4494                 ADD_64(qstats->total_bytes_transmitted_hi,
4495                        le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4496                        qstats->total_bytes_transmitted_lo,
4497                        le32_to_cpu(xclient->broadcast_bytes_sent.lo));
4498
4499                 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4500                                         total_unicast_packets_transmitted);
4501                 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4502                                         total_multicast_packets_transmitted);
4503                 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4504                                         total_broadcast_packets_transmitted);
4505
4506                 old_tclient->checksum_discard = tclient->checksum_discard;
4507                 old_tclient->ttl0_discard = tclient->ttl0_discard;
4508
4509                 ADD_64(fstats->total_bytes_received_hi,
4510                        qstats->total_bytes_received_hi,
4511                        fstats->total_bytes_received_lo,
4512                        qstats->total_bytes_received_lo);
4513                 ADD_64(fstats->total_bytes_transmitted_hi,
4514                        qstats->total_bytes_transmitted_hi,
4515                        fstats->total_bytes_transmitted_lo,
4516                        qstats->total_bytes_transmitted_lo);
4517                 ADD_64(fstats->total_unicast_packets_received_hi,
4518                        qstats->total_unicast_packets_received_hi,
4519                        fstats->total_unicast_packets_received_lo,
4520                        qstats->total_unicast_packets_received_lo);
4521                 ADD_64(fstats->total_multicast_packets_received_hi,
4522                        qstats->total_multicast_packets_received_hi,
4523                        fstats->total_multicast_packets_received_lo,
4524                        qstats->total_multicast_packets_received_lo);
4525                 ADD_64(fstats->total_broadcast_packets_received_hi,
4526                        qstats->total_broadcast_packets_received_hi,
4527                        fstats->total_broadcast_packets_received_lo,
4528                        qstats->total_broadcast_packets_received_lo);
4529                 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4530                        qstats->total_unicast_packets_transmitted_hi,
4531                        fstats->total_unicast_packets_transmitted_lo,
4532                        qstats->total_unicast_packets_transmitted_lo);
4533                 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4534                        qstats->total_multicast_packets_transmitted_hi,
4535                        fstats->total_multicast_packets_transmitted_lo,
4536                        qstats->total_multicast_packets_transmitted_lo);
4537                 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4538                        qstats->total_broadcast_packets_transmitted_hi,
4539                        fstats->total_broadcast_packets_transmitted_lo,
4540                        qstats->total_broadcast_packets_transmitted_lo);
4541                 ADD_64(fstats->valid_bytes_received_hi,
4542                        qstats->valid_bytes_received_hi,
4543                        fstats->valid_bytes_received_lo,
4544                        qstats->valid_bytes_received_lo);
4545
4546                 ADD_64(estats->error_bytes_received_hi,
4547                        qstats->error_bytes_received_hi,
4548                        estats->error_bytes_received_lo,
4549                        qstats->error_bytes_received_lo);
4550                 ADD_64(estats->etherstatsoverrsizepkts_hi,
4551                        qstats->etherstatsoverrsizepkts_hi,
4552                        estats->etherstatsoverrsizepkts_lo,
4553                        qstats->etherstatsoverrsizepkts_lo);
4554                 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4555                        estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4556         }
4557
4558         ADD_64(fstats->total_bytes_received_hi,
4559                estats->rx_stat_ifhcinbadoctets_hi,
4560                fstats->total_bytes_received_lo,
4561                estats->rx_stat_ifhcinbadoctets_lo);
4562
4563         memcpy(estats, &(fstats->total_bytes_received_hi),
4564                sizeof(struct host_func_stats) - 2*sizeof(u32));
4565
4566         ADD_64(estats->etherstatsoverrsizepkts_hi,
4567                estats->rx_stat_dot3statsframestoolong_hi,
4568                estats->etherstatsoverrsizepkts_lo,
4569                estats->rx_stat_dot3statsframestoolong_lo);
4570         ADD_64(estats->error_bytes_received_hi,
4571                estats->rx_stat_ifhcinbadoctets_hi,
4572                estats->error_bytes_received_lo,
4573                estats->rx_stat_ifhcinbadoctets_lo);
4574
4575         if (bp->port.pmf) {
4576                 estats->mac_filter_discard =
4577                                 le32_to_cpu(tport->mac_filter_discard);
4578                 estats->xxoverflow_discard =
4579                                 le32_to_cpu(tport->xxoverflow_discard);
4580                 estats->brb_truncate_discard =
4581                                 le32_to_cpu(tport->brb_truncate_discard);
4582                 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4583         }
4584
4585         fstats->host_func_stats_start = ++fstats->host_func_stats_end;
4586
4587         bp->stats_pending = 0;
4588
4589         return 0;
4590 }
4591
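/* bnx2x_hilo() (defined earlier in this file) folds a {hi, lo} counter pair
 * into a single value for net_device_stats: on 64-bit kernels it is roughly
 * ((u64)hi << 32) | lo, while on 32-bit it simply returns the low word.
 */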
4592 static void bnx2x_net_stats_update(struct bnx2x *bp)
4593 {
4594         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4595         struct net_device_stats *nstats = &bp->dev->stats;
4596         int i;
4597
4598         nstats->rx_packets =
4599                 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4600                 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4601                 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4602
4603         nstats->tx_packets =
4604                 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4605                 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4606                 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4607
4608         nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4609
4610         nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4611
4612         nstats->rx_dropped = estats->mac_discard;
4613         for_each_queue(bp, i)
4614                 nstats->rx_dropped +=
4615                         le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4616
4617         nstats->tx_dropped = 0;
4618
4619         nstats->multicast =
4620                 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4621
4622         nstats->collisions =
4623                 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4624
4625         nstats->rx_length_errors =
4626                 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4627                 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4628         nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4629                                  bnx2x_hilo(&estats->brb_truncate_hi);
4630         nstats->rx_crc_errors =
4631                 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4632         nstats->rx_frame_errors =
4633                 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4634         nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4635         nstats->rx_missed_errors = estats->xxoverflow_discard;
4636
4637         nstats->rx_errors = nstats->rx_length_errors +
4638                             nstats->rx_over_errors +
4639                             nstats->rx_crc_errors +
4640                             nstats->rx_frame_errors +
4641                             nstats->rx_fifo_errors +
4642                             nstats->rx_missed_errors;
4643
4644         nstats->tx_aborted_errors =
4645                 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4646                 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4647         nstats->tx_carrier_errors =
4648                 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4649         nstats->tx_fifo_errors = 0;
4650         nstats->tx_heartbeat_errors = 0;
4651         nstats->tx_window_errors = 0;
4652
4653         nstats->tx_errors = nstats->tx_aborted_errors +
4654                             nstats->tx_carrier_errors +
4655             bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4656 }
4657
4658 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4659 {
4660         struct bnx2x_eth_stats *estats = &bp->eth_stats;
4661         int i;
4662
4663         estats->driver_xoff = 0;
4664         estats->rx_err_discard_pkt = 0;
4665         estats->rx_skb_alloc_failed = 0;
4666         estats->hw_csum_err = 0;
4667         for_each_queue(bp, i) {
4668                 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4669
4670                 estats->driver_xoff += qstats->driver_xoff;
4671                 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4672                 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4673                 estats->hw_csum_err += qstats->hw_csum_err;
4674         }
4675 }
4676
4677 static void bnx2x_stats_update(struct bnx2x *bp)
4678 {
4679         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4680
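        /* stats_comp is the host word that the last DMAE command of the
         * chain writes DMAE_COMP_VAL into; any other value means the
         * previous statistics cycle is still in flight, so skip this tick.
         */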
4681         if (*stats_comp != DMAE_COMP_VAL)
4682                 return;
4683
4684         if (bp->port.pmf)
4685                 bnx2x_hw_stats_update(bp);
4686
4687         if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4688                 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4689                 bnx2x_panic();
4690                 return;
4691         }
4692
4693         bnx2x_net_stats_update(bp);
4694         bnx2x_drv_stats_update(bp);
4695
4696         if (netif_msg_timer(bp)) {
4697                 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4698                 int i;
4699
4700                 printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
4701                        bp->dev->name,
4702                        estats->brb_drop_lo, estats->brb_truncate_lo);
4703
4704                 for_each_queue(bp, i) {
4705                         struct bnx2x_fastpath *fp = &bp->fp[i];
4706                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4707
4708                         printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
4709                                           "  rx pkt(%lu)  rx calls(%lu %lu)\n",
4710                                fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
4711                                fp->rx_comp_cons),
4712                                le16_to_cpu(*fp->rx_cons_sb),
4713                                bnx2x_hilo(&qstats->
4714                                           total_unicast_packets_received_hi),
4715                                fp->rx_calls, fp->rx_pkt);
4716                 }
4717
4718                 for_each_queue(bp, i) {
4719                         struct bnx2x_fastpath *fp = &bp->fp[i];
4720                         struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4721                         struct netdev_queue *txq =
4722                                 netdev_get_tx_queue(bp->dev, i);
4723
4724                         printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
4725                                           "  tx pkt(%lu) tx calls (%lu)"
4726                                           "  %s (Xoff events %u)\n",
4727                                fp->name, bnx2x_tx_avail(fp),
4728                                le16_to_cpu(*fp->tx_cons_sb),
4729                                bnx2x_hilo(&qstats->
4730                                           total_unicast_packets_transmitted_hi),
4731                                fp->tx_pkt,
4732                                (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
4733                                qstats->driver_xoff);
4734                 }
4735         }
4736
4737         bnx2x_hw_stats_post(bp);
4738         bnx2x_storm_stats_post(bp);
4739 }
4740
4741 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4742 {
4743         struct dmae_command *dmae;
4744         u32 opcode;
4745         int loader_idx = PMF_DMAE_C(bp);
4746         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4747
4748         bp->executer_idx = 0;
4749
4750         opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4751                   DMAE_CMD_C_ENABLE |
4752                   DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4753 #ifdef __BIG_ENDIAN
4754                   DMAE_CMD_ENDIANITY_B_DW_SWAP |
4755 #else
4756                   DMAE_CMD_ENDIANITY_DW_SWAP |
4757 #endif
4758                   (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4759                   (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4760
4761         if (bp->port.port_stx) {
4762
4763                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4764                 if (bp->func_stx)
4765                         dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4766                 else
4767                         dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4768                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4769                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4770                 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4771                 dmae->dst_addr_hi = 0;
4772                 dmae->len = sizeof(struct host_port_stats) >> 2;
4773                 if (bp->func_stx) {
4774                         dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4775                         dmae->comp_addr_hi = 0;
4776                         dmae->comp_val = 1;
4777                 } else {
4778                         dmae->comp_addr_lo =
4779                                 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4780                         dmae->comp_addr_hi =
4781                                 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4782                         dmae->comp_val = DMAE_COMP_VAL;
4783
4784                         *stats_comp = 0;
4785                 }
4786         }
4787
4788         if (bp->func_stx) {
4789
4790                 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4791                 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4792                 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4793                 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4794                 dmae->dst_addr_lo = bp->func_stx >> 2;
4795                 dmae->dst_addr_hi = 0;
4796                 dmae->len = sizeof(struct host_func_stats) >> 2;
4797                 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4798                 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4799                 dmae->comp_val = DMAE_COMP_VAL;
4800
4801                 *stats_comp = 0;
4802         }
4803 }
4804
4805 static void bnx2x_stats_stop(struct bnx2x *bp)
4806 {
4807         int update = 0;
4808
4809         bnx2x_stats_comp(bp);
4810
4811         if (bp->port.pmf)
4812                 update = (bnx2x_hw_stats_update(bp) == 0);
4813
4814         update |= (bnx2x_storm_stats_update(bp) == 0);
4815
4816         if (update) {
4817                 bnx2x_net_stats_update(bp);
4818
4819                 if (bp->port.pmf)
4820                         bnx2x_port_stats_stop(bp);
4821
4822                 bnx2x_hw_stats_post(bp);
4823                 bnx2x_stats_comp(bp);
4824         }
4825 }
4826
4827 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4828 {
4829 }
4830
4831 static const struct {
4832         void (*action)(struct bnx2x *bp);
4833         enum bnx2x_stats_state next_state;
4834 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4835 /* state        event   */
4836 {
4837 /* DISABLED     PMF     */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4838 /*              LINK_UP */ {bnx2x_stats_start,      STATS_STATE_ENABLED},
4839 /*              UPDATE  */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4840 /*              STOP    */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4841 },
4842 {
4843 /* ENABLED      PMF     */ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4844 /*              LINK_UP */ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
4845 /*              UPDATE  */ {bnx2x_stats_update,     STATS_STATE_ENABLED},
4846 /*              STOP    */ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
4847 }
4848 };
4849
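/* Dispatch one event through the table above.  STATS_EVENT_UPDATE arrives
 * from timer (softirq) context while PMF/LINK_UP/STOP are posted from the
 * slowpath, so the read-modify-write of bp->stats_state is done under
 * stats_lock; the action callback itself runs outside the lock.  A minimal
 * caller, as seen in bnx2x_timer() below:
 *
 *      if (bp->state == BNX2X_STATE_OPEN)
 *              bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
 */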
4850 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4851 {
4852         enum bnx2x_stats_state state;
4853
4854         if (unlikely(bp->panic))
4855                 return;
4856
4857         /* Protect a state change flow */
4858         spin_lock_bh(&bp->stats_lock);
4859         state = bp->stats_state;
4860         bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4861         spin_unlock_bh(&bp->stats_lock);
4862
4863         bnx2x_stats_stm[state][event].action(bp);
4864
4865         if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4866                 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4867                    state, event, bp->stats_state);
4868 }
4869
4870 static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4871 {
4872         struct dmae_command *dmae;
4873         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4874
4875         /* sanity */
4876         if (!bp->port.pmf || !bp->port.port_stx) {
4877                 BNX2X_ERR("BUG!\n");
4878                 return;
4879         }
4880
4881         bp->executer_idx = 0;
4882
4883         dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4884         dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4885                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4886                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4887 #ifdef __BIG_ENDIAN
4888                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4889 #else
4890                         DMAE_CMD_ENDIANITY_DW_SWAP |
4891 #endif
4892                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4893                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4894         dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4895         dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4896         dmae->dst_addr_lo = bp->port.port_stx >> 2;
4897         dmae->dst_addr_hi = 0;
4898         dmae->len = sizeof(struct host_port_stats) >> 2;
4899         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4900         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4901         dmae->comp_val = DMAE_COMP_VAL;
4902
4903         *stats_comp = 0;
4904         bnx2x_hw_stats_post(bp);
4905         bnx2x_stats_comp(bp);
4906 }
4907
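/* On the PMF, seed the function statistics area of every vnic sharing this
 * port (E1H function ids interleave as func = 2*vn + port) by temporarily
 * pointing bp->func_stx at each one in turn and reusing the plain
 * bnx2x_func_stats_init()/post/comp sequence.
 */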
4908 static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4909 {
4910         int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4911         int port = BP_PORT(bp);
4912         int func;
4913         u32 func_stx;
4914
4915         /* sanity */
4916         if (!bp->port.pmf || !bp->func_stx) {
4917                 BNX2X_ERR("BUG!\n");
4918                 return;
4919         }
4920
4921         /* save our func_stx */
4922         func_stx = bp->func_stx;
4923
4924         for (vn = VN_0; vn < vn_max; vn++) {
4925                 func = 2*vn + port;
4926
4927                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4928                 bnx2x_func_stats_init(bp);
4929                 bnx2x_hw_stats_post(bp);
4930                 bnx2x_stats_comp(bp);
4931         }
4932
4933         /* restore our func_stx */
4934         bp->func_stx = func_stx;
4935 }
4936
4937 static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4938 {
4939         struct dmae_command *dmae = &bp->stats_dmae;
4940         u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4941
4942         /* sanity */
4943         if (!bp->func_stx) {
4944                 BNX2X_ERR("BUG!\n");
4945                 return;
4946         }
4947
4948         bp->executer_idx = 0;
4949         memset(dmae, 0, sizeof(struct dmae_command));
4950
4951         dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4952                         DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4953                         DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4954 #ifdef __BIG_ENDIAN
4955                         DMAE_CMD_ENDIANITY_B_DW_SWAP |
4956 #else
4957                         DMAE_CMD_ENDIANITY_DW_SWAP |
4958 #endif
4959                         (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4960                         (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4961         dmae->src_addr_lo = bp->func_stx >> 2;
4962         dmae->src_addr_hi = 0;
4963         dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4964         dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4965         dmae->len = sizeof(struct host_func_stats) >> 2;
4966         dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4967         dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4968         dmae->comp_val = DMAE_COMP_VAL;
4969
4970         *stats_comp = 0;
4971         bnx2x_hw_stats_post(bp);
4972         bnx2x_stats_comp(bp);
4973 }
4974
4975 static void bnx2x_stats_init(struct bnx2x *bp)
4976 {
4977         int port = BP_PORT(bp);
4978         int func = BP_FUNC(bp);
4979         int i;
4980
4981         bp->stats_pending = 0;
4982         bp->executer_idx = 0;
4983         bp->stats_counter = 0;
4984
4985         /* port and func stats for management */
4986         if (!BP_NOMCP(bp)) {
4987                 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4988                 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4989
4990         } else {
4991                 bp->port.port_stx = 0;
4992                 bp->func_stx = 0;
4993         }
4994         DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
4995            bp->port.port_stx, bp->func_stx);
4996
4997         /* port stats */
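        /* the NIG counters are not cleared across a driver load, so snapshot
         * their current values as a baseline; bnx2x_hw_stats_update() will
         * report only the delta against this copy.
         */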
4998         memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4999         bp->port.old_nig_stats.brb_discard =
5000                         REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
5001         bp->port.old_nig_stats.brb_truncate =
5002                         REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
5003         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
5004                     &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
5005         REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
5006                     &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
5007
5008         /* function stats */
5009         for_each_queue(bp, i) {
5010                 struct bnx2x_fastpath *fp = &bp->fp[i];
5011
5012                 memset(&fp->old_tclient, 0,
5013                        sizeof(struct tstorm_per_client_stats));
5014                 memset(&fp->old_uclient, 0,
5015                        sizeof(struct ustorm_per_client_stats));
5016                 memset(&fp->old_xclient, 0,
5017                        sizeof(struct xstorm_per_client_stats));
5018                 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
5019         }
5020
5021         memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
5022         memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
5023
5024         bp->stats_state = STATS_STATE_DISABLED;
5025
5026         if (bp->port.pmf) {
5027                 if (bp->port.port_stx)
5028                         bnx2x_port_stats_base_init(bp);
5029
5030                 if (bp->func_stx)
5031                         bnx2x_func_stats_base_init(bp);
5032
5033         } else if (bp->func_stx)
5034                 bnx2x_func_stats_base_update(bp);
5035 }
5036
5037 static void bnx2x_timer(unsigned long data)
5038 {
5039         struct bnx2x *bp = (struct bnx2x *) data;
5040
5041         if (!netif_running(bp->dev))
5042                 return;
5043
5044         if (atomic_read(&bp->intr_sem) != 0)
5045                 goto timer_restart;
5046
5047         if (poll) {
5048                 struct bnx2x_fastpath *fp = &bp->fp[0];
5049                 int rc;
5050
5051                 bnx2x_tx_int(fp);
5052                 rc = bnx2x_rx_int(fp, 1000);
5053         }
5054
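        /* driver <-> MCP heartbeat: write an incrementing pulse sequence
         * into the function mailbox and compare it with the sequence the
         * MCP echoes back; the delta check below tolerates being one ahead.
         */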
5055         if (!BP_NOMCP(bp)) {
5056                 int func = BP_FUNC(bp);
5057                 u32 drv_pulse;
5058                 u32 mcp_pulse;
5059
5060                 ++bp->fw_drv_pulse_wr_seq;
5061                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5062                 /* TBD - add SYSTEM_TIME */
5063                 drv_pulse = bp->fw_drv_pulse_wr_seq;
5064                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
5065
5066                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5067                              MCP_PULSE_SEQ_MASK);
5068                 /* The delta between driver pulse and mcp response
5069                  * should be 1 (before mcp response) or 0 (after mcp response)
5070                  */
5071                 if ((drv_pulse != mcp_pulse) &&
5072                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5073                         /* someone lost a heartbeat... */
5074                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5075                                   drv_pulse, mcp_pulse);
5076                 }
5077         }
5078
5079         if (bp->state == BNX2X_STATE_OPEN)
5080                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
5081
5082 timer_restart:
5083         mod_timer(&bp->timer, jiffies + bp->current_interval);
5084 }
5085
5086 /* end of Statistics */
5087
5088 /* nic init */
5089
5090 /*
5091  * nic init service functions
5092  */
5093
5094 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
5095 {
5096         int port = BP_PORT(bp);
5097
5098         /* "CSTORM" - both U and C SB sections live in CSTORM memory */
5099         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5100                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
5101                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
5102         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5103                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
5104                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
5105 }
5106
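/* Program a per-queue status block: write its host DMA address into CSTORM
 * internal memory, tag it with the owning function, and start with every HC
 * index disabled (writing 1 to an HC_DISABLE slot presumably masks that
 * index); bnx2x_update_coalesce() below re-enables the RX/TX CQ indices
 * whenever their coalescing timeouts are non-zero.
 */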
5107 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5108                           dma_addr_t mapping, int sb_id)
5109 {
5110         int port = BP_PORT(bp);
5111         int func = BP_FUNC(bp);
5112         int index;
5113         u64 section;
5114
5115         /* USTORM */
5116         section = ((u64)mapping) + offsetof(struct host_status_block,
5117                                             u_status_block);
5118         sb->u_status_block.status_block_id = sb_id;
5119
5120         REG_WR(bp, BAR_CSTRORM_INTMEM +
5121                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
5122         REG_WR(bp, BAR_CSTRORM_INTMEM +
5123                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
5124                U64_HI(section));
5125         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
5126                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
5127
5128         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5129                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5130                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
5131
5132         /* CSTORM */
5133         section = ((u64)mapping) + offsetof(struct host_status_block,
5134                                             c_status_block);
5135         sb->c_status_block.status_block_id = sb_id;
5136
5137         REG_WR(bp, BAR_CSTRORM_INTMEM +
5138                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
5139         REG_WR(bp, BAR_CSTRORM_INTMEM +
5140                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
5141                U64_HI(section));
5142         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
5143                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
5144
5145         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5146                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5147                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
5148
5149         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5150 }
5151
5152 static void bnx2x_zero_def_sb(struct bnx2x *bp)
5153 {
5154         int func = BP_FUNC(bp);
5155
5156         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
5157                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5158                         sizeof(struct tstorm_def_status_block)/4);
5159         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5160                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
5161                         sizeof(struct cstorm_def_status_block_u)/4);
5162         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
5163                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
5164                         sizeof(struct cstorm_def_status_block_c)/4);
5165         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
5166                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
5167                         sizeof(struct xstorm_def_status_block)/4);
5168 }
5169
5170 static void bnx2x_init_def_sb(struct bnx2x *bp,
5171                               struct host_def_status_block *def_sb,
5172                               dma_addr_t mapping, int sb_id)
5173 {
5174         int port = BP_PORT(bp);
5175         int func = BP_FUNC(bp);
5176         int index, val, reg_offset;
5177         u64 section;
5178
5179         /* ATTN */
5180         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5181                                             atten_status_block);
5182         def_sb->atten_status_block.status_block_id = sb_id;
5183
5184         bp->attn_state = 0;
5185
5186         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5187                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5188
5189         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
5190                 bp->attn_group[index].sig[0] = REG_RD(bp,
5191                                                      reg_offset + 0x10*index);
5192                 bp->attn_group[index].sig[1] = REG_RD(bp,
5193                                                reg_offset + 0x4 + 0x10*index);
5194                 bp->attn_group[index].sig[2] = REG_RD(bp,
5195                                                reg_offset + 0x8 + 0x10*index);
5196                 bp->attn_group[index].sig[3] = REG_RD(bp,
5197                                                reg_offset + 0xc + 0x10*index);
5198         }
5199
5200         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5201                              HC_REG_ATTN_MSG0_ADDR_L);
5202
5203         REG_WR(bp, reg_offset, U64_LO(section));
5204         REG_WR(bp, reg_offset + 4, U64_HI(section));
5205
5206         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5207
5208         val = REG_RD(bp, reg_offset);
5209         val |= sb_id;
5210         REG_WR(bp, reg_offset, val);
5211
5212         /* USTORM */
5213         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5214                                             u_def_status_block);
5215         def_sb->u_def_status_block.status_block_id = sb_id;
5216
5217         REG_WR(bp, BAR_CSTRORM_INTMEM +
5218                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
5219         REG_WR(bp, BAR_CSTRORM_INTMEM +
5220                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
5221                U64_HI(section));
5222         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
5223                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
5224
5225         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5226                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5227                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
5228
5229         /* CSTORM */
5230         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5231                                             c_def_status_block);
5232         def_sb->c_def_status_block.status_block_id = sb_id;
5233
5234         REG_WR(bp, BAR_CSTRORM_INTMEM +
5235                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
5236         REG_WR(bp, BAR_CSTRORM_INTMEM +
5237                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
5238                U64_HI(section));
5239         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
5240                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
5241
5242         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5243                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5244                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
5245
5246         /* TSTORM */
5247         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5248                                             t_def_status_block);
5249         def_sb->t_def_status_block.status_block_id = sb_id;
5250
5251         REG_WR(bp, BAR_TSTRORM_INTMEM +
5252                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5253         REG_WR(bp, BAR_TSTRORM_INTMEM +
5254                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5255                U64_HI(section));
5256         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
5257                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5258
5259         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5260                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5261                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5262
5263         /* XSTORM */
5264         section = ((u64)mapping) + offsetof(struct host_def_status_block,
5265                                             x_def_status_block);
5266         def_sb->x_def_status_block.status_block_id = sb_id;
5267
5268         REG_WR(bp, BAR_XSTRORM_INTMEM +
5269                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
5270         REG_WR(bp, BAR_XSTRORM_INTMEM +
5271                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
5272                U64_HI(section));
5273         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
5274                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
5275
5276         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5277                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5278                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
5279
5280         bp->stats_pending = 0;
5281         bp->set_mac_pending = 0;
5282
5283         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5284 }
5285
5286 static void bnx2x_update_coalesce(struct bnx2x *bp)
5287 {
5288         int port = BP_PORT(bp);
5289         int i;
5290
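        /* rx_ticks/tx_ticks are in usec; dividing by (4 * BNX2X_BTR)
         * presumably scales them to HC timeout units.  A resulting timeout
         * of 0 keeps the index disabled (the 16-bit write stores 1 in its
         * HC_DISABLE slot), i.e. no interrupt mitigation on that ring.
         */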
5291         for_each_queue(bp, i) {
5292                 int sb_id = bp->fp[i].sb_id;
5293
5294                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5295                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5296                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
5297                                                       U_SB_ETH_RX_CQ_INDEX),
5298                         bp->rx_ticks/(4 * BNX2X_BTR));
5299                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5300                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
5301                                                        U_SB_ETH_RX_CQ_INDEX),
5302                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5303
5304                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5305                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5306                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
5307                                                       C_SB_ETH_TX_CQ_INDEX),
5308                         bp->tx_ticks/(4 * BNX2X_BTR));
5309                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5310                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
5311                                                        C_SB_ETH_TX_CQ_INDEX),
5312                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
5313         }
5314 }
5315
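/* Release the first 'last' skbs of a queue's TPA pool.  Only bins still in
 * the TPA_START state hold a live DMA mapping, hence the conditional unmap
 * before freeing the skb.
 */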
5316 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
5317                                        struct bnx2x_fastpath *fp, int last)
5318 {
5319         int i;
5320
5321         for (i = 0; i < last; i++) {
5322                 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
5323                 struct sk_buff *skb = rx_buf->skb;
5324
5325                 if (skb == NULL) {
5326                         DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
5327                         continue;
5328                 }
5329
5330                 if (fp->tpa_state[i] == BNX2X_TPA_START)
5331                         dma_unmap_single(&bp->pdev->dev,
5332                                          dma_unmap_addr(rx_buf, mapping),
5333                                          bp->rx_buf_size, DMA_FROM_DEVICE);
5334
5335                 dev_kfree_skb(skb);
5336                 rx_buf->skb = NULL;
5337         }
5338 }
5339
5340 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5341 {
5342         int func = BP_FUNC(bp);
5343         int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
5344                                               ETH_MAX_AGGREGATION_QUEUES_E1H;
5345         u16 ring_prod, cqe_ring_prod;
5346         int i, j;
5347
5348         bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
5349         DP(NETIF_MSG_IFUP,
5350            "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
5351
5352         if (bp->flags & TPA_ENABLE_FLAG) {
5353
5354                 for_each_queue(bp, j) {
5355                         struct bnx2x_fastpath *fp = &bp->fp[j];
5356
5357                         for (i = 0; i < max_agg_queues; i++) {
5358                                 fp->tpa_pool[i].skb =
5359                                    netdev_alloc_skb(bp->dev, bp->rx_buf_size);
5360                                 if (!fp->tpa_pool[i].skb) {
5361                                         BNX2X_ERR("Failed to allocate TPA "
5362                                                   "skb pool for queue[%d] - "
5363                                                   "disabling TPA on this "
5364                                                   "queue!\n", j);
5365                                         bnx2x_free_tpa_pool(bp, fp, i);
5366                                         fp->disable_tpa = 1;
5367                                         break;
5368                                 }
5369                                 dma_unmap_addr_set(&fp->tpa_pool[i],
5370                                                    mapping, 0);
5372                                 fp->tpa_state[i] = BNX2X_TPA_STOP;
5373                         }
5374                 }
5375         }
5376
5377         for_each_queue(bp, j) {
5378                 struct bnx2x_fastpath *fp = &bp->fp[j];
5379
5380                 fp->rx_bd_cons = 0;
5381                 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5382                 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
5383
5384                 /* "next page" elements initialization */
5385                 /* SGE ring */
5386                 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
5387                         struct eth_rx_sge *sge;
5388
5389                         sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
5390                         sge->addr_hi =
5391                                 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
5392                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5393                         sge->addr_lo =
5394                                 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
5395                                         BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
5396                 }
5397
5398                 bnx2x_init_sge_ring_bit_mask(fp);
5399
5400                 /* RX BD ring */
5401                 for (i = 1; i <= NUM_RX_RINGS; i++) {
5402                         struct eth_rx_bd *rx_bd;
5403
5404                         rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5405                         rx_bd->addr_hi =
5406                                 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5407                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5408                         rx_bd->addr_lo =
5409                                 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5410                                             BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5411                 }
5412
5413                 /* CQ ring */
5414                 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5415                         struct eth_rx_cqe_next_page *nextpg;
5416
5417                         nextpg = (struct eth_rx_cqe_next_page *)
5418                                 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5419                         nextpg->addr_hi =
5420                                 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5421                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5422                         nextpg->addr_lo =
5423                                 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5424                                            BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5425                 }
5426
5427                 /* Allocate SGEs and initialize the ring elements */
5428                 for (i = 0, ring_prod = 0;
5429                      i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
5430
5431                         if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5432                                 BNX2X_ERR("was only able to allocate "
5433                                           "%d rx sges\n", i);
5434                                 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5435                                 /* Cleanup already allocated elements */
5436                                 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
5437                                 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
5438                                 fp->disable_tpa = 1;
5439                                 ring_prod = 0;
5440                                 break;
5441                         }
5442                         ring_prod = NEXT_SGE_IDX(ring_prod);
5443                 }
5444                 fp->rx_sge_prod = ring_prod;
5445
5446                 /* Allocate BDs and initialize BD ring */
5447                 fp->rx_comp_cons = 0;
5448                 cqe_ring_prod = ring_prod = 0;
5449                 for (i = 0; i < bp->rx_ring_size; i++) {
5450                         if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5451                                 BNX2X_ERR("was only able to allocate "
5452                                           "%d rx skbs on queue[%d]\n", i, j);
5453                                 fp->eth_q_stats.rx_skb_alloc_failed++;
5454                                 break;
5455                         }
5456                         ring_prod = NEXT_RX_IDX(ring_prod);
5457                         cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
5458                         WARN_ON(ring_prod <= i);
5459                 }
5460
5461                 fp->rx_bd_prod = ring_prod;
5462                 /* must not have more available CQEs than BDs */
5463                 fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
5464                                          cqe_ring_prod);
5465                 fp->rx_pkt = fp->rx_calls = 0;
5466
5467                 /* Warning!
5468                  * this will generate an interrupt (to the TSTORM)
5469                  * must only be done after chip is initialized
5470                  */
5471                 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5472                                      fp->rx_sge_prod);
5473                 if (j != 0)
5474                         continue;
5475
5476                 REG_WR(bp, BAR_USTRORM_INTMEM +
5477                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
5478                        U64_LO(fp->rx_comp_mapping));
5479                 REG_WR(bp, BAR_USTRORM_INTMEM +
5480                        USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
5481                        U64_HI(fp->rx_comp_mapping));
5482         }
5483 }
5484
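/* Per-queue Tx init: link the "next BD" element of each Tx ring page
 * and reset the doorbell data and producer/consumer indices.
 */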
5485 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5486 {
5487         int i, j;
5488
5489         for_each_queue(bp, j) {
5490                 struct bnx2x_fastpath *fp = &bp->fp[j];
5491
5492                 for (i = 1; i <= NUM_TX_RINGS; i++) {
5493                         struct eth_tx_next_bd *tx_next_bd =
5494                                 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5495
5496                         tx_next_bd->addr_hi =
5497                                 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5498                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5499                         tx_next_bd->addr_lo =
5500                                 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5501                                             BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5502                 }
5503
5504                 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5505                 fp->tx_db.data.zero_fill1 = 0;
5506                 fp->tx_db.data.prod = 0;
5507
5508                 fp->tx_pkt_prod = 0;
5509                 fp->tx_pkt_cons = 0;
5510                 fp->tx_bd_prod = 0;
5511                 fp->tx_bd_cons = 0;
5512                 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5513                 fp->tx_pkt = 0;
5514         }
5515 }
5516
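/* Slow-path queue init: reset the SPQ producer and BD pointers and
 * publish the SPQ page base and producer index to the XSTORM.
 */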
5517 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5518 {
5519         int func = BP_FUNC(bp);
5520
5521         spin_lock_init(&bp->spq_lock);
5522
5523         bp->spq_left = MAX_SPQ_PENDING;
5524         bp->spq_prod_idx = 0;
5525         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5526         bp->spq_prod_bd = bp->spq;
5527         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5528
5529         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
5530                U64_LO(bp->spq_mapping));
5531         REG_WR(bp,
5532                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
5533                U64_HI(bp->spq_mapping));
5534
5535         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
5536                bp->spq_prod_idx);
5537 }
5538
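/* Fill the per-queue Ethernet context: USTORM Rx parameters (BD/SGE
 * page bases, buffer sizes, optional TPA settings) and CSTORM/XSTORM
 * Tx parameters (Tx BD page base, status block and statistics ids).
 */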
5539 static void bnx2x_init_context(struct bnx2x *bp)
5540 {
5541         int i;
5542
5543         /* Rx */
5544         for_each_queue(bp, i) {
5545                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5546                 struct bnx2x_fastpath *fp = &bp->fp[i];
5547                 u8 cl_id = fp->cl_id;
5548
5549                 context->ustorm_st_context.common.sb_index_numbers =
5550                                                 BNX2X_RX_SB_INDEX_NUM;
5551                 context->ustorm_st_context.common.clientId = cl_id;
5552                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
5553                 context->ustorm_st_context.common.flags =
5554                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5555                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5556                 context->ustorm_st_context.common.statistics_counter_id =
5557                                                 cl_id;
5558                 context->ustorm_st_context.common.mc_alignment_log_size =
5559                                                 BNX2X_RX_ALIGN_SHIFT;
5560                 context->ustorm_st_context.common.bd_buff_size =
5561                                                 bp->rx_buf_size;
5562                 context->ustorm_st_context.common.bd_page_base_hi =
5563                                                 U64_HI(fp->rx_desc_mapping);
5564                 context->ustorm_st_context.common.bd_page_base_lo =
5565                                                 U64_LO(fp->rx_desc_mapping);
5566                 if (!fp->disable_tpa) {
5567                         context->ustorm_st_context.common.flags |=
5568                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
5569                         context->ustorm_st_context.common.sge_buff_size =
5570                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
5571                                            0xffff);
5572                         context->ustorm_st_context.common.sge_page_base_hi =
5573                                                 U64_HI(fp->rx_sge_mapping);
5574                         context->ustorm_st_context.common.sge_page_base_lo =
5575                                                 U64_LO(fp->rx_sge_mapping);
5576
5577                         context->ustorm_st_context.common.max_sges_for_packet =
5578                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5579                         context->ustorm_st_context.common.max_sges_for_packet =
5580                                 ((context->ustorm_st_context.common.
5581                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
5582                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
5583                 }
5584
5585                 context->ustorm_ag_context.cdu_usage =
5586                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5587                                                CDU_REGION_NUMBER_UCM_AG,
5588                                                ETH_CONNECTION_TYPE);
5589
5590                 context->xstorm_ag_context.cdu_reserved =
5591                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5592                                                CDU_REGION_NUMBER_XCM_AG,
5593                                                ETH_CONNECTION_TYPE);
5594         }
5595
5596         /* Tx */
5597         for_each_queue(bp, i) {
5598                 struct bnx2x_fastpath *fp = &bp->fp[i];
5599                 struct eth_context *context =
5600                         bnx2x_sp(bp, context[i].eth);
5601
5602                 context->cstorm_st_context.sb_index_number =
5603                                                 C_SB_ETH_TX_CQ_INDEX;
5604                 context->cstorm_st_context.status_block_id = fp->sb_id;
5605
5606                 context->xstorm_st_context.tx_bd_page_base_hi =
5607                                                 U64_HI(fp->tx_desc_mapping);
5608                 context->xstorm_st_context.tx_bd_page_base_lo =
5609                                                 U64_LO(fp->tx_desc_mapping);
5610                 context->xstorm_st_context.statistics_data = (fp->cl_id |
5611                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
5612         }
5613 }
5614
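/* Fill the TSTORM RSS indirection table by spreading its entries over
 * the client ids of the active queues; a no-op when RSS is disabled.
 */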
5615 static void bnx2x_init_ind_table(struct bnx2x *bp)
5616 {
5617         int func = BP_FUNC(bp);
5618         int i;
5619
5620         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
5621                 return;
5622
5623         DP(NETIF_MSG_IFUP,
5624            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
5625         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5626                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5627                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
5628                         bp->fp->cl_id + (i % bp->num_queues));
5629 }
5630
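/* Write the per-client TSTORM configuration (MTU, statistics counter
 * id and VLAN-removal flags) for every queue's client id.
 */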
5631 static void bnx2x_set_client_config(struct bnx2x *bp)
5632 {
5633         struct tstorm_eth_client_config tstorm_client = {0};
5634         int port = BP_PORT(bp);
5635         int i;
5636
5637         tstorm_client.mtu = bp->dev->mtu;
5638         tstorm_client.config_flags =
5639                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5640                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
5641 #ifdef BCM_VLAN
5642         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
5643                 tstorm_client.config_flags |=
5644                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
5645                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5646         }
5647 #endif
5648
5649         for_each_queue(bp, i) {
5650                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5651
5652                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5653                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
5654                        ((u32 *)&tstorm_client)[0]);
5655                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5656                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
5657                        ((u32 *)&tstorm_client)[1]);
5658         }
5659
5660         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5661            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
5662 }
5663
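/* Translate bp->rx_mode into the TSTORM MAC filter masks and the NIG
 * LLH drive mask, then refresh the client configuration unless Rx is
 * completely disabled.
 */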
5664 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5665 {
5666         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5667         int mode = bp->rx_mode;
5668         int mask = bp->rx_mode_cl_mask;
5669         int func = BP_FUNC(bp);
5670         int port = BP_PORT(bp);
5671         int i;
5672         /* All but management unicast packets should pass to the host as well */
5673         u32 llh_mask =
5674                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5675                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5676                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5677                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
5678
5679         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
5680
5681         switch (mode) {
5682         case BNX2X_RX_MODE_NONE: /* no Rx */
5683                 tstorm_mac_filter.ucast_drop_all = mask;
5684                 tstorm_mac_filter.mcast_drop_all = mask;
5685                 tstorm_mac_filter.bcast_drop_all = mask;
5686                 break;
5687
5688         case BNX2X_RX_MODE_NORMAL:
5689                 tstorm_mac_filter.bcast_accept_all = mask;
5690                 break;
5691
5692         case BNX2X_RX_MODE_ALLMULTI:
5693                 tstorm_mac_filter.mcast_accept_all = mask;
5694                 tstorm_mac_filter.bcast_accept_all = mask;
5695                 break;
5696
5697         case BNX2X_RX_MODE_PROMISC:
5698                 tstorm_mac_filter.ucast_accept_all = mask;
5699                 tstorm_mac_filter.mcast_accept_all = mask;
5700                 tstorm_mac_filter.bcast_accept_all = mask;
5701                 /* pass management unicast packets as well */
5702                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
5703                 break;
5704
5705         default:
5706                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5707                 break;
5708         }
5709
5710         REG_WR(bp,
5711                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5712                llh_mask);
5713
5714         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5715                 REG_WR(bp, BAR_TSTRORM_INTMEM +
5716                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
5717                        ((u32 *)&tstorm_mac_filter)[i]);
5718
5719 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5720                    ((u32 *)&tstorm_mac_filter)[i]); */
5721         }
5722
5723         if (mode != BNX2X_RX_MODE_NONE)
5724                 bnx2x_set_client_config(bp);
5725 }
5726
5727 static void bnx2x_init_internal_common(struct bnx2x *bp)
5728 {
5729         int i;
5730
5731         /* Zero this manually as its initialization is
5732            currently missing in the initTool */
5733         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5734                 REG_WR(bp, BAR_USTRORM_INTMEM +
5735                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
5736 }
5737
5738 static void bnx2x_init_internal_port(struct bnx2x *bp)
5739 {
5740         int port = BP_PORT(bp);
5741
5742         REG_WR(bp,
5743                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5744         REG_WR(bp,
5745                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
5746         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5747         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5748 }
5749
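/* Per-function internal memory init: RSS/TPA configuration, initial
 * storm Rx mode, per-client statistics reset, statistics collection
 * addresses, CQE page mappings, dropless flow control thresholds on
 * E1H and the rate shaping/fairness (cmng) contexts.
 */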
5750 static void bnx2x_init_internal_func(struct bnx2x *bp)
5751 {
5752         struct tstorm_eth_function_common_config tstorm_config = {0};
5753         struct stats_indication_flags stats_flags = {0};
5754         int port = BP_PORT(bp);
5755         int func = BP_FUNC(bp);
5756         int i, j;
5757         u32 offset;
5758         u16 max_agg_size;
5759
5760         tstorm_config.config_flags = RSS_FLAGS(bp);
5761
5762         if (is_multi(bp))
5763                 tstorm_config.rss_result_mask = MULTI_MASK;
5764
5765         /* Enable TPA if needed */
5766         if (bp->flags & TPA_ENABLE_FLAG)
5767                 tstorm_config.config_flags |=
5768                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5769
5770         if (IS_E1HMF(bp))
5771                 tstorm_config.config_flags |=
5772                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5773
5774         tstorm_config.leading_client_id = BP_L_ID(bp);
5775
5776         REG_WR(bp, BAR_TSTRORM_INTMEM +
5777                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5778                (*(u32 *)&tstorm_config));
5779
5780         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5781         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5782         bnx2x_set_storm_rx_mode(bp);
5783
5784         for_each_queue(bp, i) {
5785                 u8 cl_id = bp->fp[i].cl_id;
5786
5787                 /* reset xstorm per client statistics */
5788                 offset = BAR_XSTRORM_INTMEM +
5789                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5790                 for (j = 0;
5791                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5792                         REG_WR(bp, offset + j*4, 0);
5793
5794                 /* reset tstorm per client statistics */
5795                 offset = BAR_TSTRORM_INTMEM +
5796                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5797                 for (j = 0;
5798                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5799                         REG_WR(bp, offset + j*4, 0);
5800
5801                 /* reset ustorm per client statistics */
5802                 offset = BAR_USTRORM_INTMEM +
5803                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5804                 for (j = 0;
5805                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5806                         REG_WR(bp, offset + j*4, 0);
5807         }
5808
5809         /* Init statistics related context */
5810         stats_flags.collect_eth = 1;
5811
5812         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5813                ((u32 *)&stats_flags)[0]);
5814         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5815                ((u32 *)&stats_flags)[1]);
5816
5817         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5818                ((u32 *)&stats_flags)[0]);
5819         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5820                ((u32 *)&stats_flags)[1]);
5821
5822         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5823                ((u32 *)&stats_flags)[0]);
5824         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5825                ((u32 *)&stats_flags)[1]);
5826
5827         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5828                ((u32 *)&stats_flags)[0]);
5829         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5830                ((u32 *)&stats_flags)[1]);
5831
5832         REG_WR(bp, BAR_XSTRORM_INTMEM +
5833                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5834                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5835         REG_WR(bp, BAR_XSTRORM_INTMEM +
5836                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5837                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5838
5839         REG_WR(bp, BAR_TSTRORM_INTMEM +
5840                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5841                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5842         REG_WR(bp, BAR_TSTRORM_INTMEM +
5843                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5844                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5845
5846         REG_WR(bp, BAR_USTRORM_INTMEM +
5847                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5848                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5849         REG_WR(bp, BAR_USTRORM_INTMEM +
5850                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5851                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5852
5853         if (CHIP_IS_E1H(bp)) {
5854                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5855                         IS_E1HMF(bp));
5856                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5857                         IS_E1HMF(bp));
5858                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5859                         IS_E1HMF(bp));
5860                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5861                         IS_E1HMF(bp));
5862
5863                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5864                          bp->e1hov);
5865         }
5866
5867         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
5868         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
5869                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
5870         for_each_queue(bp, i) {
5871                 struct bnx2x_fastpath *fp = &bp->fp[i];
5872
5873                 REG_WR(bp, BAR_USTRORM_INTMEM +
5874                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5875                        U64_LO(fp->rx_comp_mapping));
5876                 REG_WR(bp, BAR_USTRORM_INTMEM +
5877                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5878                        U64_HI(fp->rx_comp_mapping));
5879
5880                 /* Next page */
5881                 REG_WR(bp, BAR_USTRORM_INTMEM +
5882                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5883                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5884                 REG_WR(bp, BAR_USTRORM_INTMEM +
5885                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5886                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5887
5888                 REG_WR16(bp, BAR_USTRORM_INTMEM +
5889                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5890                          max_agg_size);
5891         }
5892
5893         /* dropless flow control */
5894         if (CHIP_IS_E1H(bp)) {
5895                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5896
5897                 rx_pause.bd_thr_low = 250;
5898                 rx_pause.cqe_thr_low = 250;
5899                 rx_pause.cos = 1;
5900                 rx_pause.sge_thr_low = 0;
5901                 rx_pause.bd_thr_high = 350;
5902                 rx_pause.cqe_thr_high = 350;
5903                 rx_pause.sge_thr_high = 0;
5904
5905                 for_each_queue(bp, i) {
5906                         struct bnx2x_fastpath *fp = &bp->fp[i];
5907
5908                         if (!fp->disable_tpa) {
5909                                 rx_pause.sge_thr_low = 150;
5910                                 rx_pause.sge_thr_high = 250;
5911                         }
5912
5914                         offset = BAR_USTRORM_INTMEM +
5915                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5916                                                                    fp->cl_id);
5917                         for (j = 0;
5918                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5919                              j++)
5920                                 REG_WR(bp, offset + j*4,
5921                                        ((u32 *)&rx_pause)[j]);
5922                 }
5923         }
5924
5925         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5926
5927         /* Init rate shaping and fairness contexts */
5928         if (IS_E1HMF(bp)) {
5929                 int vn;
5930
5931                 /* During init there is no active link
5932                    Until link is up, set link rate to 10Gbps */
5933                 bp->link_vars.line_speed = SPEED_10000;
5934                 bnx2x_init_port_minmax(bp);
5935
5936                 if (!BP_NOMCP(bp))
5937                         bp->mf_config =
5938                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5939                 bnx2x_calc_vn_weight_sum(bp);
5940
5941                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5942                         bnx2x_init_vn_minmax(bp, 2*vn + port);
5943
5944                 /* Enable rate shaping and fairness */
5945                 bp->cmng.flags.cmng_enables |=
5946                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5947
5948         } else {
5949                 /* rate shaping and fairness are disabled */
5950                 DP(NETIF_MSG_IFUP,
5951                    "single function mode  minmax will be disabled\n");
5952         }
5953
5955         /* Store cmng structures to internal memory */
5956         if (bp->port.pmf)
5957                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5958                         REG_WR(bp, BAR_XSTRORM_INTMEM +
5959                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5960                                ((u32 *)(&bp->cmng))[i]);
5961 }
5962
5963 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5964 {
5965         switch (load_code) {
5966         case FW_MSG_CODE_DRV_LOAD_COMMON:
5967                 bnx2x_init_internal_common(bp);
5968                 /* no break */
5969
5970         case FW_MSG_CODE_DRV_LOAD_PORT:
5971                 bnx2x_init_internal_port(bp);
5972                 /* no break */
5973
5974         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5975                 bnx2x_init_internal_func(bp);
5976                 break;
5977
5978         default:
5979                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5980                 break;
5981         }
5982 }
5983
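/* Top-level NIC init: set up the fastpath structures and status
 * blocks, then the rings, contexts and internal memories, and finally
 * enable interrupts and handle a possibly pending SPIO5 (fan failure)
 * attention.
 */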
5984 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5985 {
5986         int i;
5987
5988         for_each_queue(bp, i) {
5989                 struct bnx2x_fastpath *fp = &bp->fp[i];
5990
5991                 fp->bp = bp;
5992                 fp->state = BNX2X_FP_STATE_CLOSED;
5993                 fp->index = i;
5994                 fp->cl_id = BP_L_ID(bp) + i;
5995 #ifdef BCM_CNIC
5996                 fp->sb_id = fp->cl_id + 1;
5997 #else
5998                 fp->sb_id = fp->cl_id;
5999 #endif
6000                 DP(NETIF_MSG_IFUP,
6001                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
6002                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
6003                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
6004                               fp->sb_id);
6005                 bnx2x_update_fpsb_idx(fp);
6006         }
6007
6008         /* ensure status block indices were read */
6009         rmb();
6010
6012         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
6013                           DEF_SB_ID);
6014         bnx2x_update_dsb_idx(bp);
6015         bnx2x_update_coalesce(bp);
6016         bnx2x_init_rx_rings(bp);
6017         bnx2x_init_tx_ring(bp);
6018         bnx2x_init_sp_ring(bp);
6019         bnx2x_init_context(bp);
6020         bnx2x_init_internal(bp, load_code);
6021         bnx2x_init_ind_table(bp);
6022         bnx2x_stats_init(bp);
6023
6024         /* At this point, we are ready for interrupts */
6025         atomic_set(&bp->intr_sem, 0);
6026
6027         /* flush all before enabling interrupts */
6028         mb();
6029         mmiowb();
6030
6031         bnx2x_int_enable(bp);
6032
6033         /* Check for SPIO5 */
6034         bnx2x_attn_int_deasserted0(bp,
6035                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6036                                    AEU_INPUTS_ATTN_BITS_SPIO5);
6037 }
6038
6039 /* end of nic init */
6040
6041 /*
6042  * gzip service functions
6043  */
6044
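/* Allocate the DMA-coherent buffer and zlib stream used for FW image
 * decompression; undone by bnx2x_gunzip_end().
 */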
6045 static int bnx2x_gunzip_init(struct bnx2x *bp)
6046 {
6047         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6048                                             &bp->gunzip_mapping, GFP_KERNEL);
6049         if (bp->gunzip_buf == NULL)
6050                 goto gunzip_nomem1;
6051
6052         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6053         if (bp->strm == NULL)
6054                 goto gunzip_nomem2;
6055
6056         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
6057                                       GFP_KERNEL);
6058         if (bp->strm->workspace == NULL)
6059                 goto gunzip_nomem3;
6060
6061         return 0;
6062
6063 gunzip_nomem3:
6064         kfree(bp->strm);
6065         bp->strm = NULL;
6066
6067 gunzip_nomem2:
6068         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6069                           bp->gunzip_mapping);
6070         bp->gunzip_buf = NULL;
6071
6072 gunzip_nomem1:
6073         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
6074                " decompression\n");
6075         return -ENOMEM;
6076 }
6077
6078 static void bnx2x_gunzip_end(struct bnx2x *bp)
6079 {
6080         kfree(bp->strm->workspace);
6081
6082         kfree(bp->strm);
6083         bp->strm = NULL;
6084
6085         if (bp->gunzip_buf) {
6086                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6087                                   bp->gunzip_mapping);
6088                 bp->gunzip_buf = NULL;
6089         }
6090 }
6091
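/* Inflate a gzip image into bp->gunzip_buf: validate the gzip header,
 * skip the optional file name and run a raw zlib inflate. On success
 * bp->gunzip_outlen holds the decompressed length in dwords.
 */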
6092 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
6093 {
6094         int n, rc;
6095
6096         /* check gzip header */
6097         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6098                 BNX2X_ERR("Bad gzip header\n");
6099                 return -EINVAL;
6100         }
6101
6102         n = 10;
6103
6104 #define FNAME                           0x8
6105
6106         if (zbuf[3] & FNAME)
6107                 while ((zbuf[n++] != 0) && (n < len));
6108
6109         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
6110         bp->strm->avail_in = len - n;
6111         bp->strm->next_out = bp->gunzip_buf;
6112         bp->strm->avail_out = FW_BUF_SIZE;
6113
6114         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6115         if (rc != Z_OK)
6116                 return rc;
6117
6118         rc = zlib_inflate(bp->strm, Z_FINISH);
6119         if ((rc != Z_OK) && (rc != Z_STREAM_END))
6120                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6121                            bp->strm->msg);
6122
6123         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6124         if (bp->gunzip_outlen & 0x3)
6125                 netdev_err(bp->dev, "Firmware decompression error:"
6126                                     " gunzip_outlen (%d) not aligned\n",
6127                                 bp->gunzip_outlen);
6128         bp->gunzip_outlen >>= 2;
6129
6130         zlib_inflateEnd(bp->strm);
6131
6132         if (rc == Z_STREAM_END)
6133                 return 0;
6134
6135         return rc;
6136 }
6137
6138 /* nic load/unload */
6139
6140 /*
6141  * General service functions
6142  */
6143
6144 /* send a NIG loopback debug packet */
6145 static void bnx2x_lb_pckt(struct bnx2x *bp)
6146 {
6147         u32 wb_write[3];
6148
6149         /* Ethernet source and destination addresses */
6150         wb_write[0] = 0x55555555;
6151         wb_write[1] = 0x55555555;
6152         wb_write[2] = 0x20;             /* SOP */
6153         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6154
6155         /* NON-IP protocol */
6156         wb_write[0] = 0x09000000;
6157         wb_write[1] = 0x55555555;
6158         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
6159         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
6160 }
6161
6162 /* some of the internal memories
6163  * are not directly readable from the driver,
6164  * so to test them we send debug packets
6165  */
6166 static int bnx2x_int_mem_test(struct bnx2x *bp)
6167 {
6168         int factor;
6169         int count, i;
6170         u32 val = 0;
6171
6172         if (CHIP_REV_IS_FPGA(bp))
6173                 factor = 120;
6174         else if (CHIP_REV_IS_EMUL(bp))
6175                 factor = 200;
6176         else
6177                 factor = 1;
6178
6179         DP(NETIF_MSG_HW, "start part1\n");
6180
6181         /* Disable inputs of parser neighbor blocks */
6182         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6183         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6184         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6185         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6186
6187         /*  Write 0 to parser credits for CFC search request */
6188         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6189
6190         /* send Ethernet packet */
6191         bnx2x_lb_pckt(bp);
6192
6193         /* TODO: should the NIG statistics be reset here? */
6194         /* Wait until NIG register shows 1 packet of size 0x10 */
6195         count = 1000 * factor;
6196         while (count) {
6197
6198                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6199                 val = *bnx2x_sp(bp, wb_data[0]);
6200                 if (val == 0x10)
6201                         break;
6202
6203                 msleep(10);
6204                 count--;
6205         }
6206         if (val != 0x10) {
6207                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6208                 return -1;
6209         }
6210
6211         /* Wait until PRS register shows 1 packet */
6212         count = 1000 * factor;
6213         while (count) {
6214                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6215                 if (val == 1)
6216                         break;
6217
6218                 msleep(10);
6219                 count--;
6220         }
6221         if (val != 0x1) {
6222                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6223                 return -2;
6224         }
6225
6226         /* Reset and init BRB, PRS */
6227         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6228         msleep(50);
6229         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6230         msleep(50);
6231         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6232         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6233
6234         DP(NETIF_MSG_HW, "part2\n");
6235
6236         /* Disable inputs of parser neighbor blocks */
6237         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6238         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6239         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
6240         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
6241
6242         /* Write 0 to parser credits for CFC search request */
6243         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6244
6245         /* send 10 Ethernet packets */
6246         for (i = 0; i < 10; i++)
6247                 bnx2x_lb_pckt(bp);
6248
6249         /* Wait until NIG register shows 10 + 1
6250            packets of size 11*0x10 = 0xb0 */
6251         count = 1000 * factor;
6252         while (count) {
6253
6254                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6255                 val = *bnx2x_sp(bp, wb_data[0]);
6256                 if (val == 0xb0)
6257                         break;
6258
6259                 msleep(10);
6260                 count--;
6261         }
6262         if (val != 0xb0) {
6263                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
6264                 return -3;
6265         }
6266
6267         /* Wait until PRS register shows 2 packets */
6268         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6269         if (val != 2)
6270                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6271
6272         /* Write 1 to parser credits for CFC search request */
6273         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6274
6275         /* Wait until PRS register shows 3 packets */
6276         msleep(10 * factor);
6277         /* check that the PRS now reports 3 packets */
6278         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6279         if (val != 3)
6280                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
6281
6282         /* clear NIG EOP FIFO */
6283         for (i = 0; i < 11; i++)
6284                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6285         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6286         if (val != 1) {
6287                 BNX2X_ERR("clear of NIG failed\n");
6288                 return -4;
6289         }
6290
6291         /* Reset and init BRB, PRS, NIG */
6292         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6293         msleep(50);
6294         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6295         msleep(50);
6296         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6297         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6298 #ifndef BCM_CNIC
6299         /* set NIC mode */
6300         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6301 #endif
6302
6303         /* Enable inputs of parser neighbor blocks */
6304         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6305         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6306         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6307         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
6308
6309         DP(NETIF_MSG_HW, "done\n");
6310
6311         return 0; /* OK */
6312 }
6313
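/* Unmask the attention interrupts of the HW blocks; a mask of 0
 * enables all sources of a block, while PXP2 and PBF keep selected
 * bits masked.
 */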
6314 static void enable_blocks_attention(struct bnx2x *bp)
6315 {
6316         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6317         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6318         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6319         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6320         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6321         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6322         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6323         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6324         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6325 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6326 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6327         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6328         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6329         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6330 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6331 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6332         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6333         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6334         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6335         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6336 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6337 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6338         if (CHIP_REV_IS_FPGA(bp))
6339                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
6340         else
6341                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
6342         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6343         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6344         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6345 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6346 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6347         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6348         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6349 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6350         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bits 3,4 masked */
6351 }
6352
6353 static const struct {
6354         u32 addr;
6355         u32 mask;
6356 } bnx2x_parity_mask[] = {
6357         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
6358         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
6359         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
6360         {HC_REG_HC_PRTY_MASK, 0xffffffff},
6361         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
6362         {QM_REG_QM_PRTY_MASK, 0x0},
6363         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
6364         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
6365         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
6366         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
6367         {CDU_REG_CDU_PRTY_MASK, 0x0},
6368         {CFC_REG_CFC_PRTY_MASK, 0x0},
6369         {DBG_REG_DBG_PRTY_MASK, 0x0},
6370         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
6371         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
6372         {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
6373         {TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
6374         {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
6375         {USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
6376         {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
6377         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
6378         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
6379         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
6380         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
6381         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
6382         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
6383         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
6384         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
6385 };
6386
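/* Write the per-block parity masks from bnx2x_parity_mask; a mask of
 * 0 enables all parity sources of that block.
 */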
6387 static void enable_blocks_parity(struct bnx2x *bp)
6388 {
6389         int i;
6390
6391         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
6393                 REG_WR(bp, bnx2x_parity_mask[i].addr,
6394                         bnx2x_parity_mask[i].mask);
6395 }
6396
6398 static void bnx2x_reset_common(struct bnx2x *bp)
6399 {
6400         /* reset_common */
6401         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6402                0xd3ffff7f);
6403         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6404 }
6405
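/* Derive the PXP arbiter write order from the PCIe max payload size
 * and the read order from the max read request size (or the mrrs
 * module parameter) and program the arbiter accordingly.
 */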
6406 static void bnx2x_init_pxp(struct bnx2x *bp)
6407 {
6408         u16 devctl;
6409         int r_order, w_order;
6410
6411         pci_read_config_word(bp->pdev,
6412                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6413         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6414         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6415         if (bp->mrrs == -1)
6416                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6417         else {
6418                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6419                 r_order = bp->mrrs;
6420         }
6421
6422         bnx2x_init_pxp_arb(bp, r_order, w_order);
6423 }
6424
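/* Enable fan failure detection via the SPIO5 attention when the
 * shared HW configuration, or the external PHY type on either port,
 * requires it.
 */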
6425 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6426 {
6427         int is_required;
6428         u32 val;
6429         int port;
6430
6431         if (BP_NOMCP(bp))
6432                 return;
6433
6434         is_required = 0;
6435         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6436               SHARED_HW_CFG_FAN_FAILURE_MASK;
6437
6438         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6439                 is_required = 1;
6440
6441         /*
6442          * The fan failure mechanism is usually related to the PHY type since
6443          * the power consumption of the board is affected by the PHY. Currently,
6444          * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6445          */
6446         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6447                 for (port = PORT_0; port < PORT_MAX; port++) {
6448                         u32 phy_type =
6449                                 SHMEM_RD(bp, dev_info.port_hw_config[port].
6450                                          external_phy_config) &
6451                                 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6452                         is_required |=
6453                                 ((phy_type ==
6454                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6455                                  (phy_type ==
6456                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6457                                  (phy_type ==
6458                                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6459                 }
6460
6461         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6462
6463         if (is_required == 0)
6464                 return;
6465
6466         /* Fan failure is indicated by SPIO 5 */
6467         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6468                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
6469
6470         /* set to active low mode */
6471         val = REG_RD(bp, MISC_REG_SPIO_INT);
6472         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6473                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6474         REG_WR(bp, MISC_REG_SPIO_INT, val);
6475
6476         /* enable interrupt to signal the IGU */
6477         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6478         val |= (1 << MISC_REGISTERS_SPIO_5);
6479         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6480 }
6481
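/* Common (chip-wide) init stage: reset and bring up the shared HW
 * blocks, run the internal memory self test on a first power-up of an
 * E1 and initialize the PHY through the MCP when bootcode is present.
 */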
6482 static int bnx2x_init_common(struct bnx2x *bp)
6483 {
6484         u32 val, i;
6485 #ifdef BCM_CNIC
6486         u32 wb_write[2];
6487 #endif
6488
6489         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
6490
6491         bnx2x_reset_common(bp);
6492         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6493         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6494
6495         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6496         if (CHIP_IS_E1H(bp))
6497                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6498
6499         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6500         msleep(30);
6501         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6502
6503         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6504         if (CHIP_IS_E1(bp)) {
6505                 /* enable HW interrupt from PXP on USDM overflow
6506                    bit 16 on INT_MASK_0 */
6507                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6508         }
6509
6510         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6511         bnx2x_init_pxp(bp);
6512
6513 #ifdef __BIG_ENDIAN
6514         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6515         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6516         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6517         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6518         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6519         /* make sure this value is 0 */
6520         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6521
6522 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6523         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6524         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6525         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6526         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6527 #endif
6528
6529         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6530 #ifdef BCM_CNIC
6531         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6532         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6533         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6534 #endif
6535
6536         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6537                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6538
6539         /* let the HW do its magic ... */
6540         msleep(100);
6541         /* finish PXP init */
6542         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6543         if (val != 1) {
6544                 BNX2X_ERR("PXP2 CFG failed\n");
6545                 return -EBUSY;
6546         }
6547         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6548         if (val != 1) {
6549                 BNX2X_ERR("PXP2 RD_INIT failed\n");
6550                 return -EBUSY;
6551         }
6552
6553         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6554         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6555
6556         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6557
6558         /* clean the DMAE memory */
6559         bp->dmae_ready = 1;
6560         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6561
6562         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6563         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6564         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6565         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6566
6567         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6568         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6569         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6570         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6571
6572         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6573
6574 #ifdef BCM_CNIC
6575         wb_write[0] = 0;
6576         wb_write[1] = 0;
6577         for (i = 0; i < 64; i++) {
6578                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6579                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6580
6581                 if (CHIP_IS_E1H(bp)) {
6582                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6583                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6584                                           wb_write, 2);
6585                 }
6586         }
6587 #endif
6588         /* soft reset pulse */
6589         REG_WR(bp, QM_REG_SOFT_RESET, 1);
6590         REG_WR(bp, QM_REG_SOFT_RESET, 0);
6591
6592 #ifdef BCM_CNIC
6593         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6594 #endif
6595
6596         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6597         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6598         if (!CHIP_REV_IS_SLOW(bp)) {
6599                 /* enable hw interrupt from doorbell Q */
6600                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6601         }
6602
6603         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6604         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6605         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6606 #ifndef BCM_CNIC
6607         /* set NIC mode */
6608         REG_WR(bp, PRS_REG_NIC_MODE, 1);
6609 #endif
6610         if (CHIP_IS_E1H(bp))
6611                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6612
6613         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6614         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6615         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6616         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6617
6618         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6620         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6621         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6622
6623         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6624         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6625         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6626         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6627
6628         /* sync semi rtc */
6629         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6630                0x80000000);
6631         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6632                0x80000000);
6633
6634         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6635         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6636         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6637
6638         REG_WR(bp, SRC_REG_SOFT_RST, 1);
6639         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6640                 REG_WR(bp, i, random32());
6641         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6642 #ifdef BCM_CNIC
6643         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6644         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6645         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6646         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6647         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6648         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6649         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6650         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6651         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6652         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6653 #endif
6654         REG_WR(bp, SRC_REG_SOFT_RST, 0);
6655
6656         if (sizeof(union cdu_context) != 1024)
6657                 /* we currently assume that a context is 1024 bytes */
6658                 dev_alert(&bp->pdev->dev, "please adjust the size "
6659                                           "of cdu_context(%ld)\n",
6660                          (long)sizeof(union cdu_context));
6661
6662         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6663         val = (4 << 24) + (0 << 12) + 1024;
6664         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6665
6666         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6667         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6668         /* enable context validation interrupt from CFC */
6669         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6670
6671         /* set the thresholds to prevent CFC/CDU race */
6672         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6673
6674         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6675         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6676
6677         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6678         /* Reset PCIE errors for debug */
6679         REG_WR(bp, 0x2814, 0xffffffff);
6680         REG_WR(bp, 0x3820, 0xffffffff);
6681
6682         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6683         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6684         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6685         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6686
6687         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6688         if (CHIP_IS_E1H(bp)) {
6689                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6690                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6691         }
6692
6693         if (CHIP_REV_IS_SLOW(bp))
6694                 msleep(200);
6695
6696         /* finish CFC init */
6697         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6698         if (val != 1) {
6699                 BNX2X_ERR("CFC LL_INIT failed\n");
6700                 return -EBUSY;
6701         }
6702         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6703         if (val != 1) {
6704                 BNX2X_ERR("CFC AC_INIT failed\n");
6705                 return -EBUSY;
6706         }
6707         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6708         if (val != 1) {
6709                 BNX2X_ERR("CFC CAM_INIT failed\n");
6710                 return -EBUSY;
6711         }
6712         REG_WR(bp, CFC_REG_DEBUG0, 0);
6713
6714         /* read NIG statistic
6715            to see if this is our first up since powerup */
6716         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6717         val = *bnx2x_sp(bp, wb_data[0]);
6718
6719         /* do internal memory self test */
6720         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6721                 BNX2X_ERR("internal mem self test failed\n");
6722                 return -EBUSY;
6723         }
6724
6725         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6726         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6727         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6728         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6729         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6730                 bp->port.need_hw_lock = 1;
6731                 break;
6732
6733         default:
6734                 break;
6735         }
6736
6737         bnx2x_setup_fan_failure_detection(bp);
6738
6739         /* clear PXP2 attentions */
6740         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6741
6742         enable_blocks_attention(bp);
6743         if (CHIP_PARITY_SUPPORTED(bp))
6744                 enable_blocks_parity(bp);
6745
6746         if (!BP_NOMCP(bp)) {
6747                 bnx2x_acquire_phy_lock(bp);
6748                 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6749                 bnx2x_release_phy_lock(bp);
6750         } else
6751                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6752
6753         return 0;
6754 }
6755
6756 static int bnx2x_init_port(struct bnx2x *bp)
6757 {
6758         int port = BP_PORT(bp);
6759         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6760         u32 low, high;
6761         u32 val;
6762
6763         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
6764
6765         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6766
6767         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6768         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6769
6770         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6771         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6772         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6773         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6774
6775 #ifdef BCM_CNIC
6776         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6777
6778         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6779         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6780         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6781 #endif
6782
6783         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6784
6785         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6786         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6787                 /* no pause for emulation and FPGA */
6788                 low = 0;
6789                 high = 513;
6790         } else {
6791                 if (IS_E1HMF(bp))
6792                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6793                 else if (bp->dev->mtu > 4096) {
6794                         if (bp->flags & ONE_PORT_FLAG)
6795                                 low = 160;
6796                         else {
6797                                 val = bp->dev->mtu;
6798                                 /* (24*1024 + val*4)/256 */
6799                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6800                         }
6801                 } else
6802                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6803                 high = low + 56;        /* 14*1024/256 */
6804         }
6805         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6806         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
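        /* worked example of the thresholds above (units appear to be
         * 256-byte BRB blocks, per the /256 in the formula): in SF mode on
         * a two-port device with mtu 9000, low = 96 + 9000/64 + 1 = 237
         * and high = 237 + 56 = 293 */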
6807
6808
6809         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6810
6811         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6812         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6813         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6814         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6815
6816         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6817         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6818         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6819         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6820
6821         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6822         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6823
6824         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6825
6826         /* configure PBF to work without PAUSE mtu 9000 */
6827         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6828
6829         /* update threshold */
6830         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6831         /* update init credit */
6832         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
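        /* arithmetic for the two writes above: 9040 bytes presumably covers
         * an mtu-9000 frame plus overhead, so the arbiter threshold is
         * 9040/16 = 565 16-byte units and the initial credit is
         * 565 + 553 - 22 = 1096 (the 553 and 22 are taken as-is from the
         * init sequence) */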
6833
6834         /* probe changes */
6835         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6836         msleep(5);
6837         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6838
6839 #ifdef BCM_CNIC
6840         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6841 #endif
6842         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6843         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6844
6845         if (CHIP_IS_E1(bp)) {
6846                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6847                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6848         }
6849         bnx2x_init_block(bp, HC_BLOCK, init_stage);
6850
6851         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6852         /* init aeu_mask_attn_func_0/1:
6853          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6854          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6855          *             bits 4-7 are used for "per vn group attention" */
6856         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6857                (IS_E1HMF(bp) ? 0xF7 : 0x7));
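        /* 0x7  = 0000 0111: only bits 0-2 enabled (SF mode);
         * 0xF7 = 1111 0111: bit 3 masked, bits 4-7 enabled for the per-vn
         * group attentions (MF mode) */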
6858
6859         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6860         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6861         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6862         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6863         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6864
6865         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6866
6867         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6868
6869         if (CHIP_IS_E1H(bp)) {
6870                 /* 0x2 disable e1hov, 0x1 enable */
6871                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6872                        (IS_E1HMF(bp) ? 0x1 : 0x2));
6873
6874                 {
6875                         REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6876                         REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6877                         REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6878                 }
6879         }
6880
6881         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6882         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6883
6884         switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6885         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6886                 {
6887                 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6888
6889                 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6890                                MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6891
6892                 /* The GPIO should be swapped if the swap register is
6893                    set and active */
6894                 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6895                 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6896
6897                 /* Select function upon port-swap configuration */
6898                 if (port == 0) {
6899                         offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6900                         aeu_gpio_mask = (swap_val && swap_override) ?
6901                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6902                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6903                 } else {
6904                         offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6905                         aeu_gpio_mask = (swap_val && swap_override) ?
6906                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6907                                 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6908                 }
6909                 val = REG_RD(bp, offset);
6910                 /* add GPIO3 to group */
6911                 val |= aeu_gpio_mask;
6912                 REG_WR(bp, offset, val);
6913                 }
6914                 break;
6915
6916         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6917         case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6918                 /* add SPIO 5 to group 0 */
6919                 {
6920                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6921                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6922                 val = REG_RD(bp, reg_addr);
6923                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6924                 REG_WR(bp, reg_addr, val);
6925                 }
6926                 break;
6927
6928         default:
6929                 break;
6930         }
6931
6932         bnx2x__link_reset(bp);
6933
6934         return 0;
6935 }
6936
6937 #define ILT_PER_FUNC            (768/2)
6938 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
6939 /* the phys address is shifted right 12 bits and has a
6940    1=valid bit added at the 53rd bit
6941    then since this is a wide register(TM)
6942    we split it into two 32 bit writes
6943  */
6944 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6945 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
6946 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
6947 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
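/* worked example, assuming a 64-bit DMA address of 0x0000012345678000:
 * ONCHIP_ADDR1 = (addr >> 12) & 0xFFFFFFFF = 0x12345678 (low half of the
 * page-aligned address) and ONCHIP_ADDR2 = (1 << 20) | (addr >> 44) =
 * 0x00100000 (valid bit plus the remaining high bits, here zero).
 * PXP_ONE_ILT(x) packs a single line as both first and last of a range,
 * e.g. PXP_ONE_ILT(5) = (5 << 10) | 5; PXP_ILT_RANGE(f, l) packs an
 * inclusive [f, l] pair the same way. */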
6948
6949 #ifdef BCM_CNIC
6950 #define CNIC_ILT_LINES          127
6951 #define CNIC_CTX_PER_ILT        16
6952 #else
6953 #define CNIC_ILT_LINES          0
6954 #endif
6955
6956 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6957 {
6958         int reg;
6959
6960         if (CHIP_IS_E1H(bp))
6961                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6962         else /* E1 */
6963                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6964
6965         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6966 }
6967
6968 static int bnx2x_init_func(struct bnx2x *bp)
6969 {
6970         int port = BP_PORT(bp);
6971         int func = BP_FUNC(bp);
6972         u32 addr, val;
6973         int i;
6974
6975         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
6976
6977         /* set MSI reconfigure capability */
6978         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6979         val = REG_RD(bp, addr);
6980         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6981         REG_WR(bp, addr, val);
6982
6983         i = FUNC_ILT_BASE(func);
6984
6985         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6986         if (CHIP_IS_E1H(bp)) {
6987                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6988                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6989         } else /* E1 */
6990                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6991                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6992
6993 #ifdef BCM_CNIC
6994         i += 1 + CNIC_ILT_LINES;
6995         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6996         if (CHIP_IS_E1(bp))
6997                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6998         else {
6999                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7000                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7001         }
7002
7003         i++;
7004         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7005         if (CHIP_IS_E1(bp))
7006                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7007         else {
7008                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7009                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7010         }
7011
7012         i++;
7013         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7014         if (CHIP_IS_E1(bp))
7015                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7016         else {
7017                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7018                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7019         }
7020
7021         /* tell the searcher where the T2 table is */
7022         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7023
7024         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7025                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7026
7027         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7028                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7029                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7030
7031         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7032 #endif
7033
7034         if (CHIP_IS_E1H(bp)) {
7035                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7036                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7037                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7038                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7039                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7040                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7041                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7042                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7043                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7044
7045                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7046                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7047         }
7048
7049         /* HC init per function */
7050         if (CHIP_IS_E1H(bp)) {
7051                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7052
7053                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7054                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7055         }
7056         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7057
7058         /* Reset PCIE errors for debug */
7059         REG_WR(bp, 0x2114, 0xffffffff);
7060         REG_WR(bp, 0x2120, 0xffffffff);
7061
7062         return 0;
7063 }
7064
7065 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7066 {
7067         int i, rc = 0;
7068
7069         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
7070            BP_FUNC(bp), load_code);
7071
7072         bp->dmae_ready = 0;
7073         mutex_init(&bp->dmae_mutex);
7074         rc = bnx2x_gunzip_init(bp);
7075         if (rc)
7076                 return rc;
7077
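        /* the switch below relies on fallthrough: a COMMON load also runs
         * the PORT and FUNCTION stages, and a PORT load also runs the
         * FUNCTION stage (hence the "no break" comments) */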
7078         switch (load_code) {
7079         case FW_MSG_CODE_DRV_LOAD_COMMON:
7080                 rc = bnx2x_init_common(bp);
7081                 if (rc)
7082                         goto init_hw_err;
7083                 /* no break */
7084
7085         case FW_MSG_CODE_DRV_LOAD_PORT:
7086                 bp->dmae_ready = 1;
7087                 rc = bnx2x_init_port(bp);
7088                 if (rc)
7089                         goto init_hw_err;
7090                 /* no break */
7091
7092         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7093                 bp->dmae_ready = 1;
7094                 rc = bnx2x_init_func(bp);
7095                 if (rc)
7096                         goto init_hw_err;
7097                 break;
7098
7099         default:
7100                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7101                 break;
7102         }
7103
7104         if (!BP_NOMCP(bp)) {
7105                 int func = BP_FUNC(bp);
7106
7107                 bp->fw_drv_pulse_wr_seq =
7108                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7109                                  DRV_PULSE_SEQ_MASK);
7110                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7111         }
7112
7113         /* this needs to be done before gunzip end */
7114         bnx2x_zero_def_sb(bp);
7115         for_each_queue(bp, i)
7116                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7117 #ifdef BCM_CNIC
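        /* i retains its value from the loop above (the number of queues),
         * so this clears the CNIC status block that follows the last
         * fastpath one */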
7118         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7119 #endif
7120
7121 init_hw_err:
7122         bnx2x_gunzip_end(bp);
7123
7124         return rc;
7125 }
7126
7127 static void bnx2x_free_mem(struct bnx2x *bp)
7128 {
7129
7130 #define BNX2X_PCI_FREE(x, y, size) \
7131         do { \
7132                 if (x) { \
7133                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
7134                         x = NULL; \
7135                         y = 0; \
7136                 } \
7137         } while (0)
7138
7139 #define BNX2X_FREE(x) \
7140         do { \
7141                 if (x) { \
7142                         vfree(x); \
7143                         x = NULL; \
7144                 } \
7145         } while (0)
7146
7147         int i;
7148
7149         /* fastpath */
7150         /* Common */
7151         for_each_queue(bp, i) {
7152
7153                 /* status blocks */
7154                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7155                                bnx2x_fp(bp, i, status_blk_mapping),
7156                                sizeof(struct host_status_block));
7157         }
7158         /* Rx */
7159         for_each_queue(bp, i) {
7160
7161                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7162                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7163                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7164                                bnx2x_fp(bp, i, rx_desc_mapping),
7165                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
7166
7167                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7168                                bnx2x_fp(bp, i, rx_comp_mapping),
7169                                sizeof(struct eth_fast_path_rx_cqe) *
7170                                NUM_RCQ_BD);
7171
7172                 /* SGE ring */
7173                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7174                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7175                                bnx2x_fp(bp, i, rx_sge_mapping),
7176                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7177         }
7178         /* Tx */
7179         for_each_queue(bp, i) {
7180
7181                 /* fastpath tx rings: tx_buf tx_desc */
7182                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7183                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7184                                bnx2x_fp(bp, i, tx_desc_mapping),
7185                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7186         }
7187         /* end of fastpath */
7188
7189         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7190                        sizeof(struct host_def_status_block));
7191
7192         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7193                        sizeof(struct bnx2x_slowpath));
7194
7195 #ifdef BCM_CNIC
7196         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7197         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7198         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7199         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7200         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7201                        sizeof(struct host_status_block));
7202 #endif
7203         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7204
7205 #undef BNX2X_PCI_FREE
7206 #undef BNX2X_FREE
7207 }
7208
7209 static int bnx2x_alloc_mem(struct bnx2x *bp)
7210 {
7211
7212 #define BNX2X_PCI_ALLOC(x, y, size) \
7213         do { \
7214                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7215                 if (x == NULL) \
7216                         goto alloc_mem_err; \
7217                 memset(x, 0, size); \
7218         } while (0)
7219
7220 #define BNX2X_ALLOC(x, size) \
7221         do { \
7222                 x = vmalloc(size); \
7223                 if (x == NULL) \
7224                         goto alloc_mem_err; \
7225                 memset(x, 0, size); \
7226         } while (0)
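        /* two allocation flavors: BNX2X_PCI_ALLOC returns DMA-coherent
         * memory for rings the chip reads and writes directly, while
         * BNX2X_ALLOC uses vmalloc for driver-private shadow arrays
         * (sw_rx_bd, sw_tx_bd etc.) that the device never touches */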
7227
7228         int i;
7229
7230         /* fastpath */
7231         /* Common */
7232         for_each_queue(bp, i) {
7233                 bnx2x_fp(bp, i, bp) = bp;
7234
7235                 /* status blocks */
7236                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7237                                 &bnx2x_fp(bp, i, status_blk_mapping),
7238                                 sizeof(struct host_status_block));
7239         }
7240         /* Rx */
7241         for_each_queue(bp, i) {
7242
7243                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7244                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7245                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7246                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7247                                 &bnx2x_fp(bp, i, rx_desc_mapping),
7248                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7249
7250                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7251                                 &bnx2x_fp(bp, i, rx_comp_mapping),
7252                                 sizeof(struct eth_fast_path_rx_cqe) *
7253                                 NUM_RCQ_BD);
7254
7255                 /* SGE ring */
7256                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7257                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7258                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7259                                 &bnx2x_fp(bp, i, rx_sge_mapping),
7260                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7261         }
7262         /* Tx */
7263         for_each_queue(bp, i) {
7264
7265                 /* fastpath tx rings: tx_buf tx_desc */
7266                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7267                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7268                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7269                                 &bnx2x_fp(bp, i, tx_desc_mapping),
7270                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7271         }
7272         /* end of fastpath */
7273
7274         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7275                         sizeof(struct host_def_status_block));
7276
7277         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7278                         sizeof(struct bnx2x_slowpath));
7279
7280 #ifdef BCM_CNIC
7281         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7282
7283         /* allocate searcher T2 table
7284            we allocate 1/4 of alloc num for T2
7285            (which is not entered into the ILT) */
7286         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7287
7288         /* Initialize T2 (for 1024 connections) */
7289         for (i = 0; i < 16*1024; i += 64)
7290                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
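        /* the loop above appears to chain the 64-byte T2 entries into a
         * free list: the last 8 bytes of entry i hold the DMA address of
         * entry i+1; the final entry points one past the table, and
         * SRC_REG_LASTFREE0 (programmed in bnx2x_init_func()) marks the
         * real tail */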
7291
7292         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7293         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7294
7295         /* QM queues (128*MAX_CONN) */
7296         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7297
7298         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7299                         sizeof(struct host_status_block));
7300 #endif
7301
7302         /* Slow path ring */
7303         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7304
7305         return 0;
7306
7307 alloc_mem_err:
7308         bnx2x_free_mem(bp);
7309         return -ENOMEM;
7310
7311 #undef BNX2X_PCI_ALLOC
7312 #undef BNX2X_ALLOC
7313 }
7314
7315 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7316 {
7317         int i;
7318
7319         for_each_queue(bp, i) {
7320                 struct bnx2x_fastpath *fp = &bp->fp[i];
7321
7322                 u16 bd_cons = fp->tx_bd_cons;
7323                 u16 sw_prod = fp->tx_pkt_prod;
7324                 u16 sw_cons = fp->tx_pkt_cons;
7325
7326                 while (sw_cons != sw_prod) {
7327                         bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7328                         sw_cons++;
7329                 }
7330         }
7331 }
7332
7333 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7334 {
7335         int i, j;
7336
7337         for_each_queue(bp, j) {
7338                 struct bnx2x_fastpath *fp = &bp->fp[j];
7339
7340                 for (i = 0; i < NUM_RX_BD; i++) {
7341                         struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7342                         struct sk_buff *skb = rx_buf->skb;
7343
7344                         if (skb == NULL)
7345                                 continue;
7346
7347                         dma_unmap_single(&bp->pdev->dev,
7348                                          dma_unmap_addr(rx_buf, mapping),
7349                                          bp->rx_buf_size, DMA_FROM_DEVICE);
7350
7351                         rx_buf->skb = NULL;
7352                         dev_kfree_skb(skb);
7353                 }
7354                 if (!fp->disable_tpa)
7355                         bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7356                                             ETH_MAX_AGGREGATION_QUEUES_E1 :
7357                                             ETH_MAX_AGGREGATION_QUEUES_E1H);
7358         }
7359 }
7360
7361 static void bnx2x_free_skbs(struct bnx2x *bp)
7362 {
7363         bnx2x_free_tx_skbs(bp);
7364         bnx2x_free_rx_skbs(bp);
7365 }
7366
7367 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7368 {
7369         int i, offset = 1;
7370
7371         free_irq(bp->msix_table[0].vector, bp->dev);
7372         DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7373            bp->msix_table[0].vector);
7374
7375 #ifdef BCM_CNIC
7376         offset++;
7377 #endif
7378         for_each_queue(bp, i) {
7379                 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
7380                    "state %x\n", i, bp->msix_table[i + offset].vector,
7381                    bnx2x_fp(bp, i, state));
7382
7383                 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7384         }
7385 }
7386
7387 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7388 {
7389         if (bp->flags & USING_MSIX_FLAG) {
7390                 if (!disable_only)
7391                         bnx2x_free_msix_irqs(bp);
7392                 pci_disable_msix(bp->pdev);
7393                 bp->flags &= ~USING_MSIX_FLAG;
7394
7395         } else if (bp->flags & USING_MSI_FLAG) {
7396                 if (!disable_only)
7397                         free_irq(bp->pdev->irq, bp->dev);
7398                 pci_disable_msi(bp->pdev);
7399                 bp->flags &= ~USING_MSI_FLAG;
7400
7401         } else if (!disable_only)
7402                 free_irq(bp->pdev->irq, bp->dev);
7403 }
7404
7405 static int bnx2x_enable_msix(struct bnx2x *bp)
7406 {
7407         int i, rc, offset = 1;
7408         int igu_vec = 0;
7409
7410         bp->msix_table[0].entry = igu_vec;
7411         DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7412
7413 #ifdef BCM_CNIC
7414         igu_vec = BP_L_ID(bp) + offset;
7415         bp->msix_table[1].entry = igu_vec;
7416         DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7417         offset++;
7418 #endif
7419         for_each_queue(bp, i) {
7420                 igu_vec = BP_L_ID(bp) + offset + i;
7421                 bp->msix_table[i + offset].entry = igu_vec;
7422                 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7423                    "(fastpath #%u)\n", i + offset, igu_vec, i);
7424         }
7425
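        /* pci_enable_msix() returns 0 on success, a negative errno on hard
         * failure, or a positive count of the vectors actually available,
         * in which case we can retry below with the smaller count */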
7426         rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7427                              BNX2X_NUM_QUEUES(bp) + offset);
7428
7429         /*
7430          * reconfigure number of tx/rx queues according to available
7431          * MSI-X vectors
7432          */
7433         if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7434                 /* vectors available for FP */
7435                 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7436
7437                 DP(NETIF_MSG_IFUP,
7438                    "Trying to use less MSI-X vectors: %d\n", rc);
7439
7440                 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7441
7442                 if (rc) {
7443                         DP(NETIF_MSG_IFUP,
7444                            "MSI-X is not attainable  rc %d\n", rc);
7445                         return rc;
7446                 }
7447
7448                 bp->num_queues = min(bp->num_queues, fp_vec);
7449
7450                 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7451                                   bp->num_queues);
7452         } else if (rc) {
7453                 DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
7454                 return rc;
7455         }
7456
7457         bp->flags |= USING_MSIX_FLAG;
7458
7459         return 0;
7460 }
7461
7462 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7463 {
7464         int i, rc, offset = 1;
7465
7466         rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7467                          bp->dev->name, bp->dev);
7468         if (rc) {
7469                 BNX2X_ERR("request sp irq failed\n");
7470                 return -EBUSY;
7471         }
7472
7473 #ifdef BCM_CNIC
7474         offset++;
7475 #endif
7476         for_each_queue(bp, i) {
7477                 struct bnx2x_fastpath *fp = &bp->fp[i];
7478                 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7479                          bp->dev->name, i);
7480
7481                 rc = request_irq(bp->msix_table[i + offset].vector,
7482                                  bnx2x_msix_fp_int, 0, fp->name, fp);
7483                 if (rc) {
7484                         BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
7485                         bnx2x_free_msix_irqs(bp);
7486                         return -EBUSY;
7487                 }
7488
7489                 fp->state = BNX2X_FP_STATE_IRQ;
7490         }
7491
7492         i = BNX2X_NUM_QUEUES(bp);
7493         netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d"
7494                " ... fp[%d] %d\n",
7495                bp->msix_table[0].vector,
7496                0, bp->msix_table[offset].vector,
7497                i - 1, bp->msix_table[offset + i - 1].vector);
7498
7499         return 0;
7500 }
7501
7502 static int bnx2x_enable_msi(struct bnx2x *bp)
7503 {
7504         int rc;
7505
7506         rc = pci_enable_msi(bp->pdev);
7507         if (rc) {
7508                 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7509                 return -1;
7510         }
7511         bp->flags |= USING_MSI_FLAG;
7512
7513         return 0;
7514 }
7515
7516 static int bnx2x_req_irq(struct bnx2x *bp)
7517 {
7518         unsigned long flags;
7519         int rc;
7520
7521         if (bp->flags & USING_MSI_FLAG)
7522                 flags = 0;
7523         else
7524                 flags = IRQF_SHARED;
7525
7526         rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7527                          bp->dev->name, bp->dev);
7528         if (!rc)
7529                 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7530
7531         return rc;
7532 }
7533
7534 static void bnx2x_napi_enable(struct bnx2x *bp)
7535 {
7536         int i;
7537
7538         for_each_queue(bp, i)
7539                 napi_enable(&bnx2x_fp(bp, i, napi));
7540 }
7541
7542 static void bnx2x_napi_disable(struct bnx2x *bp)
7543 {
7544         int i;
7545
7546         for_each_queue(bp, i)
7547                 napi_disable(&bnx2x_fp(bp, i, napi));
7548 }
7549
7550 static void bnx2x_netif_start(struct bnx2x *bp)
7551 {
7552         int intr_sem;
7553
7554         intr_sem = atomic_dec_and_test(&bp->intr_sem);
7555         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7556
7557         if (intr_sem) {
7558                 if (netif_running(bp->dev)) {
7559                         bnx2x_napi_enable(bp);
7560                         bnx2x_int_enable(bp);
7561                         if (bp->state == BNX2X_STATE_OPEN)
7562                                 netif_tx_wake_all_queues(bp->dev);
7563                 }
7564         }
7565 }
7566
7567 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7568 {
7569         bnx2x_int_disable_sync(bp, disable_hw);
7570         bnx2x_napi_disable(bp);
7571         netif_tx_disable(bp->dev);
7572 }
7573
7574 /*
7575  * Init service functions
7576  */
7577
7578 /**
7579  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7580  *
7581  * @param bp driver descriptor
7582  * @param set set or clear an entry (1 or 0)
7583  * @param mac pointer to a buffer containing a MAC
7584  * @param cl_bit_vec bit vector of clients to register a MAC for
7585  * @param cam_offset offset in a CAM to use
7586  * @param with_bcast set broadcast MAC as well
7587  */
7588 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7589                                       u32 cl_bit_vec, u8 cam_offset,
7590                                       u8 with_bcast)
7591 {
7592         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7593         int port = BP_PORT(bp);
7594
7595         /* CAM allocation
7596          * unicasts 0-31:port0 32-63:port1
7597          * multicast 64-127:port0 128-191:port1
7598          */
7599         config->hdr.length = 1 + (with_bcast ? 1 : 0);
7600         config->hdr.offset = cam_offset;
7601         config->hdr.client_id = 0xff;
7602         config->hdr.reserved1 = 0;
7603
7604         /* primary MAC */
7605         config->config_table[0].cam_entry.msb_mac_addr =
7606                                         swab16(*(u16 *)&mac[0]);
7607         config->config_table[0].cam_entry.middle_mac_addr =
7608                                         swab16(*(u16 *)&mac[2]);
7609         config->config_table[0].cam_entry.lsb_mac_addr =
7610                                         swab16(*(u16 *)&mac[4]);
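        /* each pair of MAC bytes becomes one big-endian 16-bit word for the
         * CAM; e.g. on a little-endian host the MAC 00:1b:21:aa:bb:cc
         * yields msb 0x001b, middle 0x21aa, lsb 0xbbcc */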
7611         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7612         if (set)
7613                 config->config_table[0].target_table_entry.flags = 0;
7614         else
7615                 CAM_INVALIDATE(config->config_table[0]);
7616         config->config_table[0].target_table_entry.clients_bit_vector =
7617                                                 cpu_to_le32(cl_bit_vec);
7618         config->config_table[0].target_table_entry.vlan_id = 0;
7619
7620         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7621            (set ? "setting" : "clearing"),
7622            config->config_table[0].cam_entry.msb_mac_addr,
7623            config->config_table[0].cam_entry.middle_mac_addr,
7624            config->config_table[0].cam_entry.lsb_mac_addr);
7625
7626         /* broadcast */
7627         if (with_bcast) {
7628                 config->config_table[1].cam_entry.msb_mac_addr =
7629                         cpu_to_le16(0xffff);
7630                 config->config_table[1].cam_entry.middle_mac_addr =
7631                         cpu_to_le16(0xffff);
7632                 config->config_table[1].cam_entry.lsb_mac_addr =
7633                         cpu_to_le16(0xffff);
7634                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7635                 if (set)
7636                         config->config_table[1].target_table_entry.flags =
7637                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7638                 else
7639                         CAM_INVALIDATE(config->config_table[1]);
7640                 config->config_table[1].target_table_entry.clients_bit_vector =
7641                                                         cpu_to_le32(cl_bit_vec);
7642                 config->config_table[1].target_table_entry.vlan_id = 0;
7643         }
7644
7645         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7646                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7647                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7648 }
7649
7650 /**
7651  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7652  *
7653  * @param bp driver descriptor
7654  * @param set set or clear an entry (1 or 0)
7655  * @param mac pointer to a buffer containing a MAC
7656  * @param cl_bit_vec bit vector of clients to register a MAC for
7657  * @param cam_offset offset in a CAM to use
7658  */
7659 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7660                                        u32 cl_bit_vec, u8 cam_offset)
7661 {
7662         struct mac_configuration_cmd_e1h *config =
7663                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7664
7665         config->hdr.length = 1;
7666         config->hdr.offset = cam_offset;
7667         config->hdr.client_id = 0xff;
7668         config->hdr.reserved1 = 0;
7669
7670         /* primary MAC */
7671         config->config_table[0].msb_mac_addr =
7672                                         swab16(*(u16 *)&mac[0]);
7673         config->config_table[0].middle_mac_addr =
7674                                         swab16(*(u16 *)&mac[2]);
7675         config->config_table[0].lsb_mac_addr =
7676                                         swab16(*(u16 *)&mac[4]);
7677         config->config_table[0].clients_bit_vector =
7678                                         cpu_to_le32(cl_bit_vec);
7679         config->config_table[0].vlan_id = 0;
7680         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7681         if (set)
7682                 config->config_table[0].flags = BP_PORT(bp);
7683         else
7684                 config->config_table[0].flags =
7685                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7686
7687         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
7688            (set ? "setting" : "clearing"),
7689            config->config_table[0].msb_mac_addr,
7690            config->config_table[0].middle_mac_addr,
7691            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7692
7693         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7694                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7695                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7696 }
7697
7698 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7699                              int *state_p, int poll)
7700 {
7701         /* can take a while if any port is running */
7702         int cnt = 5000;
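        /* 5000 iterations of msleep(1) below gives a nominal 5 second
         * timeout (in practice longer, since msleep() may oversleep) */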
7703
7704         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7705            poll ? "polling" : "waiting", state, idx);
7706
7707         might_sleep();
7708         while (cnt--) {
7709                 if (poll) {
7710                         bnx2x_rx_int(bp->fp, 10);
7711                         /* if index is different from 0
7712                          * the reply for some commands will
7713                          * be on the non-default queue
7714                          */
7715                         if (idx)
7716                                 bnx2x_rx_int(&bp->fp[idx], 10);
7717                 }
7718
7719                 mb(); /* state is changed by bnx2x_sp_event() */
7720                 if (*state_p == state) {
7721 #ifdef BNX2X_STOP_ON_ERROR
7722                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
7723 #endif
7724                         return 0;
7725                 }
7726
7727                 msleep(1);
7728
7729                 if (bp->panic)
7730                         return -EIO;
7731         }
7732
7733         /* timeout! */
7734         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7735                   poll ? "polling" : "waiting", state, idx);
7736 #ifdef BNX2X_STOP_ON_ERROR
7737         bnx2x_panic();
7738 #endif
7739
7740         return -EBUSY;
7741 }
7742
7743 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7744 {
7745         bp->set_mac_pending++;
7746         smp_wmb();
7747
7748         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7749                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
7750
7751         /* Wait for a completion */
7752         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7753 }
7754
7755 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7756 {
7757         bp->set_mac_pending++;
7758         smp_wmb();
7759
7760         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7761                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7762                                   1);
7763
7764         /* Wait for a completion */
7765         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7766 }
7767
7768 #ifdef BCM_CNIC
7769 /**
7770  * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
7771  * MAC(s). This function will wait until the ramrod completion
7772  * returns.
7773  *
7774  * @param bp driver handle
7775  * @param set set or clear the CAM entry
7776  *
7777  * @return 0 on success, -ENODEV if the ramrod doesn't return.
7778  */
7779 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7780 {
7781         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7782
7783         bp->set_mac_pending++;
7784         smp_wmb();
7785
7786         /* Send a SET_MAC ramrod */
7787         if (CHIP_IS_E1(bp))
7788                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7789                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7790                                   1);
7791         else
7792                 /* CAM allocation for E1H
7793                 * unicasts: by func number
7794                 * multicast: 20+FUNC*20, 20 each
7795                 */
7796                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7797                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7798
7799         /* Wait for a completion when setting */
7800         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7801
7802         return 0;
7803 }
7804 #endif
7805
7806 static int bnx2x_setup_leading(struct bnx2x *bp)
7807 {
7808         int rc;
7809
7810         /* reset IGU state */
7811         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7812
7813         /* SETUP ramrod */
7814         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7815
7816         /* Wait for completion */
7817         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7818
7819         return rc;
7820 }
7821
7822 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7823 {
7824         struct bnx2x_fastpath *fp = &bp->fp[index];
7825
7826         /* reset IGU state */
7827         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7828
7829         /* SETUP ramrod */
7830         fp->state = BNX2X_FP_STATE_OPENING;
7831         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7832                       fp->cl_id, 0);
7833
7834         /* Wait for completion */
7835         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7836                                  &(fp->state), 0);
7837 }
7838
7839 static int bnx2x_poll(struct napi_struct *napi, int budget);
7840
7841 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7842 {
7843
7844         switch (bp->multi_mode) {
7845         case ETH_RSS_MODE_DISABLED:
7846                 bp->num_queues = 1;
7847                 break;
7848
7849         case ETH_RSS_MODE_REGULAR:
7850                 if (num_queues)
7851                         bp->num_queues = min_t(u32, num_queues,
7852                                                   BNX2X_MAX_QUEUES(bp));
7853                 else
7854                         bp->num_queues = min_t(u32, num_online_cpus(),
7855                                                   BNX2X_MAX_QUEUES(bp));
7856                 break;
7857
7858
7859         default:
7860                 bp->num_queues = 1;
7861                 break;
7862         }
7863 }
7864
7865 static int bnx2x_set_num_queues(struct bnx2x *bp)
7866 {
7867         int rc = 0;
7868
7869         switch (int_mode) {
7870         case INT_MODE_INTx:
7871         case INT_MODE_MSI:
7872                 bp->num_queues = 1;
7873                 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7874                 break;
7875         default:
7876                 /* Set number of queues according to bp->multi_mode value */
7877                 bnx2x_set_num_queues_msix(bp);
7878
7879                 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7880                    bp->num_queues);
7881
7882                 /* if we can't use MSI-X we only need one fp,
7883                  * so try to enable MSI-X with the requested number of fp's
7884                  * and fall back to MSI or legacy INTx with one fp
7885                  */
7886                 rc = bnx2x_enable_msix(bp);
7887                 if (rc)
7888                         /* failed to enable MSI-X */
7889                         bp->num_queues = 1;
7890                 break;
7891         }
7892         bp->dev->real_num_tx_queues = bp->num_queues;
7893         return rc;
7894 }
7895
7896 #ifdef BCM_CNIC
7897 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7898 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7899 #endif
7900
7901 /* must be called with rtnl_lock */
7902 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7903 {
7904         u32 load_code;
7905         int i, rc;
7906
7907 #ifdef BNX2X_STOP_ON_ERROR
7908         if (unlikely(bp->panic))
7909                 return -EPERM;
7910 #endif
7911
7912         bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7913
7914         rc = bnx2x_set_num_queues(bp);
7915
7916         if (bnx2x_alloc_mem(bp)) {
7917                 bnx2x_free_irq(bp, true);
7918                 return -ENOMEM;
7919         }
7920
7921         for_each_queue(bp, i)
7922                 bnx2x_fp(bp, i, disable_tpa) =
7923                                         ((bp->flags & TPA_ENABLE_FLAG) == 0);
7924
7925         for_each_queue(bp, i)
7926                 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7927                                bnx2x_poll, 128);
7928
7929         bnx2x_napi_enable(bp);
7930
7931         if (bp->flags & USING_MSIX_FLAG) {
7932                 rc = bnx2x_req_msix_irqs(bp);
7933                 if (rc) {
7934                         bnx2x_free_irq(bp, true);
7935                         goto load_error1;
7936                 }
7937         } else {
7938                 /* Fall back to INTx if we failed to enable MSI-X due to
7939                    lack of memory (in bnx2x_set_num_queues()) */
7940                 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7941                         bnx2x_enable_msi(bp);
7942                 bnx2x_ack_int(bp);
7943                 rc = bnx2x_req_irq(bp);
7944                 if (rc) {
7945                         BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
7946                         bnx2x_free_irq(bp, true);
7947                         goto load_error1;
7948                 }
7949                 if (bp->flags & USING_MSI_FLAG) {
7950                         bp->dev->irq = bp->pdev->irq;
7951                         netdev_info(bp->dev, "using MSI  IRQ %d\n",
7952                                     bp->pdev->irq);
7953                 }
7954         }
7955
7956         /* Send LOAD_REQUEST command to MCP.
7957            The reply is the type of LOAD command to perform:
7958            if this is the first port to be initialized,
7959            common blocks should be initialized as well; otherwise not.
7960         */
7961         if (!BP_NOMCP(bp)) {
7962                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7963                 if (!load_code) {
7964                         BNX2X_ERR("MCP response failure, aborting\n");
7965                         rc = -EBUSY;
7966                         goto load_error2;
7967                 }
7968                 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7969                         rc = -EBUSY; /* other port in diagnostic mode */
7970                         goto load_error2;
7971                 }
7972
7973         } else {
7974                 int port = BP_PORT(bp);
7975
7976                 DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
7977                    load_count[0], load_count[1], load_count[2]);
7978                 load_count[0]++;
7979                 load_count[1 + port]++;
7980                 DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
7981                    load_count[0], load_count[1], load_count[2]);
7982                 if (load_count[0] == 1)
7983                         load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7984                 else if (load_count[1 + port] == 1)
7985                         load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7986                 else
7987                         load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7988         }
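        /* without an MCP we emulate its bookkeeping: load_count[0] counts
         * loads on the whole chip and load_count[1 + port] per port, so
         * the very first load does COMMON init, the first on a port does
         * PORT init, and everything else only FUNCTION init */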
7989
7990         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7991             (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7992                 bp->port.pmf = 1;
7993         else
7994                 bp->port.pmf = 0;
7995         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7996
7997         /* Initialize HW */
7998         rc = bnx2x_init_hw(bp, load_code);
7999         if (rc) {
8000                 BNX2X_ERR("HW init failed, aborting\n");
8001                 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8002                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8003                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8004                 goto load_error2;
8005         }
8006
8007         /* Setup NIC internals and enable interrupts */
8008         bnx2x_nic_init(bp, load_code);
8009
8010         if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8011             (bp->common.shmem2_base))
8012                 SHMEM2_WR(bp, dcc_support,
8013                           (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8014                            SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8015
8016         /* Send LOAD_DONE command to MCP */
8017         if (!BP_NOMCP(bp)) {
8018                 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8019                 if (!load_code) {
8020                         BNX2X_ERR("MCP response failure, aborting\n");
8021                         rc = -EBUSY;
8022                         goto load_error3;
8023                 }
8024         }
8025
8026         bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8027
8028         rc = bnx2x_setup_leading(bp);
8029         if (rc) {
8030                 BNX2X_ERR("Setup leading failed!\n");
8031 #ifndef BNX2X_STOP_ON_ERROR
8032                 goto load_error3;
8033 #else
8034                 bp->panic = 1;
8035                 return -EBUSY;
8036 #endif
8037         }
8038
8039         if (CHIP_IS_E1H(bp))
8040                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8041                         DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8042                         bp->flags |= MF_FUNC_DIS;
8043                 }
8044
8045         if (bp->state == BNX2X_STATE_OPEN) {
8046 #ifdef BCM_CNIC
8047                 /* Enable Timer scan */
8048                 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8049 #endif
8050                 for_each_nondefault_queue(bp, i) {
8051                         rc = bnx2x_setup_multi(bp, i);
8052                         if (rc)
8053 #ifdef BCM_CNIC
8054                                 goto load_error4;
8055 #else
8056                                 goto load_error3;
8057 #endif
8058                 }
8059
8060                 if (CHIP_IS_E1(bp))
8061                         bnx2x_set_eth_mac_addr_e1(bp, 1);
8062                 else
8063                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
8064 #ifdef BCM_CNIC
8065                 /* Set iSCSI L2 MAC */
8066                 mutex_lock(&bp->cnic_mutex);
8067                 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8068                         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8069                         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8070                         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8071                                       CNIC_SB_ID(bp));
8072                 }
8073                 mutex_unlock(&bp->cnic_mutex);
8074 #endif
8075         }
8076
8077         if (bp->port.pmf)
8078                 bnx2x_initial_phy_init(bp, load_mode);
8079
8080         /* Start fast path */
8081         switch (load_mode) {
8082         case LOAD_NORMAL:
8083                 if (bp->state == BNX2X_STATE_OPEN) {
8084                         /* Tx queues should only be re-enabled */
8085                         netif_tx_wake_all_queues(bp->dev);
8086                 }
8087                 /* Initialize the receive filter. */
8088                 bnx2x_set_rx_mode(bp->dev);
8089                 break;
8090
8091         case LOAD_OPEN:
8092                 netif_tx_start_all_queues(bp->dev);
8093                 if (bp->state != BNX2X_STATE_OPEN)
8094                         netif_tx_disable(bp->dev);
8095                 /* Initialize the receive filter. */
8096                 bnx2x_set_rx_mode(bp->dev);
8097                 break;
8098
8099         case LOAD_DIAG:
8100                 /* Initialize the receive filter. */
8101                 bnx2x_set_rx_mode(bp->dev);
8102                 bp->state = BNX2X_STATE_DIAG;
8103                 break;
8104
8105         default:
8106                 break;
8107         }
8108
8109         if (!bp->port.pmf)
8110                 bnx2x__link_status_update(bp);
8111
8112         /* start the timer */
8113         mod_timer(&bp->timer, jiffies + bp->current_interval);
8114
8115 #ifdef BCM_CNIC
8116         bnx2x_setup_cnic_irq_info(bp);
8117         if (bp->state == BNX2X_STATE_OPEN)
8118                 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8119 #endif
8120         bnx2x_inc_load_cnt(bp);
8121
8122         return 0;
8123
8124 #ifdef BCM_CNIC
8125 load_error4:
8126         /* Disable Timer scan */
8127         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8128 #endif
8129 load_error3:
8130         bnx2x_int_disable_sync(bp, 1);
8131         if (!BP_NOMCP(bp)) {
8132                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8133                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8134         }
8135         bp->port.pmf = 0;
8136         /* Free SKBs, SGEs, TPA pool and driver internals */
8137         bnx2x_free_skbs(bp);
8138         for_each_queue(bp, i)
8139                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8140 load_error2:
8141         /* Release IRQs */
8142         bnx2x_free_irq(bp, false);
8143 load_error1:
8144         bnx2x_napi_disable(bp);
8145         for_each_queue(bp, i)
8146                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8147         bnx2x_free_mem(bp);
8148
8149         return rc;
8150 }
8151
8152 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8153 {
8154         struct bnx2x_fastpath *fp = &bp->fp[index];
8155         int rc;
8156
8157         /* halt the connection */
8158         fp->state = BNX2X_FP_STATE_HALTING;
8159         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8160
8161         /* Wait for completion */
8162         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8163                                &(fp->state), 1);
8164         if (rc) /* timeout */
8165                 return rc;
8166
8167         /* delete cfc entry */
8168         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8169
8170         /* Wait for completion */
8171         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8172                                &(fp->state), 1);
8173         return rc;
8174 }
8175
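/* Tear down the leading connection: HALT it like a regular queue, then post
 * a PORT_DELETE ramrod whose completion is polled directly on the default
 * status block producer; the fastpath state is then set to CLOSED by the
 * driver itself.
 */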
8176 static int bnx2x_stop_leading(struct bnx2x *bp)
8177 {
8178         __le16 dsb_sp_prod_idx;
8179         /* if the other port is handling traffic,
8180            this can take a lot of time */
8181         int cnt = 500;
8182         int rc;
8183
8184         might_sleep();
8185
8186         /* Send HALT ramrod */
8187         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8188         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8189
8190         /* Wait for completion */
8191         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8192                                &(bp->fp[0].state), 1);
8193         if (rc) /* timeout */
8194                 return rc;
8195
8196         dsb_sp_prod_idx = *bp->dsb_sp_prod;
8197
8198         /* Send PORT_DELETE ramrod */
8199         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8200
8201         /* Wait for the completion to arrive on the default status block.
8202            We are going to reset the chip anyway, so there is not much
8203            to do if this times out.
8204          */
8205         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8206                 if (!cnt) {
8207                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8208                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8209                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
8210 #ifdef BNX2X_STOP_ON_ERROR
8211                         bnx2x_panic();
8212 #endif
8213                         rc = -EBUSY;
8214                         break;
8215                 }
8216                 cnt--;
8217                 msleep(1);
8218                 rmb(); /* Refresh the dsb_sp_prod */
8219         }
8220         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8221         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8222
8223         return rc;
8224 }
8225
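/* Per-function reset: mask the HC leading/trailing edge attentions, stop
 * the CNIC timer scan (when compiled in) and clear this function's ILT
 * entries.
 */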
8226 static void bnx2x_reset_func(struct bnx2x *bp)
8227 {
8228         int port = BP_PORT(bp);
8229         int func = BP_FUNC(bp);
8230         int base, i;
8231
8232         /* Configure IGU */
8233         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8234         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8235
8236 #ifdef BCM_CNIC
8237         /* Disable Timer scan */
8238         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8239         /*
8240          * Wait for at least 10ms and up to 2 second for the timers scan to
8241          * complete
8242          */
8243         for (i = 0; i < 200; i++) {
8244                 msleep(10);
8245                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8246                         break;
8247         }
8248 #endif
8249         /* Clear ILT */
8250         base = FUNC_ILT_BASE(func);
8251         for (i = base; i < base + ILT_PER_FUNC; i++)
8252                 bnx2x_ilt_wr(bp, i, 0);
8253 }
8254
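/* Per-port reset: mask NIG interrupts, close traffic towards the BRB, mask
 * the AEU attentions and verify that the BRB has drained.
 */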
8255 static void bnx2x_reset_port(struct bnx2x *bp)
8256 {
8257         int port = BP_PORT(bp);
8258         u32 val;
8259
8260         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8261
8262         /* Do not rcv packets to BRB */
8263         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8264         /* Do not direct rcv packets that are not for MCP to the BRB */
8265         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8266                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8267
8268         /* Configure AEU */
8269         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8270
8271         msleep(100);
8272         /* Check for BRB port occupancy */
8273         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8274         if (val)
8275                 DP(NETIF_MSG_IFDOWN,
8276                    "BRB1 is not empty  %d blocks are occupied\n", val);
8277
8278         /* TODO: Close Doorbell port? */
8279 }
8280
8281 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8282 {
8283         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
8284            BP_FUNC(bp), reset_code);
8285
8286         switch (reset_code) {
8287         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8288                 bnx2x_reset_port(bp);
8289                 bnx2x_reset_func(bp);
8290                 bnx2x_reset_common(bp);
8291                 break;
8292
8293         case FW_MSG_CODE_DRV_UNLOAD_PORT:
8294                 bnx2x_reset_port(bp);
8295                 bnx2x_reset_func(bp);
8296                 break;
8297
8298         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8299                 bnx2x_reset_func(bp);
8300                 break;
8301
8302         default:
8303                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8304                 break;
8305         }
8306 }
8307
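/* Graceful chip shutdown: drain the Tx fastpath queues, invalidate the MAC
 * and multicast configuration, select an unload reset code according to the
 * WOL settings, close all connections and let the MCP (or the per-port load
 * counters when there is no MCP) decide how much of the chip to reset.
 */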
8308 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8309 {
8310         int port = BP_PORT(bp);
8311         u32 reset_code = 0;
8312         int i, cnt, rc;
8313
8314         /* Wait until tx fastpath tasks complete */
8315         for_each_queue(bp, i) {
8316                 struct bnx2x_fastpath *fp = &bp->fp[i];
8317
8318                 cnt = 1000;
8319                 while (bnx2x_has_tx_work_unload(fp)) {
8320
8321                         bnx2x_tx_int(fp);
8322                         if (!cnt) {
8323                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
8324                                           i);
8325 #ifdef BNX2X_STOP_ON_ERROR
8326                                 bnx2x_panic();
8327                                 return;
8328 #else
8329                                 break;
8330 #endif
8331                         }
8332                         cnt--;
8333                         msleep(1);
8334                 }
8335         }
8336         /* Give HW time to discard old tx messages */
8337         msleep(1);
8338
8339         if (CHIP_IS_E1(bp)) {
8340                 struct mac_configuration_cmd *config =
8341                                                 bnx2x_sp(bp, mcast_config);
8342
8343                 bnx2x_set_eth_mac_addr_e1(bp, 0);
8344
8345                 for (i = 0; i < config->hdr.length; i++)
8346                         CAM_INVALIDATE(config->config_table[i]);
8347
8348                 config->hdr.length = i;
8349                 if (CHIP_REV_IS_SLOW(bp))
8350                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8351                 else
8352                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8353                 config->hdr.client_id = bp->fp->cl_id;
8354                 config->hdr.reserved1 = 0;
8355
8356                 bp->set_mac_pending++;
8357                 smp_wmb();
8358
8359                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8360                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8361                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8362
8363         } else { /* E1H */
8364                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8365
8366                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8367
8368                 for (i = 0; i < MC_HASH_SIZE; i++)
8369                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8370
8371                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8372         }
8373 #ifdef BCM_CNIC
8374         /* Clear iSCSI L2 MAC */
8375         mutex_lock(&bp->cnic_mutex);
8376         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8377                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8378                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8379         }
8380         mutex_unlock(&bp->cnic_mutex);
8381 #endif
8382
8383         if (unload_mode == UNLOAD_NORMAL)
8384                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8385
8386         else if (bp->flags & NO_WOL_FLAG)
8387                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8388
8389         else if (bp->wol) {
8390                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8391                 u8 *mac_addr = bp->dev->dev_addr;
8392                 u32 val;
8393                 /* The MAC address is written to entries 1-4 to
8394                    preserve entry 0, which is used by the PMF */
8395                 u8 entry = (BP_E1HVN(bp) + 1)*8;
8396
8397                 val = (mac_addr[0] << 8) | mac_addr[1];
8398                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8399
8400                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8401                       (mac_addr[4] << 8) | mac_addr[5];
8402                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8403
8404                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8405
8406         } else
8407                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8408
8409         /* Close the multi and leading connections.
8410            Completions for the ramrods are collected synchronously */
8411         for_each_nondefault_queue(bp, i)
8412                 if (bnx2x_stop_multi(bp, i))
8413                         goto unload_error;
8414
8415         rc = bnx2x_stop_leading(bp);
8416         if (rc) {
8417                 BNX2X_ERR("Stop leading failed!\n");
8418 #ifdef BNX2X_STOP_ON_ERROR
8419                 return;
8420 #else
8421                 goto unload_error;
8422 #endif
8423         }
8424
8425 unload_error:
8426         if (!BP_NOMCP(bp))
8427                 reset_code = bnx2x_fw_command(bp, reset_code);
8428         else {
8429                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
8430                    load_count[0], load_count[1], load_count[2]);
8431                 load_count[0]--;
8432                 load_count[1 + port]--;
8433                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
8434                    load_count[0], load_count[1], load_count[2]);
8435                 if (load_count[0] == 0)
8436                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8437                 else if (load_count[1 + port] == 0)
8438                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8439                 else
8440                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8441         }
8442
8443         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8444             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8445                 bnx2x__link_reset(bp);
8446
8447         /* Reset the chip */
8448         bnx2x_reset_chip(bp, reset_code);
8449
8450         /* Report UNLOAD_DONE to MCP */
8451         if (!BP_NOMCP(bp))
8452                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8453
8454 }
8455
8456 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8457 {
8458         u32 val;
8459
8460         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8461
8462         if (CHIP_IS_E1(bp)) {
8463                 int port = BP_PORT(bp);
8464                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8465                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
8466
8467                 val = REG_RD(bp, addr);
8468                 val &= ~(0x300);
8469                 REG_WR(bp, addr, val);
8470         } else if (CHIP_IS_E1H(bp)) {
8471                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8472                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8473                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8474                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8475         }
8476 }
8477
8478 /* must be called with rtnl_lock */
8479 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8480 {
8481         int i;
8482
8483         if (bp->state == BNX2X_STATE_CLOSED) {
8484                 /* Interface has been removed - nothing to recover */
8485                 bp->recovery_state = BNX2X_RECOVERY_DONE;
8486                 bp->is_leader = 0;
8487                 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8488                 smp_wmb();
8489
8490                 return -EINVAL;
8491         }
8492
8493 #ifdef BCM_CNIC
8494         bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8495 #endif
8496         bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8497
8498         /* Set "drop all" */
8499         bp->rx_mode = BNX2X_RX_MODE_NONE;
8500         bnx2x_set_storm_rx_mode(bp);
8501
8502         /* Disable HW interrupts, NAPI and Tx */
8503         bnx2x_netif_stop(bp, 1);
8504         netif_carrier_off(bp->dev);
8505
8506         del_timer_sync(&bp->timer);
8507         SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8508                  (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8509         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8510
8511         /* Release IRQs */
8512         bnx2x_free_irq(bp, false);
8513
8514         /* Cleanup the chip if needed */
8515         if (unload_mode != UNLOAD_RECOVERY)
8516                 bnx2x_chip_cleanup(bp, unload_mode);
8517
8518         bp->port.pmf = 0;
8519
8520         /* Free SKBs, SGEs, TPA pool and driver internals */
8521         bnx2x_free_skbs(bp);
8522         for_each_queue(bp, i)
8523                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8524         for_each_queue(bp, i)
8525                 netif_napi_del(&bnx2x_fp(bp, i, napi));
8526         bnx2x_free_mem(bp);
8527
8528         bp->state = BNX2X_STATE_CLOSED;
8529
8530         /* The last driver must disable the "close the gates" functionality
8531          * if there is no parity attention or "process kill" pending.
8532          */
8533         if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8534             bnx2x_reset_is_done(bp))
8535                 bnx2x_disable_close_the_gate(bp);
8536
8537         /* Reset the MCP mailbox sequence if there is an ongoing recovery */
8538         if (unload_mode == UNLOAD_RECOVERY)
8539                 bp->fw_seq = 0;
8540
8541         return 0;
8542 }
8543
8544 /* Close gates #2, #3 and #4: */
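/* Gate #4 discards host doorbells and gate #2 discards internal writes in
 * the PXP (both for "not E1" chips only); gate #3 is the HC config enable
 * bit, hence its polarity below is inverted.
 */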
8545 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8546 {
8547         u32 val, addr;
8548
8549         /* Gates #2 and #4a are closed/opened for "not E1" only */
8550         if (!CHIP_IS_E1(bp)) {
8551                 /* #4 */
8552                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8553                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8554                        close ? (val | 0x1) : (val & (~(u32)1)));
8555                 /* #2 */
8556                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8557                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8558                        close ? (val | 0x1) : (val & (~(u32)1)));
8559         }
8560
8561         /* #3 */
8562         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8563         val = REG_RD(bp, addr);
8564         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8565
8566         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8567                 close ? "closing" : "opening");
8568         mmiowb();
8569 }
8570
8571 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
8572
8573 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8574 {
8575         /* Save the old `magic' bit value and set it in the CLP mailbox */
8576         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8577         *magic_val = val & SHARED_MF_CLP_MAGIC;
8578         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8579 }
8580
8581 /* Restore the value of the `magic' bit.
8582  *
8583  * @param bp Driver handle.
8584  * @param magic_val Old value of the `magic' bit.
8585  */
8586 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8587 {
8588         /* Restore the `magic' bit value... */
8592         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8593         MF_CFG_WR(bp, shared_mf_config.clp_mb,
8594                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8595 }
8596
8597 /* Prepares for MCP reset: takes care of CLP configurations.
8598  *
8599  * @param bp Driver handle.
8600  * @param magic_val Where to return the old value of the `magic' bit.
8601  */
8602 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8603 {
8604         u32 shmem;
8605         u32 validity_offset;
8606
8607         DP(NETIF_MSG_HW, "Starting\n");
8608
8609         /* Set `magic' bit in order to save MF config */
8610         if (!CHIP_IS_E1(bp))
8611                 bnx2x_clp_reset_prep(bp, magic_val);
8612
8613         /* Get shmem offset */
8614         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8615         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8616
8617         /* Clear validity map flags */
8618         if (shmem > 0)
8619                 REG_WR(bp, shmem + validity_offset, 0);
8620 }
8621
8622 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
8623 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
8624
8625 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8626  * depending on the HW type.
8627  *
8628  * @param bp Driver handle.
8629  */
8630 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8631 {
8632         /* special handling for emulation and FPGA,
8633            wait 10 times longer */
8634         if (CHIP_REV_IS_SLOW(bp))
8635                 msleep(MCP_ONE_TIMEOUT*10);
8636         else
8637                 msleep(MCP_ONE_TIMEOUT);
8638 }
8639
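/* Wait for the MCP to come back up after a reset: poll the shmem validity
 * map until both the DEV_INFO and MB signatures reappear or MCP_TIMEOUT
 * expires, then restore the `magic' bit.
 */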
8640 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8641 {
8642         u32 shmem, cnt, validity_offset, val;
8643         int rc = 0;
8644
8645         msleep(100);
8646
8647         /* Get shmem offset */
8648         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8649         if (shmem == 0) {
8650                 BNX2X_ERR("Shmem base address is 0 - failure\n");
8651                 rc = -ENOTTY;
8652                 goto exit_lbl;
8653         }
8654
8655         validity_offset = offsetof(struct shmem_region, validity_map[0]);
8656
8657         /* Wait for MCP to come up */
8658         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8659                 /* TBD: it's best to check the validity map of the last
8660                  * port; currently this checks port 0.
8661                  */
8662                 val = REG_RD(bp, shmem + validity_offset);
8663                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8664                    shmem + validity_offset, val);
8665
8666                 /* check that shared memory is valid. */
8667                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8668                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8669                         break;
8670
8671                 bnx2x_mcp_wait_one(bp);
8672         }
8673
8674         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8675
8676         /* Check that shared memory is valid. This indicates that MCP is up. */
8677         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8678             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8679                 BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
8680                 rc = -ENOTTY;
8681                 goto exit_lbl;
8682         }
8683
8684 exit_lbl:
8685         /* Restore the `magic' bit value */
8686         if (!CHIP_IS_E1(bp))
8687                 bnx2x_clp_reset_done(bp, magic_val);
8688
8689         return rc;
8690 }
8691
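/* Prepare the PXP for reset by clearing the read-start, RBC-done and
 * CFG-done indications (nothing to do on E1).
 */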
8692 static void bnx2x_pxp_prep(struct bnx2x *bp)
8693 {
8694         if (!CHIP_IS_E1(bp)) {
8695                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8696                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8697                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8698                 mmiowb();
8699         }
8700 }
8701
8702 /*
8703  * Reset the whole chip except for:
8704  *      - PCIE core
8705  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
8706  *              one reset bit)
8707  *      - IGU
8708  *      - MISC (including AEU)
8709  *      - GRC
8710  *      - RBCN, RBCP
8711  */
8712 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8713 {
8714         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8715
8716         not_reset_mask1 =
8717                 MISC_REGISTERS_RESET_REG_1_RST_HC |
8718                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8719                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8720
8721         not_reset_mask2 =
8722                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8723                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8724                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8725                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8726                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8727                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
8728                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8729                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8730
8731         reset_mask1 = 0xffffffff;
8732
8733         if (CHIP_IS_E1(bp))
8734                 reset_mask2 = 0xffff;
8735         else
8736                 reset_mask2 = 0x1ffff;
8737
8738         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8739                reset_mask1 & (~not_reset_mask1));
8740         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8741                reset_mask2 & (~not_reset_mask2));
8742
8743         barrier();
8744         mmiowb();
8745
8746         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8747         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
8748         mmiowb();
8749 }
8750
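/* Global "process kill" flow: wait for the PXP Tetris buffer to drain,
 * close gates #2-#4, clear the "unprepared" bit, prepare the MCP and PXP
 * for reset, reset the chip, wait for the MCP to recover and finally
 * reopen the gates.
 */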
8751 static int bnx2x_process_kill(struct bnx2x *bp)
8752 {
8753         int cnt = 1000;
8754         u32 val = 0;
8755         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8756
8757
8758         /* Empty the Tetris buffer, wait for 1s */
8759         do {
8760                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8761                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8762                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8763                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8764                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8765                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8766                     ((port_is_idle_0 & 0x1) == 0x1) &&
8767                     ((port_is_idle_1 & 0x1) == 0x1) &&
8768                     (pgl_exp_rom2 == 0xffffffff))
8769                         break;
8770                 msleep(1);
8771         } while (cnt-- > 0);
8772
8773         if (cnt <= 0) {
8774                 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8775                           " are still"
8776                           " outstanding read requests after 1s!\n");
8777                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8778                           " port_is_idle_0=0x%08x,"
8779                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8780                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8781                           pgl_exp_rom2);
8782                 return -EAGAIN;
8783         }
8784
8785         barrier();
8786
8787         /* Close gates #2, #3 and #4 */
8788         bnx2x_set_234_gates(bp, true);
8789
8790         /* TBD: Indicate that "process kill" is in progress to MCP */
8791
8792         /* Clear "unprepared" bit */
8793         REG_WR(bp, MISC_REG_UNPREPARED, 0);
8794         barrier();
8795
8796         /* Make sure all is written to the chip before the reset */
8797         mmiowb();
8798
8799         /* Wait for 1ms to empty GLUE and PCI-E core queues,
8800          * PSWHST, GRC and PSWRD Tetris buffer.
8801          */
8802         msleep(1);
8803
8804         /* Prepare for chip reset: */
8805         /* MCP */
8806         bnx2x_reset_mcp_prep(bp, &val);
8807
8808         /* PXP */
8809         bnx2x_pxp_prep(bp);
8810         barrier();
8811
8812         /* reset the chip */
8813         bnx2x_process_kill_chip_reset(bp);
8814         barrier();
8815
8816         /* Recover after reset: */
8817         /* MCP */
8818         if (bnx2x_reset_mcp_comp(bp, val))
8819                 return -EAGAIN;
8820
8821         /* PXP */
8822         bnx2x_pxp_prep(bp);
8823
8824         /* Open the gates #2, #3 and #4 */
8825         bnx2x_set_234_gates(bp, false);
8826
8827         /* TBD: IGU/AEU preparation - bring the AEU/IGU back to a
8828          * reset state, re-enable attentions. */
8829
8830         return 0;
8831 }
8832
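/* Leader's recovery flow: run the "process kill", mark the reset as done
 * on success and, in any case, give up the leadership lock.
 */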
8833 static int bnx2x_leader_reset(struct bnx2x *bp)
8834 {
8835         int rc = 0;
8836         /* Try to recover after the failure */
8837         if (bnx2x_process_kill(bp)) {
8838                 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8839                        bp->dev->name);
8840                 rc = -EAGAIN;
8841                 goto exit_leader_reset;
8842         }
8843
8844         /* Clear "reset is in progress" bit and update the driver state */
8845         bnx2x_set_reset_done(bp);
8846         bp->recovery_state = BNX2X_RECOVERY_DONE;
8847
8848 exit_leader_reset:
8849         bp->is_leader = 0;
8850         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8851         smp_wmb();
8852         return rc;
8853 }
8854
8855 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8856
8857 /* Assumption: runs under rtnl lock. This together with the fact
8858  * that it's called only from bnx2x_reset_task() ensure that it
8859  * will never be called when netif_running(bp->dev) is false.
8860  */
8861 static void bnx2x_parity_recover(struct bnx2x *bp)
8862 {
8863         DP(NETIF_MSG_HW, "Handling parity\n");
8864         while (1) {
8865                 switch (bp->recovery_state) {
8866                 case BNX2X_RECOVERY_INIT:
8867                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8868                         /* Try to get a LEADER_LOCK HW lock */
8869                         if (bnx2x_trylock_hw_lock(bp,
8870                                 HW_LOCK_RESOURCE_RESERVED_08))
8871                                 bp->is_leader = 1;
8872
8873                         /* Stop the driver */
8874                         /* If the interface has been removed - return */
8875                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8876                                 return;
8877
8878                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
8879                         /* Ensure "is_leader" and "recovery_state"
8880                          *  update values are seen on other CPUs
8881                          */
8882                         smp_wmb();
8883                         break;
8884
8885                 case BNX2X_RECOVERY_WAIT:
8886                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8887                         if (bp->is_leader) {
8888                                 u32 load_counter = bnx2x_get_load_cnt(bp);
8889                                 if (load_counter) {
8890                                         /* Wait until all other functions get
8891                                          * down.
8892                                          */
8893                                         schedule_delayed_work(&bp->reset_task,
8894                                                                 HZ/10);
8895                                         return;
8896                                 } else {
8897                                         /* If all other functions got down -
8898                                          * try to bring the chip back to
8899                                          * normal. In any case it's an exit
8900                                          * point for a leader.
8901                                          */
8902                                         if (bnx2x_leader_reset(bp) ||
8903                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
8904                                                 printk(KERN_ERR"%s: Recovery "
8905                                                 "has failed. Power cycle is "
8906                                                 "needed.\n", bp->dev->name);
8907                                                 /* Disconnect this device */
8908                                                 netif_device_detach(bp->dev);
8909                                                 /* Block ifup for all functions
8910                                                  * of this ASIC until
8911                                                  * "process kill" or power
8912                                                  * cycle.
8913                                                  */
8914                                                 bnx2x_set_reset_in_progress(bp);
8915                                                 /* Shut down the power */
8916                                                 bnx2x_set_power_state(bp,
8917                                                                 PCI_D3hot);
8918                                                 return;
8919                                         }
8920
8921                                         return;
8922                                 }
8923                         } else { /* non-leader */
8924                                 if (!bnx2x_reset_is_done(bp)) {
8925                                         /* Try to get a LEADER_LOCK HW lock as
8926                                          * long as a former leader may have
8927                                          * been unloaded by the user or
8928                                          * released leadership for some
8929                                          * other reason.
8930                                          */
8931                                         if (bnx2x_trylock_hw_lock(bp,
8932                                             HW_LOCK_RESOURCE_RESERVED_08)) {
8933                                                 /* I'm a leader now! Restart the
8934                                                  * switch case.
8935                                                  */
8936                                                 bp->is_leader = 1;
8937                                                 break;
8938                                         }
8939
8940                                         schedule_delayed_work(&bp->reset_task,
8941                                                                 HZ/10);
8942                                         return;
8943
8944                                 } else { /* A leader has completed
8945                                           * the "process kill". It's an exit
8946                                           * point for a non-leader.
8947                                           */
8948                                         bnx2x_nic_load(bp, LOAD_NORMAL);
8949                                         bp->recovery_state =
8950                                                 BNX2X_RECOVERY_DONE;
8951                                         smp_wmb();
8952                                         return;
8953                                 }
8954                         }
8955                 default:
8956                         return;
8957                 }
8958         }
8959 }
8960
8961 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
8962  * scheduled on a general queue in order to prevent a deadlock.
8963  */
8964 static void bnx2x_reset_task(struct work_struct *work)
8965 {
8966         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8967
8968 #ifdef BNX2X_STOP_ON_ERROR
8969         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8970                   " so reset not done to allow debug dump,\n"
8971                   " you will need to reboot when done\n");
8972         return;
8973 #endif
8974
8975         rtnl_lock();
8976
8977         if (!netif_running(bp->dev))
8978                 goto reset_task_exit;
8979
8980         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8981                 bnx2x_parity_recover(bp);
8982         else {
8983                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8984                 bnx2x_nic_load(bp, LOAD_NORMAL);
8985         }
8986
8987 reset_task_exit:
8988         rtnl_unlock();
8989 }
8990
8991 /* end of nic load/unload */
8992
8993 /* ethtool_ops */
8994
8995 /*
8996  * Init service functions
8997  */
8998
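/* Map a function index to its PGL "pretend" register */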
8999 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9000 {
9001         switch (func) {
9002         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9003         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9004         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9005         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9006         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9007         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9008         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9009         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9010         default:
9011                 BNX2X_ERR("Unsupported function index: %d\n", func);
9012                 return (u32)(-1);
9013         }
9014 }
9015
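/* Disable the HC interrupts on behalf of function 0: pretend to be
 * function 0 via the PGL pretend register, disable the interrupts in the
 * (now "like-E1") chip and then restore the original function setting.
 */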
9016 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9017 {
9018         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9019
9020         /* Flush all outstanding writes */
9021         mmiowb();
9022
9023         /* Pretend to be function 0 */
9024         REG_WR(bp, reg, 0);
9025         /* Flush the GRC transaction (in the chip) */
9026         new_val = REG_RD(bp, reg);
9027         if (new_val != 0) {
9028                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9029                           new_val);
9030                 BUG();
9031         }
9032
9033         /* From now we are in the "like-E1" mode */
9034         bnx2x_int_disable(bp);
9035
9036         /* Flush all outstanding writes */
9037         mmiowb();
9038
9039         /* Restore the original function settings */
9040         REG_WR(bp, reg, orig_func);
9041         new_val = REG_RD(bp, reg);
9042         if (new_val != orig_func) {
9043                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9044                           orig_func, new_val);
9045                 BUG();
9046         }
9047 }
9048
9049 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9050 {
9051         if (CHIP_IS_E1H(bp))
9052                 bnx2x_undi_int_disable_e1h(bp, func);
9053         else
9054                 bnx2x_int_disable(bp);
9055 }
9056
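/* Detect a lingering UNDI (pre-boot) driver by its doorbell CID offset
 * signature (0x7) and, if found, gracefully unload it on both ports, close
 * input traffic and reset the device while preserving the NIG port-swap
 * strapping.
 */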
9057 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9058 {
9059         u32 val;
9060
9061         /* Check if there is any driver already loaded */
9062         val = REG_RD(bp, MISC_REG_UNPREPARED);
9063         if (val == 0x1) {
9064                 /* Check if it is the UNDI driver:
9065                  * UNDI driver initializes CID offset for normal bell to 0x7
9066                  */
9067                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9068                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9069                 if (val == 0x7) {
9070                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9071                         /* save our func */
9072                         int func = BP_FUNC(bp);
9073                         u32 swap_en;
9074                         u32 swap_val;
9075
9076                         /* clear the UNDI indication */
9077                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9078
9079                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
9080
9081                         /* try to unload UNDI on port 0 */
9082                         bp->func = 0;
9083                         bp->fw_seq =
9084                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9085                                 DRV_MSG_SEQ_NUMBER_MASK);
9086                         reset_code = bnx2x_fw_command(bp, reset_code);
9087
9088                         /* if UNDI is loaded on the other port */
9089                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9090
9091                                 /* send "DONE" for previous unload */
9092                                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9093
9094                                 /* unload UNDI on port 1 */
9095                                 bp->func = 1;
9096                                 bp->fw_seq =
9097                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9098                                         DRV_MSG_SEQ_NUMBER_MASK);
9099                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9100
9101                                 bnx2x_fw_command(bp, reset_code);
9102                         }
9103
9104                         /* now it's safe to release the lock */
9105                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9106
9107                         bnx2x_undi_int_disable(bp, func);
9108
9109                         /* close input traffic and wait for it */
9110                         /* Do not rcv packets to BRB */
9111                         REG_WR(bp,
9112                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9113                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9114                         /* Do not direct rcv packets that are not for MCP to
9115                          * the BRB */
9116                         REG_WR(bp,
9117                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9118                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9119                         /* clear AEU */
9120                         REG_WR(bp,
9121                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9122                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9123                         msleep(10);
9124
9125                         /* save NIG port swap info */
9126                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9127                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9128                         /* reset device */
9129                         REG_WR(bp,
9130                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9131                                0xd3ffffff);
9132                         REG_WR(bp,
9133                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9134                                0x1403);
9135                         /* take the NIG out of reset and restore swap values */
9136                         REG_WR(bp,
9137                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9138                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
9139                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9140                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9141
9142                         /* send unload done to the MCP */
9143                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9144
9145                         /* restore our func and fw_seq */
9146                         bp->func = func;
9147                         bp->fw_seq =
9148                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9149                                 DRV_MSG_SEQ_NUMBER_MASK);
9150
9151                 } else
9152                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9153         }
9154 }
9155
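/* Read the chip id, flash size, shmem bases and bootcode version from the
 * hardware and shared memory, and derive the common driver flags
 * (single-port, no-MCP, no-WoL).
 */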
9156 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9157 {
9158         u32 val, val2, val3, val4, id;
9159         u16 pmc;
9160
9161         /* Get the chip revision id and number. */
9162         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9163         val = REG_RD(bp, MISC_REG_CHIP_NUM);
9164         id = ((val & 0xffff) << 16);
9165         val = REG_RD(bp, MISC_REG_CHIP_REV);
9166         id |= ((val & 0xf) << 12);
9167         val = REG_RD(bp, MISC_REG_CHIP_METAL);
9168         id |= ((val & 0xff) << 4);
9169         val = REG_RD(bp, MISC_REG_BOND_ID);
9170         id |= (val & 0xf);
9171         bp->common.chip_id = id;
9172         bp->link_params.chip_id = bp->common.chip_id;
9173         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9174
9175         val = (REG_RD(bp, 0x2874) & 0x55);
9176         if ((bp->common.chip_id & 0x1) ||
9177             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9178                 bp->flags |= ONE_PORT_FLAG;
9179                 BNX2X_DEV_INFO("single port device\n");
9180         }
9181
9182         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9183         bp->common.flash_size = (NVRAM_1MB_SIZE <<
9184                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
9185         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9186                        bp->common.flash_size, bp->common.flash_size);
9187
9188         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9189         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9190         bp->link_params.shmem_base = bp->common.shmem_base;
9191         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
9192                        bp->common.shmem_base, bp->common.shmem2_base);
9193
9194         if (!bp->common.shmem_base ||
9195             (bp->common.shmem_base < 0xA0000) ||
9196             (bp->common.shmem_base >= 0xC0000)) {
9197                 BNX2X_DEV_INFO("MCP not active\n");
9198                 bp->flags |= NO_MCP_FLAG;
9199                 return;
9200         }
9201
9202         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9203         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9204                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9205                 BNX2X_ERROR("BAD MCP validity signature\n");
9206
9207         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9208         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9209
9210         bp->link_params.hw_led_mode = ((bp->common.hw_config &
9211                                         SHARED_HW_CFG_LED_MODE_MASK) >>
9212                                        SHARED_HW_CFG_LED_MODE_SHIFT);
9213
9214         bp->link_params.feature_config_flags = 0;
9215         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9216         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9217                 bp->link_params.feature_config_flags |=
9218                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9219         else
9220                 bp->link_params.feature_config_flags &=
9221                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9222
9223         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9224         bp->common.bc_ver = val;
9225         BNX2X_DEV_INFO("bc_ver %X\n", val);
9226         if (val < BNX2X_BC_VER) {
9227                 /* for now only warn;
9228                  * later we might need to enforce this */
9229                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9230                             "please upgrade BC\n", BNX2X_BC_VER, val);
9231         }
9232         bp->link_params.feature_config_flags |=
9233                 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9234                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9235
9236         if (BP_E1HVN(bp) == 0) {
9237                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9238                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9239         } else {
9240                 /* no WOL capability for E1HVN != 0 */
9241                 bp->flags |= NO_WOL_FLAG;
9242         }
9243         BNX2X_DEV_INFO("%sWoL capable\n",
9244                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
9245
9246         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9247         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9248         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9249         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9250
9251         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9252                  val, val2, val3, val4);
9253 }
9254
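/* Build the "supported" link mode mask from the NVRAM switch configuration
 * and the external PHY type, then trim it according to the port's
 * speed_cap_mask.
 */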
9255 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9256                                                     u32 switch_cfg)
9257 {
9258         int port = BP_PORT(bp);
9259         u32 ext_phy_type;
9260
9261         switch (switch_cfg) {
9262         case SWITCH_CFG_1G:
9263                 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9264
9265                 ext_phy_type =
9266                         SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9267                 switch (ext_phy_type) {
9268                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9269                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9270                                        ext_phy_type);
9271
9272                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9273                                                SUPPORTED_10baseT_Full |
9274                                                SUPPORTED_100baseT_Half |
9275                                                SUPPORTED_100baseT_Full |
9276                                                SUPPORTED_1000baseT_Full |
9277                                                SUPPORTED_2500baseX_Full |
9278                                                SUPPORTED_TP |
9279                                                SUPPORTED_FIBRE |
9280                                                SUPPORTED_Autoneg |
9281                                                SUPPORTED_Pause |
9282                                                SUPPORTED_Asym_Pause);
9283                         break;
9284
9285                 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9286                         BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9287                                        ext_phy_type);
9288
9289                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9290                                                SUPPORTED_10baseT_Full |
9291                                                SUPPORTED_100baseT_Half |
9292                                                SUPPORTED_100baseT_Full |
9293                                                SUPPORTED_1000baseT_Full |
9294                                                SUPPORTED_TP |
9295                                                SUPPORTED_FIBRE |
9296                                                SUPPORTED_Autoneg |
9297                                                SUPPORTED_Pause |
9298                                                SUPPORTED_Asym_Pause);
9299                         break;
9300
9301                 default:
9302                         BNX2X_ERR("NVRAM config error. "
9303                                   "BAD SerDes ext_phy_config 0x%x\n",
9304                                   bp->link_params.ext_phy_config);
9305                         return;
9306                 }
9307
9308                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9309                                            port*0x10);
9310                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9311                 break;
9312
9313         case SWITCH_CFG_10G:
9314                 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9315
9316                 ext_phy_type =
9317                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9318                 switch (ext_phy_type) {
9319                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9320                         BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9321                                        ext_phy_type);
9322
9323                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9324                                                SUPPORTED_10baseT_Full |
9325                                                SUPPORTED_100baseT_Half |
9326                                                SUPPORTED_100baseT_Full |
9327                                                SUPPORTED_1000baseT_Full |
9328                                                SUPPORTED_2500baseX_Full |
9329                                                SUPPORTED_10000baseT_Full |
9330                                                SUPPORTED_TP |
9331                                                SUPPORTED_FIBRE |
9332                                                SUPPORTED_Autoneg |
9333                                                SUPPORTED_Pause |
9334                                                SUPPORTED_Asym_Pause);
9335                         break;
9336
9337                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9338                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9339                                        ext_phy_type);
9340
9341                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9342                                                SUPPORTED_1000baseT_Full |
9343                                                SUPPORTED_FIBRE |
9344                                                SUPPORTED_Autoneg |
9345                                                SUPPORTED_Pause |
9346                                                SUPPORTED_Asym_Pause);
9347                         break;
9348
9349                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9350                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9351                                        ext_phy_type);
9352
9353                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354                                                SUPPORTED_2500baseX_Full |
9355                                                SUPPORTED_1000baseT_Full |
9356                                                SUPPORTED_FIBRE |
9357                                                SUPPORTED_Autoneg |
9358                                                SUPPORTED_Pause |
9359                                                SUPPORTED_Asym_Pause);
9360                         break;
9361
9362                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9363                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9364                                        ext_phy_type);
9365
9366                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9367                                                SUPPORTED_FIBRE |
9368                                                SUPPORTED_Pause |
9369                                                SUPPORTED_Asym_Pause);
9370                         break;
9371
9372                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9373                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9374                                        ext_phy_type);
9375
9376                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9377                                                SUPPORTED_1000baseT_Full |
9378                                                SUPPORTED_FIBRE |
9379                                                SUPPORTED_Pause |
9380                                                SUPPORTED_Asym_Pause);
9381                         break;
9382
9383                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9384                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9385                                        ext_phy_type);
9386
9387                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9388                                                SUPPORTED_1000baseT_Full |
9389                                                SUPPORTED_Autoneg |
9390                                                SUPPORTED_FIBRE |
9391                                                SUPPORTED_Pause |
9392                                                SUPPORTED_Asym_Pause);
9393                         break;
9394
9395                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9396                         BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9397                                        ext_phy_type);
9398
9399                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400                                                SUPPORTED_1000baseT_Full |
9401                                                SUPPORTED_Autoneg |
9402                                                SUPPORTED_FIBRE |
9403                                                SUPPORTED_Pause |
9404                                                SUPPORTED_Asym_Pause);
9405                         break;
9406
9407                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9408                         BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9409                                        ext_phy_type);
9410
9411                         bp->port.supported |= (SUPPORTED_10000baseT_Full |
9412                                                SUPPORTED_TP |
9413                                                SUPPORTED_Autoneg |
9414                                                SUPPORTED_Pause |
9415                                                SUPPORTED_Asym_Pause);
9416                         break;
9417
9418                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9419                         BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9420                                        ext_phy_type);
9421
9422                         bp->port.supported |= (SUPPORTED_10baseT_Half |
9423                                                SUPPORTED_10baseT_Full |
9424                                                SUPPORTED_100baseT_Half |
9425                                                SUPPORTED_100baseT_Full |
9426                                                SUPPORTED_1000baseT_Full |
9427                                                SUPPORTED_10000baseT_Full |
9428                                                SUPPORTED_TP |
9429                                                SUPPORTED_Autoneg |
9430                                                SUPPORTED_Pause |
9431                                                SUPPORTED_Asym_Pause);
9432                         break;
9433
9434                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9435                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9436                                   bp->link_params.ext_phy_config);
9437                         break;
9438
9439                 default:
9440                         BNX2X_ERR("NVRAM config error. "
9441                                   "BAD XGXS ext_phy_config 0x%x\n",
9442                                   bp->link_params.ext_phy_config);
9443                         return;
9444                 }
9445
9446                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9447                                            port*0x18);
9448                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9449
9450                 break;
9451
9452         default:
9453                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9454                           bp->port.link_config);
9455                 return;
9456         }
9457         bp->link_params.phy_addr = bp->port.phy_addr;
9458
9459         /* mask what we support according to speed_cap_mask */
9460         if (!(bp->link_params.speed_cap_mask &
9461                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9462                 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9463
9464         if (!(bp->link_params.speed_cap_mask &
9465                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9466                 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9467
9468         if (!(bp->link_params.speed_cap_mask &
9469                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9470                 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9471
9472         if (!(bp->link_params.speed_cap_mask &
9473                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9474                 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9475
9476         if (!(bp->link_params.speed_cap_mask &
9477                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9478                 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9479                                         SUPPORTED_1000baseT_Full);
9480
9481         if (!(bp->link_params.speed_cap_mask &
9482                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9483                 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9484
9485         if (!(bp->link_params.speed_cap_mask &
9486                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9487                 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9488
9489         BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9490 }
9491
9492 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9493 {
9494         bp->link_params.req_duplex = DUPLEX_FULL;
9495
9496         switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9497         case PORT_FEATURE_LINK_SPEED_AUTO:
9498                 if (bp->port.supported & SUPPORTED_Autoneg) {
9499                         bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9500                         bp->port.advertising = bp->port.supported;
9501                 } else {
9502                         u32 ext_phy_type =
9503                             XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9504
9505                         if ((ext_phy_type ==
9506                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9507                             (ext_phy_type ==
9508                              PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9509                                 /* force 10G, no AN */
9510                                 bp->link_params.req_line_speed = SPEED_10000;
9511                                 bp->port.advertising =
9512                                                 (ADVERTISED_10000baseT_Full |
9513                                                  ADVERTISED_FIBRE);
9514                                 break;
9515                         }
9516                         BNX2X_ERR("NVRAM config error. "
9517                                   "Invalid link_config 0x%x"
9518                                   "  Autoneg not supported\n",
9519                                   bp->port.link_config);
9520                         return;
9521                 }
9522                 break;
9523
9524         case PORT_FEATURE_LINK_SPEED_10M_FULL:
9525                 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9526                         bp->link_params.req_line_speed = SPEED_10;
9527                         bp->port.advertising = (ADVERTISED_10baseT_Full |
9528                                                 ADVERTISED_TP);
9529                 } else {
9530                         BNX2X_ERROR("NVRAM config error. "
9531                                     "Invalid link_config 0x%x"
9532                                     "  speed_cap_mask 0x%x\n",
9533                                     bp->port.link_config,
9534                                     bp->link_params.speed_cap_mask);
9535                         return;
9536                 }
9537                 break;
9538
9539         case PORT_FEATURE_LINK_SPEED_10M_HALF:
9540                 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9541                         bp->link_params.req_line_speed = SPEED_10;
9542                         bp->link_params.req_duplex = DUPLEX_HALF;
9543                         bp->port.advertising = (ADVERTISED_10baseT_Half |
9544                                                 ADVERTISED_TP);
9545                 } else {
9546                         BNX2X_ERROR("NVRAM config error. "
9547                                     "Invalid link_config 0x%x"
9548                                     "  speed_cap_mask 0x%x\n",
9549                                     bp->port.link_config,
9550                                     bp->link_params.speed_cap_mask);
9551                         return;
9552                 }
9553                 break;
9554
9555         case PORT_FEATURE_LINK_SPEED_100M_FULL:
9556                 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9557                         bp->link_params.req_line_speed = SPEED_100;
9558                         bp->port.advertising = (ADVERTISED_100baseT_Full |
9559                                                 ADVERTISED_TP);
9560                 } else {
9561                         BNX2X_ERROR("NVRAM config error. "
9562                                     "Invalid link_config 0x%x"
9563                                     "  speed_cap_mask 0x%x\n",
9564                                     bp->port.link_config,
9565                                     bp->link_params.speed_cap_mask);
9566                         return;
9567                 }
9568                 break;
9569
9570         case PORT_FEATURE_LINK_SPEED_100M_HALF:
9571                 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9572                         bp->link_params.req_line_speed = SPEED_100;
9573                         bp->link_params.req_duplex = DUPLEX_HALF;
9574                         bp->port.advertising = (ADVERTISED_100baseT_Half |
9575                                                 ADVERTISED_TP);
9576                 } else {
9577                         BNX2X_ERROR("NVRAM config error. "
9578                                     "Invalid link_config 0x%x"
9579                                     "  speed_cap_mask 0x%x\n",
9580                                     bp->port.link_config,
9581                                     bp->link_params.speed_cap_mask);
9582                         return;
9583                 }
9584                 break;
9585
9586         case PORT_FEATURE_LINK_SPEED_1G:
9587                 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9588                         bp->link_params.req_line_speed = SPEED_1000;
9589                         bp->port.advertising = (ADVERTISED_1000baseT_Full |
9590                                                 ADVERTISED_TP);
9591                 } else {
9592                         BNX2X_ERROR("NVRAM config error. "
9593                                     "Invalid link_config 0x%x"
9594                                     "  speed_cap_mask 0x%x\n",
9595                                     bp->port.link_config,
9596                                     bp->link_params.speed_cap_mask);
9597                         return;
9598                 }
9599                 break;
9600
9601         case PORT_FEATURE_LINK_SPEED_2_5G:
9602                 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9603                         bp->link_params.req_line_speed = SPEED_2500;
9604                         bp->port.advertising = (ADVERTISED_2500baseX_Full |
9605                                                 ADVERTISED_TP);
9606                 } else {
9607                         BNX2X_ERROR("NVRAM config error. "
9608                                     "Invalid link_config 0x%x"
9609                                     "  speed_cap_mask 0x%x\n",
9610                                     bp->port.link_config,
9611                                     bp->link_params.speed_cap_mask);
9612                         return;
9613                 }
9614                 break;
9615
9616         case PORT_FEATURE_LINK_SPEED_10G_CX4:
9617         case PORT_FEATURE_LINK_SPEED_10G_KX4:
9618         case PORT_FEATURE_LINK_SPEED_10G_KR:
9619                 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9620                         bp->link_params.req_line_speed = SPEED_10000;
9621                         bp->port.advertising = (ADVERTISED_10000baseT_Full |
9622                                                 ADVERTISED_FIBRE);
9623                 } else {
9624                         BNX2X_ERROR("NVRAM config error. "
9625                                     "Invalid link_config 0x%x"
9626                                     "  speed_cap_mask 0x%x\n",
9627                                     bp->port.link_config,
9628                                     bp->link_params.speed_cap_mask);
9629                         return;
9630                 }
9631                 break;
9632
9633         default:
9634                 BNX2X_ERROR("NVRAM config error. "
9635                             "BAD link speed link_config 0x%x\n",
9636                             bp->port.link_config);
9637                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9638                 bp->port.advertising = bp->port.supported;
9639                 break;
9640         }
9641
9642         bp->link_params.req_flow_ctrl = (bp->port.link_config &
9643                                          PORT_FEATURE_FLOW_CONTROL_MASK);
9644         if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9645             !(bp->port.supported & SUPPORTED_Autoneg))
9646                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9647
9648         BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
9649                        "  advertising 0x%x\n",
9650                        bp->link_params.req_line_speed,
9651                        bp->link_params.req_duplex,
9652                        bp->link_params.req_flow_ctrl, bp->port.advertising);
9653 }
9654
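     /*
      * Pack a MAC address that shmem keeps as two CPU-order words
      * (mac_hi = bytes 0-1, mac_lo = bytes 2-5) into a 6-byte buffer in
      * network order.  E.g. mac_hi 0x0010 and mac_lo 0x18428aa0 yield
      * the address 00:10:18:42:8a:a0.
      */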
9655 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9656 {
9657         mac_hi = cpu_to_be16(mac_hi);
9658         mac_lo = cpu_to_be32(mac_lo);
9659         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9660         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
9661 }
9662
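     /*
      * Read the per-port HW configuration out of shmem: lane and external
      * PHY setup, speed capability mask, link config, the per-lane XGXS
      * Rx/Tx settings, the WoL default and the port MAC address(es).
      */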
9663 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9664 {
9665         int port = BP_PORT(bp);
9666         u32 val, val2;
9667         u32 config;
9668         u16 i;
9669         u32 ext_phy_type;
9670
9671         bp->link_params.bp = bp;
9672         bp->link_params.port = port;
9673
9674         bp->link_params.lane_config =
9675                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9676         bp->link_params.ext_phy_config =
9677                 SHMEM_RD(bp,
9678                          dev_info.port_hw_config[port].external_phy_config);
9679         /* BCM8727_NOC => BCM8727 with no over-current support */
9680         if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9681             PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9682                 bp->link_params.ext_phy_config &=
9683                         ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9684                 bp->link_params.ext_phy_config |=
9685                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9686                 bp->link_params.feature_config_flags |=
9687                         FEATURE_CONFIG_BCM8727_NOC;
9688         }
9689
9690         bp->link_params.speed_cap_mask =
9691                 SHMEM_RD(bp,
9692                          dev_info.port_hw_config[port].speed_capability_mask);
9693
9694         bp->port.link_config =
9695                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9696
9697         /* Get the XGXS Rx and Tx config for all 4 lanes */
9698         for (i = 0; i < 2; i++) {
9699                 val = SHMEM_RD(bp,
9700                            dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9701                 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9702                 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9703
9704                 val = SHMEM_RD(bp,
9705                            dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9706                 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9707                 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9708         }
9709
9710         /* If the device is capable of WoL, set the default state according
9711          * to the HW
9712          */
9713         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9714         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9715                    (config & PORT_FEATURE_WOL_ENABLED));
9716
9717         BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
9718                        "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
9719                        bp->link_params.lane_config,
9720                        bp->link_params.ext_phy_config,
9721                        bp->link_params.speed_cap_mask, bp->port.link_config);
9722
9723         bp->link_params.switch_cfg |= (bp->port.link_config &
9724                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
9725         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9726
9727         bnx2x_link_settings_requested(bp);
9728
9729         /*
9730          * If connected directly, work with the internal PHY; otherwise, work
9731          * with the external PHY
9732          */
9733         ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9734         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9735                 bp->mdio.prtad = bp->link_params.phy_addr;
9736
9737         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9738                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9739                 bp->mdio.prtad =
9740                         XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9741
9742         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9743         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9744         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9745         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9746         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9747
9748 #ifdef BCM_CNIC
9749         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9750         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9751         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9752 #endif
9753 }
9754
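     /*
      * Gather function-level HW info: the common and port shmem data, the
      * E1H multi-function (outer-VLAN) configuration, the firmware mailbox
      * sequence and the MAC address, which MF mode may override per function.
      */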
9755 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9756 {
9757         int func = BP_FUNC(bp);
9758         u32 val, val2;
9759         int rc = 0;
9760
9761         bnx2x_get_common_hwinfo(bp);
9762
9763         bp->e1hov = 0;
9764         bp->e1hmf = 0;
9765         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9766                 bp->mf_config =
9767                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9768
9769                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9770                        FUNC_MF_CFG_E1HOV_TAG_MASK);
9771                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9772                         bp->e1hmf = 1;
9773                 BNX2X_DEV_INFO("%s function mode\n",
9774                                IS_E1HMF(bp) ? "multi" : "single");
9775
9776                 if (IS_E1HMF(bp)) {
9777                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9778                                                                 e1hov_tag) &
9779                                FUNC_MF_CFG_E1HOV_TAG_MASK);
9780                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9781                                 bp->e1hov = val;
9782                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9783                                                "(0x%04x)\n",
9784                                                func, bp->e1hov, bp->e1hov);
9785                         } else {
9786                                 BNX2X_ERROR("No valid E1HOV for func %d,"
9787                                             "  aborting\n", func);
9788                                 rc = -EPERM;
9789                         }
9790                 } else {
9791                         if (BP_E1HVN(bp)) {
9792                                 BNX2X_ERROR("VN %d in single function mode,"
9793                                             "  aborting\n", BP_E1HVN(bp));
9794                                 rc = -EPERM;
9795                         }
9796                 }
9797         }
9798
9799         if (!BP_NOMCP(bp)) {
9800                 bnx2x_get_port_hwinfo(bp);
9801
9802                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9803                               DRV_MSG_SEQ_NUMBER_MASK);
9804                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9805         }
9806
9807         if (IS_E1HMF(bp)) {
9808                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9809                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
9810                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9811                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
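                             /* mac_upper holds MAC bytes 0-1, mac_lower holds bytes 2-5 */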
9812                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9813                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9814                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9815                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9816                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
9817                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
9818                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9819                                ETH_ALEN);
9820                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9821                                ETH_ALEN);
9822                 }
9823
9824                 return rc;
9825         }
9826
9827         if (BP_NOMCP(bp)) {
9828                 /* only supposed to happen on emulation/FPGA */
9829                 BNX2X_ERROR("warning: random MAC workaround active\n");
9830                 random_ether_addr(bp->dev->dev_addr);
9831                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9832         }
9833
9834         return rc;
9835 }
9836
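     /*
      * Pull the firmware version string out of the PCI VPD read-only area:
      * when the MFR-ID keyword matches the Dell vendor ID, the V0 keyword
      * carries the vendor-specific version copied into bp->fw_ver.
      */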
9837 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9838 {
9839         int cnt, i, block_end, rodi;
9840         char vpd_data[BNX2X_VPD_LEN+1];
9841         char str_id_reg[VENDOR_ID_LEN+1];
9842         char str_id_cap[VENDOR_ID_LEN+1];
9843         u8 len;
9844
9845         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9846         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9847
9848         if (cnt < BNX2X_VPD_LEN)
9849                 goto out_not_found;
9850
9851         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9852                              PCI_VPD_LRDT_RO_DATA);
9853         if (i < 0)
9854                 goto out_not_found;
9855
9856
9857         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9858                     pci_vpd_lrdt_size(&vpd_data[i]);
9859
9860         i += PCI_VPD_LRDT_TAG_SIZE;
9861
9862         if (block_end > BNX2X_VPD_LEN)
9863                 goto out_not_found;
9864
9865         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9866                                    PCI_VPD_RO_KEYWORD_MFR_ID);
9867         if (rodi < 0)
9868                 goto out_not_found;
9869
9870         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9871
9872         if (len != VENDOR_ID_LEN)
9873                 goto out_not_found;
9874
9875         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9876
9877         /* vendor specific info */
9878         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9879         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9880         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9881             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9882
9883                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9884                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
9885                 if (rodi >= 0) {
9886                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
9887
9888                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9889
9890                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9891                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9892                                 bp->fw_ver[len] = ' ';
9893                         }
9894                 }
9895                 return;
9896         }
9897 out_not_found:
9898         return;
9899 }
9900
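     /*
      * One-time per-device init: locks and delayed work, HW/FW discovery,
      * settings derived from module parameters and the periodic timer.
      */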
9901 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9902 {
9903         int func = BP_FUNC(bp);
9904         int timer_interval;
9905         int rc;
9906
9907         /* Disable interrupt handling until HW is initialized */
9908         atomic_set(&bp->intr_sem, 1);
9909         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9910
9911         mutex_init(&bp->port.phy_mutex);
9912         mutex_init(&bp->fw_mb_mutex);
9913         spin_lock_init(&bp->stats_lock);
9914 #ifdef BCM_CNIC
9915         mutex_init(&bp->cnic_mutex);
9916 #endif
9917
9918         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9919         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9920
9921         rc = bnx2x_get_hwinfo(bp);
9922
9923         bnx2x_read_fwinfo(bp);
9924         /* need to reset the chip if UNDI was active */
9925         if (!BP_NOMCP(bp))
9926                 bnx2x_undi_unload(bp);
9927
9928         if (CHIP_REV_IS_FPGA(bp))
9929                 dev_err(&bp->pdev->dev, "FPGA detected\n");
9930
9931         if (BP_NOMCP(bp) && (func == 0))
9932                 dev_err(&bp->pdev->dev, "MCP disabled, "
9933                                         "must load devices in order!\n");
9934
9935         /* Set multi queue mode */
9936         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9937             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9938                 dev_err(&bp->pdev->dev, "Multi queue disabled since the "
9939                                         "requested int_mode is not MSI-X\n");
9940                 multi_mode = ETH_RSS_MODE_DISABLED;
9941         }
9942         bp->multi_mode = multi_mode;
9943
9944
9945         bp->dev->features |= NETIF_F_GRO;
9946
9947         /* Set TPA flags */
9948         if (disable_tpa) {
9949                 bp->flags &= ~TPA_ENABLE_FLAG;
9950                 bp->dev->features &= ~NETIF_F_LRO;
9951         } else {
9952                 bp->flags |= TPA_ENABLE_FLAG;
9953                 bp->dev->features |= NETIF_F_LRO;
9954         }
9955
9956         if (CHIP_IS_E1(bp))
9957                 bp->dropless_fc = 0;
9958         else
9959                 bp->dropless_fc = dropless_fc;
9960
9961         bp->mrrs = mrrs;
9962
9963         bp->tx_ring_size = MAX_TX_AVAIL;
9964         bp->rx_ring_size = MAX_RX_AVAIL;
9965
9966         bp->rx_csum = 1;
9967
9968         /* round the coalescing ticks down to a multiple of (4 * BNX2X_BTR) */
9969         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9970         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9971
9972         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9973         bp->current_interval = (poll ? poll : timer_interval);
9974
9975         init_timer(&bp->timer);
9976         bp->timer.expires = jiffies + bp->current_interval;
9977         bp->timer.data = (unsigned long) bp;
9978         bp->timer.function = bnx2x_timer;
9979
9980         return rc;
9981 }
9982
9983 /*
9984  * ethtool service functions
9985  */
9986
9987 /* All ethtool functions called with rtnl_lock */
9988
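     /* Report the current link settings; in E1H multi-function mode the
      * reported speed is clamped to this function's max-bandwidth share
      * taken from the MF configuration.
      */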
9989 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9990 {
9991         struct bnx2x *bp = netdev_priv(dev);
9992
9993         cmd->supported = bp->port.supported;
9994         cmd->advertising = bp->port.advertising;
9995
9996         if ((bp->state == BNX2X_STATE_OPEN) &&
9997             !(bp->flags & MF_FUNC_DIS) &&
9998             (bp->link_vars.link_up)) {
9999                 cmd->speed = bp->link_vars.line_speed;
10000                 cmd->duplex = bp->link_vars.duplex;
10001                 if (IS_E1HMF(bp)) {
10002                         u16 vn_max_rate;
10003
10004                         vn_max_rate =
10005                                 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10006                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10007                         if (vn_max_rate < cmd->speed)
10008                                 cmd->speed = vn_max_rate;
10009                 }
10010         } else {
10011                 cmd->speed = -1;
10012                 cmd->duplex = -1;
10013         }
10014
10015         if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10016                 u32 ext_phy_type =
10017                         XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10018
10019                 switch (ext_phy_type) {
10020                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10021                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10022                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10023                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10024                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10025                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10026                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10027                         cmd->port = PORT_FIBRE;
10028                         break;
10029
10030                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10031                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10032                         cmd->port = PORT_TP;
10033                         break;
10034
10035                 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10036                         BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10037                                   bp->link_params.ext_phy_config);
10038                         break;
10039
10040                 default:
10041                         DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10042                            bp->link_params.ext_phy_config);
10043                         break;
10044                 }
10045         } else
10046                 cmd->port = PORT_TP;
10047
10048         cmd->phy_address = bp->mdio.prtad;
10049         cmd->transceiver = XCVR_INTERNAL;
10050
10051         if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10052                 cmd->autoneg = AUTONEG_ENABLE;
10053         else
10054                 cmd->autoneg = AUTONEG_DISABLE;
10055
10056         cmd->maxtxpkt = 0;
10057         cmd->maxrxpkt = 0;
10058
10059         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10060            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10061            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10062            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10063            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10064            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10065            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10066
10067         return 0;
10068 }
10069
10070 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10071 {
10072         struct bnx2x *bp = netdev_priv(dev);
10073         u32 advertising;
10074
10075         if (IS_E1HMF(bp))
10076                 return 0;
10077
10078         DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10079            DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
10080            DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
10081            DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
10082            cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10083            cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10084            cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10085
10086         if (cmd->autoneg == AUTONEG_ENABLE) {
10087                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10088                         DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10089                         return -EINVAL;
10090                 }
10091
10092                 /* advertise the requested speed and duplex if supported */
10093                 cmd->advertising &= bp->port.supported;
10094
10095                 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10096                 bp->link_params.req_duplex = DUPLEX_FULL;
10097                 bp->port.advertising |= (ADVERTISED_Autoneg |
10098                                          cmd->advertising);
10099
10100         } else { /* forced speed */
10101                 /* advertise the requested speed and duplex if supported */
10102                 switch (cmd->speed) {
10103                 case SPEED_10:
10104                         if (cmd->duplex == DUPLEX_FULL) {
10105                                 if (!(bp->port.supported &
10106                                       SUPPORTED_10baseT_Full)) {
10107                                         DP(NETIF_MSG_LINK,
10108                                            "10M full not supported\n");
10109                                         return -EINVAL;
10110                                 }
10111
10112                                 advertising = (ADVERTISED_10baseT_Full |
10113                                                ADVERTISED_TP);
10114                         } else {
10115                                 if (!(bp->port.supported &
10116                                       SUPPORTED_10baseT_Half)) {
10117                                         DP(NETIF_MSG_LINK,
10118                                            "10M half not supported\n");
10119                                         return -EINVAL;
10120                                 }
10121
10122                                 advertising = (ADVERTISED_10baseT_Half |
10123                                                ADVERTISED_TP);
10124                         }
10125                         break;
10126
10127                 case SPEED_100:
10128                         if (cmd->duplex == DUPLEX_FULL) {
10129                                 if (!(bp->port.supported &
10130                                                 SUPPORTED_100baseT_Full)) {
10131                                         DP(NETIF_MSG_LINK,
10132                                            "100M full not supported\n");
10133                                         return -EINVAL;
10134                                 }
10135
10136                                 advertising = (ADVERTISED_100baseT_Full |
10137                                                ADVERTISED_TP);
10138                         } else {
10139                                 if (!(bp->port.supported &
10140                                                 SUPPORTED_100baseT_Half)) {
10141                                         DP(NETIF_MSG_LINK,
10142                                            "100M half not supported\n");
10143                                         return -EINVAL;
10144                                 }
10145
10146                                 advertising = (ADVERTISED_100baseT_Half |
10147                                                ADVERTISED_TP);
10148                         }
10149                         break;
10150
10151                 case SPEED_1000:
10152                         if (cmd->duplex != DUPLEX_FULL) {
10153                                 DP(NETIF_MSG_LINK, "1G half not supported\n");
10154                                 return -EINVAL;
10155                         }
10156
10157                         if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10158                                 DP(NETIF_MSG_LINK, "1G full not supported\n");
10159                                 return -EINVAL;
10160                         }
10161
10162                         advertising = (ADVERTISED_1000baseT_Full |
10163                                        ADVERTISED_TP);
10164                         break;
10165
10166                 case SPEED_2500:
10167                         if (cmd->duplex != DUPLEX_FULL) {
10168                                 DP(NETIF_MSG_LINK,
10169                                    "2.5G half not supported\n");
10170                                 return -EINVAL;
10171                         }
10172
10173                         if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10174                                 DP(NETIF_MSG_LINK,
10175                                    "2.5G full not supported\n");
10176                                 return -EINVAL;
10177                         }
10178
10179                         advertising = (ADVERTISED_2500baseX_Full |
10180                                        ADVERTISED_TP);
10181                         break;
10182
10183                 case SPEED_10000:
10184                         if (cmd->duplex != DUPLEX_FULL) {
10185                                 DP(NETIF_MSG_LINK, "10G half not supported\n");
10186                                 return -EINVAL;
10187                         }
10188
10189                         if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10190                                 DP(NETIF_MSG_LINK, "10G full not supported\n");
10191                                 return -EINVAL;
10192                         }
10193
10194                         advertising = (ADVERTISED_10000baseT_Full |
10195                                        ADVERTISED_FIBRE);
10196                         break;
10197
10198                 default:
10199                         DP(NETIF_MSG_LINK, "Unsupported speed\n");
10200                         return -EINVAL;
10201                 }
10202
10203                 bp->link_params.req_line_speed = cmd->speed;
10204                 bp->link_params.req_duplex = cmd->duplex;
10205                 bp->port.advertising = advertising;
10206         }
10207
10208         DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10209            DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
10210            bp->link_params.req_line_speed, bp->link_params.req_duplex,
10211            bp->port.advertising);
10212
10213         if (netif_running(dev)) {
10214                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10215                 bnx2x_link_set(bp);
10216         }
10217
10218         return 0;
10219 }
10220
10221 #define IS_E1_ONLINE(info)      (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10222 #define IS_E1H_ONLINE(info)     (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10223
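      /*
       * Compute the ethtool dump size: a dump_hdr plus every register range
       * marked online for the running chip (E1 or E1H); each wreg entry also
       * accounts for its read_regs_count companion registers.
       */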
10224 static int bnx2x_get_regs_len(struct net_device *dev)
10225 {
10226         struct bnx2x *bp = netdev_priv(dev);
10227         int regdump_len = 0;
10228         int i;
10229
10230         if (CHIP_IS_E1(bp)) {
10231                 for (i = 0; i < REGS_COUNT; i++)
10232                         if (IS_E1_ONLINE(reg_addrs[i].info))
10233                                 regdump_len += reg_addrs[i].size;
10234
10235                 for (i = 0; i < WREGS_COUNT_E1; i++)
10236                         if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10237                                 regdump_len += wreg_addrs_e1[i].size *
10238                                         (1 + wreg_addrs_e1[i].read_regs_count);
10239
10240         } else { /* E1H */
10241                 for (i = 0; i < REGS_COUNT; i++)
10242                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10243                                 regdump_len += reg_addrs[i].size;
10244
10245                 for (i = 0; i < WREGS_COUNT_E1H; i++)
10246                         if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10247                                 regdump_len += wreg_addrs_e1h[i].size *
10248                                         (1 + wreg_addrs_e1h[i].read_regs_count);
10249         }
10250         regdump_len *= 4;
10251         regdump_len += sizeof(struct dump_hdr);
10252
10253         return regdump_len;
10254 }
10255
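      /* Fill the ethtool register dump: a dump_hdr (with the storm WAITP
       * markers) followed by the raw values of all online register ranges.
       */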
10256 static void bnx2x_get_regs(struct net_device *dev,
10257                            struct ethtool_regs *regs, void *_p)
10258 {
10259         u32 *p = _p, i, j;
10260         struct bnx2x *bp = netdev_priv(dev);
10261         struct dump_hdr dump_hdr = {0};
10262
10263         regs->version = 0;
10264         memset(p, 0, regs->len);
10265
10266         if (!netif_running(bp->dev))
10267                 return;
10268
10269         dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10270         dump_hdr.dump_sign = dump_sign_all;
10271         dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10272         dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10273         dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10274         dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10275         dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10276
10277         memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10278         p += dump_hdr.hdr_size + 1;
10279
10280         if (CHIP_IS_E1(bp)) {
10281                 for (i = 0; i < REGS_COUNT; i++)
10282                         if (IS_E1_ONLINE(reg_addrs[i].info))
10283                                 for (j = 0; j < reg_addrs[i].size; j++)
10284                                         *p++ = REG_RD(bp,
10285                                                       reg_addrs[i].addr + j*4);
10286
10287         } else { /* E1H */
10288                 for (i = 0; i < REGS_COUNT; i++)
10289                         if (IS_E1H_ONLINE(reg_addrs[i].info))
10290                                 for (j = 0; j < reg_addrs[i].size; j++)
10291                                         *p++ = REG_RD(bp,
10292                                                       reg_addrs[i].addr + j*4);
10293         }
10294 }
10295
10296 #define PHY_FW_VER_LEN                  10
10297
10298 static void bnx2x_get_drvinfo(struct net_device *dev,
10299                               struct ethtool_drvinfo *info)
10300 {
10301         struct bnx2x *bp = netdev_priv(dev);
10302         u8 phy_fw_ver[PHY_FW_VER_LEN];
10303
10304         strcpy(info->driver, DRV_MODULE_NAME);
10305         strcpy(info->version, DRV_MODULE_VERSION);
10306
10307         phy_fw_ver[0] = '\0';
10308         if (bp->port.pmf) {
10309                 bnx2x_acquire_phy_lock(bp);
10310                 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10311                                              (bp->state != BNX2X_STATE_CLOSED),
10312                                              phy_fw_ver, PHY_FW_VER_LEN);
10313                 bnx2x_release_phy_lock(bp);
10314         }
10315
10316         strncpy(info->fw_version, bp->fw_ver, 32);
10317         snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10318                  "bc %d.%d.%d%s%s",
10319                  (bp->common.bc_ver & 0xff0000) >> 16,
10320                  (bp->common.bc_ver & 0xff00) >> 8,
10321                  (bp->common.bc_ver & 0xff),
10322                  ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10323         strcpy(info->bus_info, pci_name(bp->pdev));
10324         info->n_stats = BNX2X_NUM_STATS;
10325         info->testinfo_len = BNX2X_NUM_TESTS;
10326         info->eedump_len = bp->common.flash_size;
10327         info->regdump_len = bnx2x_get_regs_len(dev);
10328 }
10329
10330 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10331 {
10332         struct bnx2x *bp = netdev_priv(dev);
10333
10334         if (bp->flags & NO_WOL_FLAG) {
10335                 wol->supported = 0;
10336                 wol->wolopts = 0;
10337         } else {
10338                 wol->supported = WAKE_MAGIC;
10339                 if (bp->wol)
10340                         wol->wolopts = WAKE_MAGIC;
10341                 else
10342                         wol->wolopts = 0;
10343         }
10344         memset(&wol->sopass, 0, sizeof(wol->sopass));
10345 }
10346
10347 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10348 {
10349         struct bnx2x *bp = netdev_priv(dev);
10350
10351         if (wol->wolopts & ~WAKE_MAGIC)
10352                 return -EINVAL;
10353
10354         if (wol->wolopts & WAKE_MAGIC) {
10355                 if (bp->flags & NO_WOL_FLAG)
10356                         return -EINVAL;
10357
10358                 bp->wol = 1;
10359         } else
10360                 bp->wol = 0;
10361
10362         return 0;
10363 }
10364
10365 static u32 bnx2x_get_msglevel(struct net_device *dev)
10366 {
10367         struct bnx2x *bp = netdev_priv(dev);
10368
10369         return bp->msg_enable;
10370 }
10371
10372 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10373 {
10374         struct bnx2x *bp = netdev_priv(dev);
10375
10376         if (capable(CAP_NET_ADMIN))
10377                 bp->msg_enable = level;
10378 }
10379
10380 static int bnx2x_nway_reset(struct net_device *dev)
10381 {
10382         struct bnx2x *bp = netdev_priv(dev);
10383
10384         if (!bp->port.pmf)
10385                 return 0;
10386
10387         if (netif_running(dev)) {
10388                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10389                 bnx2x_link_set(bp);
10390         }
10391
10392         return 0;
10393 }
10394
10395 static u32 bnx2x_get_link(struct net_device *dev)
10396 {
10397         struct bnx2x *bp = netdev_priv(dev);
10398
10399         if (bp->flags & MF_FUNC_DIS)
10400                 return 0;
10401
10402         return bp->link_vars.link_up;
10403 }
10404
10405 static int bnx2x_get_eeprom_len(struct net_device *dev)
10406 {
10407         struct bnx2x *bp = netdev_priv(dev);
10408
10409         return bp->common.flash_size;
10410 }
10411
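      /*
       * NVRAM is shared between the ports, so access goes through a SW
       * arbitration register: request the per-port arb bit and poll until
       * it is granted, with a stretched timeout on emulation/FPGA.
       */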
10412 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10413 {
10414         int port = BP_PORT(bp);
10415         int count, i;
10416         u32 val = 0;
10417
10418         /* adjust timeout for emulation/FPGA */
10419         count = NVRAM_TIMEOUT_COUNT;
10420         if (CHIP_REV_IS_SLOW(bp))
10421                 count *= 100;
10422
10423         /* request access to nvram interface */
10424         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10425                (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10426
10427         for (i = 0; i < count*10; i++) {
10428                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10429                 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10430                         break;
10431
10432                 udelay(5);
10433         }
10434
10435         if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10436                 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10437                 return -EBUSY;
10438         }
10439
10440         return 0;
10441 }
10442
10443 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10444 {
10445         int port = BP_PORT(bp);
10446         int count, i;
10447         u32 val = 0;
10448
10449         /* adjust timeout for emulation/FPGA */
10450         count = NVRAM_TIMEOUT_COUNT;
10451         if (CHIP_REV_IS_SLOW(bp))
10452                 count *= 100;
10453
10454         /* relinquish nvram interface */
10455         REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10456                (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10457
10458         for (i = 0; i < count*10; i++) {
10459                 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10460                 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10461                         break;
10462
10463                 udelay(5);
10464         }
10465
10466         if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10467                 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10468                 return -EBUSY;
10469         }
10470
10471         return 0;
10472 }
10473
10474 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10475 {
10476         u32 val;
10477
10478         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10479
10480         /* enable both bits, even on read */
10481         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10482                (val | MCPR_NVM_ACCESS_ENABLE_EN |
10483                       MCPR_NVM_ACCESS_ENABLE_WR_EN));
10484 }
10485
10486 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10487 {
10488         u32 val;
10489
10490         val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10491
10492         /* disable both bits, even after read */
10493         REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10494                (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10495                         MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10496 }
10497
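      /*
       * Read one dword through the NVM command interface: clear DONE,
       * program the address, issue DOIT and poll for completion.  The
       * result is converted to big-endian so callers can treat the
       * buffer as the byte array ethtool expects.
       */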
10498 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10499                                   u32 cmd_flags)
10500 {
10501         int count, i, rc;
10502         u32 val;
10503
10504         /* build the command word */
10505         cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10506
10507         /* need to clear DONE bit separately */
10508         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10509
10510         /* address of the NVRAM to read from */
10511         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10512                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10513
10514         /* issue a read command */
10515         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10516
10517         /* adjust timeout for emulation/FPGA */
10518         count = NVRAM_TIMEOUT_COUNT;
10519         if (CHIP_REV_IS_SLOW(bp))
10520                 count *= 100;
10521
10522         /* wait for completion */
10523         *ret_val = 0;
10524         rc = -EBUSY;
10525         for (i = 0; i < count; i++) {
10526                 udelay(5);
10527                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10528
10529                 if (val & MCPR_NVM_COMMAND_DONE) {
10530                         val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10531                         /* we read nvram data in cpu order,
10532                          * but ethtool sees it as an array of bytes;
10533                          * converting to big-endian does the work */
10534                         *ret_val = cpu_to_be32(val);
10535                         rc = 0;
10536                         break;
10537                 }
10538         }
10539
10540         return rc;
10541 }
10542
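      /*
       * Read a dword-aligned range from NVRAM while holding the SW
       * arbitration lock; FIRST/LAST command flags bracket the burst.
       */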
10543 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10544                             int buf_size)
10545 {
10546         int rc;
10547         u32 cmd_flags;
10548         __be32 val;
10549
10550         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10551                 DP(BNX2X_MSG_NVM,
10552                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10553                    offset, buf_size);
10554                 return -EINVAL;
10555         }
10556
10557         if (offset + buf_size > bp->common.flash_size) {
10558                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10559                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10560                    offset, buf_size, bp->common.flash_size);
10561                 return -EINVAL;
10562         }
10563
10564         /* request access to nvram interface */
10565         rc = bnx2x_acquire_nvram_lock(bp);
10566         if (rc)
10567                 return rc;
10568
10569         /* enable access to nvram interface */
10570         bnx2x_enable_nvram_access(bp);
10571
10572         /* read the first word(s) */
10573         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10574         while ((buf_size > sizeof(u32)) && (rc == 0)) {
10575                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10576                 memcpy(ret_buf, &val, 4);
10577
10578                 /* advance to the next dword */
10579                 offset += sizeof(u32);
10580                 ret_buf += sizeof(u32);
10581                 buf_size -= sizeof(u32);
10582                 cmd_flags = 0;
10583         }
10584
10585         if (rc == 0) {
10586                 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10587                 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10588                 memcpy(ret_buf, &val, 4);
10589         }
10590
10591         /* disable access to nvram interface */
10592         bnx2x_disable_nvram_access(bp);
10593         bnx2x_release_nvram_lock(bp);
10594
10595         return rc;
10596 }
10597
10598 static int bnx2x_get_eeprom(struct net_device *dev,
10599                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10600 {
10601         struct bnx2x *bp = netdev_priv(dev);
10602         int rc;
10603
10604         if (!netif_running(dev))
10605                 return -EAGAIN;
10606
10607         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10608            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10609            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10610            eeprom->len, eeprom->len);
10611
10612         /* parameters already validated in ethtool_get_eeprom */
10613
10614         rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10615
10616         return rc;
10617 }
10618
10619 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10620                                    u32 cmd_flags)
10621 {
10622         int count, i, rc;
10623
10624         /* build the command word */
10625         cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10626
10627         /* need to clear DONE bit separately */
10628         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10629
10630         /* write the data */
10631         REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10632
10633         /* address of the NVRAM to write to */
10634         REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10635                (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10636
10637         /* issue the write command */
10638         REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10639
10640         /* adjust timeout for emulation/FPGA */
10641         count = NVRAM_TIMEOUT_COUNT;
10642         if (CHIP_REV_IS_SLOW(bp))
10643                 count *= 100;
10644
10645         /* wait for completion */
10646         rc = -EBUSY;
10647         for (i = 0; i < count; i++) {
10648                 udelay(5);
10649                 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10650                 if (val & MCPR_NVM_COMMAND_DONE) {
10651                         rc = 0;
10652                         break;
10653                 }
10654         }
10655
10656         return rc;
10657 }
10658
10659 #define BYTE_OFFSET(offset)             (8 * (offset & 0x03))
10660
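      /*
       * Write a single byte via read-modify-write of its aligned dword;
       * BYTE_OFFSET turns the offset within the dword into a bit shift,
       * e.g. offset 0x13 -> 8 * (0x13 & 3) = 24.
       */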
10661 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10662                               int buf_size)
10663 {
10664         int rc;
10665         u32 cmd_flags;
10666         u32 align_offset;
10667         __be32 val;
10668
10669         if (offset + buf_size > bp->common.flash_size) {
10670                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10671                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10672                    offset, buf_size, bp->common.flash_size);
10673                 return -EINVAL;
10674         }
10675
10676         /* request access to nvram interface */
10677         rc = bnx2x_acquire_nvram_lock(bp);
10678         if (rc)
10679                 return rc;
10680
10681         /* enable access to nvram interface */
10682         bnx2x_enable_nvram_access(bp);
10683
10684         cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10685         align_offset = (offset & ~0x03);
10686         rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10687
10688         if (rc == 0) {
10689                 val &= ~(0xff << BYTE_OFFSET(offset));
10690                 val |= (*data_buf << BYTE_OFFSET(offset));
10691
10692                 /* nvram data is returned as an array of bytes;
10693                  * convert it back to cpu order */
10694                 val = be32_to_cpu(val);
10695
10696                 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10697                                              cmd_flags);
10698         }
10699
10700         /* disable access to nvram interface */
10701         bnx2x_disable_nvram_access(bp);
10702         bnx2x_release_nvram_lock(bp);
10703
10704         return rc;
10705 }
10706
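      /*
       * General NVRAM write: single bytes take the read-modify-write path
       * above, while dword-aligned buffers are streamed with the FIRST and
       * LAST flags reissued at NVRAM page boundaries.
       */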
10707 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10708                              int buf_size)
10709 {
10710         int rc;
10711         u32 cmd_flags;
10712         u32 val;
10713         u32 written_so_far;
10714
10715         if (buf_size == 1)      /* single byte, as issued by ethtool */
10716                 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10717
10718         if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10719                 DP(BNX2X_MSG_NVM,
10720                    "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
10721                    offset, buf_size);
10722                 return -EINVAL;
10723         }
10724
10725         if (offset + buf_size > bp->common.flash_size) {
10726                 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10727                                   " buf_size (0x%x) > flash_size (0x%x)\n",
10728                    offset, buf_size, bp->common.flash_size);
10729                 return -EINVAL;
10730         }
10731
10732         /* request access to nvram interface */
10733         rc = bnx2x_acquire_nvram_lock(bp);
10734         if (rc)
10735                 return rc;
10736
10737         /* enable access to nvram interface */
10738         bnx2x_enable_nvram_access(bp);
10739
10740         written_so_far = 0;
10741         cmd_flags = MCPR_NVM_COMMAND_FIRST;
10742         while ((written_so_far < buf_size) && (rc == 0)) {
10743                 if (written_so_far == (buf_size - sizeof(u32)))
10744                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10745                 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10746                         cmd_flags |= MCPR_NVM_COMMAND_LAST;
10747                 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10748                         cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10749
10750                 memcpy(&val, data_buf, 4);
10751
10752                 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10753
10754                 /* advance to the next dword */
10755                 offset += sizeof(u32);
10756                 data_buf += sizeof(u32);
10757                 written_so_far += sizeof(u32);
10758                 cmd_flags = 0;
10759         }
10760
10761         /* disable access to nvram interface */
10762         bnx2x_disable_nvram_access(bp);
10763         bnx2x_release_nvram_lock(bp);
10764
10765         return rc;
10766 }
10767
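      /*
       * Besides plain NVRAM writes, set_eeprom doubles as a back door for
       * external PHY firmware upgrade: the PHYP/PHYR/PHYC magic values
       * request the pre-upgrade link reset, the post-upgrade re-init and,
       * for the SFX7101, the final PHY software reset.  (Note that the
       * PHYC magic 0x53985943 lies outside the 0x504859xx range tested by
       * the PMF-only guard below.)
       */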
10768 static int bnx2x_set_eeprom(struct net_device *dev,
10769                             struct ethtool_eeprom *eeprom, u8 *eebuf)
10770 {
10771         struct bnx2x *bp = netdev_priv(dev);
10772         int port = BP_PORT(bp);
10773         int rc = 0;
10774
10775         if (!netif_running(dev))
10776                 return -EAGAIN;
10777
10778         DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10779            DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
10780            eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10781            eeprom->len, eeprom->len);
10782
10783         /* parameters already validated in ethtool_set_eeprom */
10784
10785         /* PHY eeprom can be accessed only by the PMF */
10786         if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10787             !bp->port.pmf)
10788                 return -EINVAL;
10789
10790         if (eeprom->magic == 0x50485950) {
10791                 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10792                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10793
10794                 bnx2x_acquire_phy_lock(bp);
10795                 rc |= bnx2x_link_reset(&bp->link_params,
10796                                        &bp->link_vars, 0);
10797                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10798                                         PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10799                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10800                                        MISC_REGISTERS_GPIO_HIGH, port);
10801                 bnx2x_release_phy_lock(bp);
10802                 bnx2x_link_report(bp);
10803
10804         } else if (eeprom->magic == 0x50485952) {
10805                 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10806                 if (bp->state == BNX2X_STATE_OPEN) {
10807                         bnx2x_acquire_phy_lock(bp);
10808                         rc |= bnx2x_link_reset(&bp->link_params,
10809                                                &bp->link_vars, 1);
10810
10811                         rc |= bnx2x_phy_init(&bp->link_params,
10812                                              &bp->link_vars);
10813                         bnx2x_release_phy_lock(bp);
10814                         bnx2x_calc_fc_adv(bp);
10815                 }
10816         } else if (eeprom->magic == 0x53985943) {
10817                 /* 'PHYC' (0x50485943): PHY FW upgrade completed */
10818                 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10819                                        PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10820                         u8 ext_phy_addr =
10821                              XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10822
10823                         /* DSP Remove Download Mode */
10824                         bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10825                                        MISC_REGISTERS_GPIO_LOW, port);
10826
10827                         bnx2x_acquire_phy_lock(bp);
10828
10829                         bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10830
10831                         /* wait 0.5 sec to allow it to run */
10832                         msleep(500);
10833                         bnx2x_ext_phy_hw_reset(bp, port);
10834                         msleep(500);
10835                         bnx2x_release_phy_lock(bp);
10836                 }
10837         } else
10838                 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10839
10840         return rc;
10841 }
10842
10843 static int bnx2x_get_coalesce(struct net_device *dev,
10844                               struct ethtool_coalesce *coal)
10845 {
10846         struct bnx2x *bp = netdev_priv(dev);
10847
10848         memset(coal, 0, sizeof(struct ethtool_coalesce));
10849
10850         coal->rx_coalesce_usecs = bp->rx_ticks;
10851         coal->tx_coalesce_usecs = bp->tx_ticks;
10852
10853         return 0;
10854 }
10855
10856 static int bnx2x_set_coalesce(struct net_device *dev,
10857                               struct ethtool_coalesce *coal)
10858 {
10859         struct bnx2x *bp = netdev_priv(dev);
10860
10861         bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10862         if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10863                 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10864
10865         bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10866         if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10867                 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10868
10869         if (netif_running(dev))
10870                 bnx2x_update_coalesce(bp);
10871
10872         return 0;
10873 }
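
/* Illustrative sketch only: how the clamping above behaves.  The request
 * is never rejected; values beyond BNX2X_MAX_COALESCE_TOUT are silently
 * capped.  The values below are hypothetical.
 */
static void __maybe_unused bnx2x_coalesce_example(struct net_device *dev)
{
	struct ethtool_coalesce coal = {
		.rx_coalesce_usecs = 0xffff,	/* assumed above the cap */
		.tx_coalesce_usecs = 25,	/* taken as-is */
	};

	bnx2x_set_coalesce(dev, &coal);
}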
10874
10875 static void bnx2x_get_ringparam(struct net_device *dev,
10876                                 struct ethtool_ringparam *ering)
10877 {
10878         struct bnx2x *bp = netdev_priv(dev);
10879
10880         ering->rx_max_pending = MAX_RX_AVAIL;
10881         ering->rx_mini_max_pending = 0;
10882         ering->rx_jumbo_max_pending = 0;
10883
10884         ering->rx_pending = bp->rx_ring_size;
10885         ering->rx_mini_pending = 0;
10886         ering->rx_jumbo_pending = 0;
10887
10888         ering->tx_max_pending = MAX_TX_AVAIL;
10889         ering->tx_pending = bp->tx_ring_size;
10890 }
10891
10892 static int bnx2x_set_ringparam(struct net_device *dev,
10893                                struct ethtool_ringparam *ering)
10894 {
10895         struct bnx2x *bp = netdev_priv(dev);
10896         int rc = 0;
10897
10898         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10899                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10900                 return -EAGAIN;
10901         }
10902
10903         if ((ering->rx_pending > MAX_RX_AVAIL) ||
10904             (ering->tx_pending > MAX_TX_AVAIL) ||
10905             (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10906                 return -EINVAL;
10907
10908         bp->rx_ring_size = ering->rx_pending;
10909         bp->tx_ring_size = ering->tx_pending;
10910
10911         if (netif_running(dev)) {
10912                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10913                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10914         }
10915
10916         return rc;
10917 }
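
/* Illustrative sketch only: the bounds enforced above.  A TX ring must
 * leave room for a maximally fragmented skb, so any tx_pending of
 * MAX_SKB_FRAGS + 4 or less is rejected with -EINVAL; the values below
 * are hypothetical but legal.
 */
static int __maybe_unused bnx2x_ringparam_example(struct net_device *dev)
{
	struct ethtool_ringparam ering = {
		.rx_pending = 1024,			/* <= MAX_RX_AVAIL */
		.tx_pending = MAX_SKB_FRAGS + 5,	/* smallest accepted */
	};

	return bnx2x_set_ringparam(dev, &ering);
}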
10918
10919 static void bnx2x_get_pauseparam(struct net_device *dev,
10920                                  struct ethtool_pauseparam *epause)
10921 {
10922         struct bnx2x *bp = netdev_priv(dev);
10923
10924         epause->autoneg = (bp->link_params.req_flow_ctrl ==
10925                            BNX2X_FLOW_CTRL_AUTO) &&
10926                           (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10927
10928         epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10929                             BNX2X_FLOW_CTRL_RX);
10930         epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10931                             BNX2X_FLOW_CTRL_TX);
10932
10933         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10934            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10935            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10936 }
10937
10938 static int bnx2x_set_pauseparam(struct net_device *dev,
10939                                 struct ethtool_pauseparam *epause)
10940 {
10941         struct bnx2x *bp = netdev_priv(dev);
10942
10943         if (IS_E1HMF(bp))
10944                 return 0;
10945
10946         DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10947            DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
10948            epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10949
10950         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10951
10952         if (epause->rx_pause)
10953                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10954
10955         if (epause->tx_pause)
10956                 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10957
10958         if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10959                 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10960
10961         if (epause->autoneg) {
10962                 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10963                         DP(NETIF_MSG_LINK, "autoneg not supported\n");
10964                         return -EINVAL;
10965                 }
10966
10967                 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10968                         bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10969         }
10970
10971         DP(NETIF_MSG_LINK,
10972            "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10973
10974         if (netif_running(dev)) {
10975                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10976                 bnx2x_link_set(bp);
10977         }
10978
10979         return 0;
10980 }
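
/* Illustrative note: the request above is rebuilt from scratch --
 * start from AUTO, OR in RX/TX pause as asked, collapse "neither" to
 * BNX2X_FLOW_CTRL_NONE, and restore BNX2X_FLOW_CTRL_AUTO only when
 * autoneg is requested, supported and the line speed is SPEED_AUTO_NEG.
 */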
10981
10982 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10983 {
10984         struct bnx2x *bp = netdev_priv(dev);
10985         int changed = 0;
10986         int rc = 0;
10987
10988         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10989                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10990                 return -EAGAIN;
10991         }
10992
10993         /* TPA requires Rx CSUM offloading */
10994         if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10995                 if (!disable_tpa) {
10996                         if (!(dev->features & NETIF_F_LRO)) {
10997                                 dev->features |= NETIF_F_LRO;
10998                                 bp->flags |= TPA_ENABLE_FLAG;
10999                                 changed = 1;
11000                         }
11001                 } else
11002                         rc = -EINVAL;
11003         } else if (dev->features & NETIF_F_LRO) {
11004                 dev->features &= ~NETIF_F_LRO;
11005                 bp->flags &= ~TPA_ENABLE_FLAG;
11006                 changed = 1;
11007         }
11008
11009         if (data & ETH_FLAG_RXHASH)
11010                 dev->features |= NETIF_F_RXHASH;
11011         else
11012                 dev->features &= ~NETIF_F_RXHASH;
11013
11014         if (changed && netif_running(dev)) {
11015                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11016                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
11017         }
11018
11019         return rc;
11020 }
11021
11022 static u32 bnx2x_get_rx_csum(struct net_device *dev)
11023 {
11024         struct bnx2x *bp = netdev_priv(dev);
11025
11026         return bp->rx_csum;
11027 }
11028
11029 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
11030 {
11031         struct bnx2x *bp = netdev_priv(dev);
11032         int rc = 0;
11033
11034         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11035                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11036                 return -EAGAIN;
11037         }
11038
11039         bp->rx_csum = data;
11040
11041         /* Disable TPA when Rx CSUM is disabled; otherwise all
11042            TPA'ed packets will be discarded due to a wrong TCP CSUM */
11043         if (!data) {
11044                 u32 flags = ethtool_op_get_flags(dev);
11045
11046                 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
11047         }
11048
11049         return rc;
11050 }
11051
11052 static int bnx2x_set_tso(struct net_device *dev, u32 data)
11053 {
11054         if (data) {
11055                 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11056                 dev->features |= NETIF_F_TSO6;
11057         } else {
11058                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
11059                 dev->features &= ~NETIF_F_TSO6;
11060         }
11061
11062         return 0;
11063 }
11064
11065 static const struct {
11066         char string[ETH_GSTRING_LEN];
11067 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
11068         { "register_test (offline)" },
11069         { "memory_test (offline)" },
11070         { "loopback_test (offline)" },
11071         { "nvram_test (online)" },
11072         { "interrupt_test (online)" },
11073         { "link_test (online)" },
11074         { "idle check (online)" }
11075 };
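
/* Illustrative note: bnx2x_self_test() below fills its buf[] results in
 * this same order -- buf[0] registers through buf[5] link; the trailing
 * "idle check" slot has no corresponding result written by the self test.
 */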
11076
11077 static int bnx2x_test_registers(struct bnx2x *bp)
11078 {
11079         int idx, i, rc = -ENODEV;
11080         u32 wr_val = 0;
11081         int port = BP_PORT(bp);
11082         static const struct {
11083                 u32 offset0;
11084                 u32 offset1;
11085                 u32 mask;
11086         } reg_tbl[] = {
11087 /* 0 */         { BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
11088                 { DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
11089                 { HC_REG_AGG_INT_0,                    4, 0x000003ff },
11090                 { PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
11091                 { PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
11092                 { PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
11093                 { PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
11094                 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
11095                 { PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
11096                 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
11097 /* 10 */        { PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
11098                 { QM_REG_CONNNUM_0,                    4, 0x000fffff },
11099                 { TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
11100                 { SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
11101                 { SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
11102                 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
11103                 { XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
11104                 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
11105                 { NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
11106                 { NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
11107 /* 20 */        { NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
11108                 { NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
11109                 { NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
11110                 { NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
11111                 { NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
11112                 { NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
11113                 { NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
11114                 { NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
11115                 { NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
11116                 { NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
11117 /* 30 */        { NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
11118                 { NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
11119                 { NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
11120                 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
11121                 { NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
11122                 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
11123                 { NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },
11124
11125                 { 0xffffffff, 0, 0x00000000 }
11126         };
11127
11128         if (!netif_running(bp->dev))
11129                 return rc;
11130
11131         /* Run the test twice:
11132            first by writing 0x00000000, then by writing 0xffffffff */
11133         for (idx = 0; idx < 2; idx++) {
11134
11135                 switch (idx) {
11136                 case 0:
11137                         wr_val = 0;
11138                         break;
11139                 case 1:
11140                         wr_val = 0xffffffff;
11141                         break;
11142                 }
11143
11144                 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
11145                         u32 offset, mask, save_val, val;
11146
11147                         offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
11148                         mask = reg_tbl[i].mask;
11149
11150                         save_val = REG_RD(bp, offset);
11151
11152                         REG_WR(bp, offset, (wr_val & mask));
11153                         val = REG_RD(bp, offset);
11154
11155                         /* Restore the original register's value */
11156                         REG_WR(bp, offset, save_val);
11157
11158                         /* verify value is as expected */
11159                         if ((val & mask) != (wr_val & mask)) {
11160                                 DP(NETIF_MSG_PROBE,
11161                                    "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
11162                                    offset, val, wr_val, mask);
11163                                 goto test_reg_exit;
11164                         }
11165                 }
11166         }
11167
11168         rc = 0;
11169
11170 test_reg_exit:
11171         return rc;
11172 }
11173
11174 static int bnx2x_test_memory(struct bnx2x *bp)
11175 {
11176         int i, j, rc = -ENODEV;
11177         u32 val;
11178         static const struct {
11179                 u32 offset;
11180                 int size;
11181         } mem_tbl[] = {
11182                 { CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
11183                 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
11184                 { CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
11185                 { DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
11186                 { TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
11187                 { UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
11188                 { XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },
11189
11190                 { 0xffffffff, 0 }
11191         };
11192         static const struct {
11193                 char *name;
11194                 u32 offset;
11195                 u32 e1_mask;
11196                 u32 e1h_mask;
11197         } prty_tbl[] = {
11198                 { "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
11199                 { "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
11200                 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
11201                 { "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
11202                 { "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
11203                 { "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },
11204
11205                 { NULL, 0xffffffff, 0, 0 }
11206         };
11207
11208         if (!netif_running(bp->dev))
11209                 return rc;
11210
11211         /* Go through all the memories */
11212         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
11213                 for (j = 0; j < mem_tbl[i].size; j++)
11214                         REG_RD(bp, mem_tbl[i].offset + j*4);
11215
11216         /* Check the parity status */
11217         for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
11218                 val = REG_RD(bp, prty_tbl[i].offset);
11219                 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
11220                     (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
11221                         DP(NETIF_MSG_HW,
11222                            "%s is 0x%x\n", prty_tbl[i].name, val);
11223                         goto test_mem_exit;
11224                 }
11225         }
11226
11227         rc = 0;
11228
11229 test_mem_exit:
11230         return rc;
11231 }
11232
11233 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
11234 {
11235         int cnt = 1000;
11236
11237         if (link_up)
11238                 while (bnx2x_link_test(bp) && cnt--)
11239                         msleep(10);
11240 }
11241
11242 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
11243 {
11244         unsigned int pkt_size, num_pkts, i;
11245         struct sk_buff *skb;
11246         unsigned char *packet;
11247         struct bnx2x_fastpath *fp_rx = &bp->fp[0];
11248         struct bnx2x_fastpath *fp_tx = &bp->fp[0];
11249         u16 tx_start_idx, tx_idx;
11250         u16 rx_start_idx, rx_idx;
11251         u16 pkt_prod, bd_prod;
11252         struct sw_tx_bd *tx_buf;
11253         struct eth_tx_start_bd *tx_start_bd;
11254         struct eth_tx_parse_bd *pbd = NULL;
11255         dma_addr_t mapping;
11256         union eth_rx_cqe *cqe;
11257         u8 cqe_fp_flags;
11258         struct sw_rx_bd *rx_buf;
11259         u16 len;
11260         int rc = -ENODEV;
11261
11262         /* check the loopback mode */
11263         switch (loopback_mode) {
11264         case BNX2X_PHY_LOOPBACK:
11265                 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
11266                         return -EINVAL;
11267                 break;
11268         case BNX2X_MAC_LOOPBACK:
11269                 bp->link_params.loopback_mode = LOOPBACK_BMAC;
11270                 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
11271                 break;
11272         default:
11273                 return -EINVAL;
11274         }
11275
11276         /* prepare the loopback packet */
11277         pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
11278                      bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
11279         skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
11280         if (!skb) {
11281                 rc = -ENOMEM;
11282                 goto test_loopback_exit;
11283         }
11284         packet = skb_put(skb, pkt_size);
11285         memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
11286         memset(packet + ETH_ALEN, 0, ETH_ALEN);
11287         memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
11288         for (i = ETH_HLEN; i < pkt_size; i++)
11289                 packet[i] = (unsigned char) (i & 0xff);
11290
11291         /* send the loopback packet */
11292         num_pkts = 0;
11293         tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11294         rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11295
11296         pkt_prod = fp_tx->tx_pkt_prod++;
11297         tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
11298         tx_buf->first_bd = fp_tx->tx_bd_prod;
11299         tx_buf->skb = skb;
11300         tx_buf->flags = 0;
11301
11302         bd_prod = TX_BD(fp_tx->tx_bd_prod);
11303         tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
11304         mapping = dma_map_single(&bp->pdev->dev, skb->data,
11305                                  skb_headlen(skb), DMA_TO_DEVICE);
11306         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11307         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11308         tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
11309         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11310         tx_start_bd->vlan = cpu_to_le16(pkt_prod);
11311         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11312         tx_start_bd->general_data = ((UNICAST_ADDRESS <<
11313                                 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
11314
11315         /* turn on parsing and get a BD */
11316         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11317         pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
11318
11319         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
11320
11321         wmb();
11322
11323         fp_tx->tx_db.data.prod += 2;
11324         barrier();
11325         DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);
11326
11327         mmiowb();
11328
11329         num_pkts++;
11330         fp_tx->tx_bd_prod += 2; /* start + pbd */
11331
11332         udelay(100);
11333
11334         tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
11335         if (tx_idx != tx_start_idx + num_pkts)
11336                 goto test_loopback_exit;
11337
11338         rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
11339         if (rx_idx != rx_start_idx + num_pkts)
11340                 goto test_loopback_exit;
11341
11342         cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
11343         cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
11344         if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
11345                 goto test_loopback_rx_exit;
11346
11347         len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
11348         if (len != pkt_size)
11349                 goto test_loopback_rx_exit;
11350
11351         rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
11352         skb = rx_buf->skb;
11353         skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
11354         for (i = ETH_HLEN; i < pkt_size; i++)
11355                 if (*(skb->data + i) != (unsigned char) (i & 0xff))
11356                         goto test_loopback_rx_exit;
11357
11358         rc = 0;
11359
11360 test_loopback_rx_exit:
11361
11362         fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
11363         fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
11364         fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
11365         fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
11366
11367         /* Update producers */
11368         bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
11369                              fp_rx->rx_sge_prod);
11370
11371 test_loopback_exit:
11372         bp->link_params.loopback_mode = LOOPBACK_NONE;
11373
11374         return rc;
11375 }
11376
11377 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
11378 {
11379         int rc = 0, res;
11380
11381         if (BP_NOMCP(bp))
11382                 return rc;
11383
11384         if (!netif_running(bp->dev))
11385                 return BNX2X_LOOPBACK_FAILED;
11386
11387         bnx2x_netif_stop(bp, 1);
11388         bnx2x_acquire_phy_lock(bp);
11389
11390         res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
11391         if (res) {
11392                 DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
11393                 rc |= BNX2X_PHY_LOOPBACK_FAILED;
11394         }
11395
11396         res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
11397         if (res) {
11398                 DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
11399                 rc |= BNX2X_MAC_LOOPBACK_FAILED;
11400         }
11401
11402         bnx2x_release_phy_lock(bp);
11403         bnx2x_netif_start(bp);
11404
11405         return rc;
11406 }
11407
11408 #define CRC32_RESIDUAL                  0xdebb20e3
11409
11410 static int bnx2x_test_nvram(struct bnx2x *bp)
11411 {
11412         static const struct {
11413                 int offset;
11414                 int size;
11415         } nvram_tbl[] = {
11416                 {     0,  0x14 }, /* bootstrap */
11417                 {  0x14,  0xec }, /* dir */
11418                 { 0x100, 0x350 }, /* manuf_info */
11419                 { 0x450,  0xf0 }, /* feature_info */
11420                 { 0x640,  0x64 }, /* upgrade_key_info */
11421                 { 0x6a4,  0x64 },
11422                 { 0x708,  0x70 }, /* manuf_key_info */
11423                 { 0x778,  0x70 },
11424                 {     0,     0 }
11425         };
11426         __be32 buf[0x350 / 4];
11427         u8 *data = (u8 *)buf;
11428         int i, rc;
11429         u32 magic, crc;
11430
11431         if (BP_NOMCP(bp))
11432                 return 0;
11433
11434         rc = bnx2x_nvram_read(bp, 0, data, 4);
11435         if (rc) {
11436                 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
11437                 goto test_nvram_exit;
11438         }
11439
11440         magic = be32_to_cpu(buf[0]);
11441         if (magic != 0x669955aa) {
11442                 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
11443                 rc = -ENODEV;
11444                 goto test_nvram_exit;
11445         }
11446
11447         for (i = 0; nvram_tbl[i].size; i++) {
11448
11449                 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
11450                                       nvram_tbl[i].size);
11451                 if (rc) {
11452                         DP(NETIF_MSG_PROBE,
11453                            "nvram_tbl[%d] read data (rc %d)\n", i, rc);
11454                         goto test_nvram_exit;
11455                 }
11456
11457                 crc = ether_crc_le(nvram_tbl[i].size, data);
11458                 if (crc != CRC32_RESIDUAL) {
11459                         DP(NETIF_MSG_PROBE,
11460                            "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
11461                         rc = -ENODEV;
11462                         goto test_nvram_exit;
11463                 }
11464         }
11465
11466 test_nvram_exit:
11467         return rc;
11468 }
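
/* Illustrative sketch only: why comparing against CRC32_RESIDUAL works.
 * Each checked NVRAM region ends in the inverted CRC32 of its payload,
 * stored little-endian, and running ether_crc_le() over payload *plus*
 * trailer then always yields the constant residual, so the test never
 * needs to know where the payload ends.  The 4-byte payload below is
 * hypothetical.
 */
static void __maybe_unused bnx2x_crc_residual_demo(void)
{
	u8 region[8] = { 'b', 'n', 'x', '2' };
	u32 crc = ~ether_crc_le(4, region);	/* inverted CRC of payload */

	/* append it in little-endian byte order */
	region[4] = crc & 0xff;
	region[5] = (crc >> 8) & 0xff;
	region[6] = (crc >> 16) & 0xff;
	region[7] = (crc >> 24) & 0xff;

	WARN_ON(ether_crc_le(8, region) != CRC32_RESIDUAL);
}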
11469
11470 static int bnx2x_test_intr(struct bnx2x *bp)
11471 {
11472         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
11473         int i, rc;
11474
11475         if (!netif_running(bp->dev))
11476                 return -ENODEV;
11477
11478         config->hdr.length = 0;
11479         if (CHIP_IS_E1(bp))
11480                 /* use last unicast entries */
11481                 config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
11482         else
11483                 config->hdr.offset = BP_FUNC(bp);
11484         config->hdr.client_id = bp->fp->cl_id;
11485         config->hdr.reserved1 = 0;
11486
11487         bp->set_mac_pending++;
11488         smp_wmb();
11489         rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11490                            U64_HI(bnx2x_sp_mapping(bp, mac_config)),
11491                            U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
11492         if (rc == 0) {
11493                 for (i = 0; i < 10; i++) {
11494                         if (!bp->set_mac_pending)
11495                                 break;
11496                         smp_rmb();
11497                         msleep_interruptible(10);
11498                 }
11499                 if (i == 10)
11500                         rc = -ENODEV;
11501         }
11502
11503         return rc;
11504 }
11505
11506 static void bnx2x_self_test(struct net_device *dev,
11507                             struct ethtool_test *etest, u64 *buf)
11508 {
11509         struct bnx2x *bp = netdev_priv(dev);
11510
11511         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
11512                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
11513                 etest->flags |= ETH_TEST_FL_FAILED;
11514                 return;
11515         }
11516
11517         memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
11518
11519         if (!netif_running(dev))
11520                 return;
11521
11522         /* offline tests are not supported in MF mode */
11523         if (IS_E1HMF(bp))
11524                 etest->flags &= ~ETH_TEST_FL_OFFLINE;
11525
11526         if (etest->flags & ETH_TEST_FL_OFFLINE) {
11527                 int port = BP_PORT(bp);
11528                 u32 val;
11529                 u8 link_up;
11530
11531                 /* save current value of input enable for TX port IF */
11532                 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
11533                 /* disable input for TX port IF */
11534                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
11535
11536                 link_up = (bnx2x_link_test(bp) == 0);
11537                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11538                 bnx2x_nic_load(bp, LOAD_DIAG);
11539                 /* wait until link state is restored */
11540                 bnx2x_wait_for_link(bp, link_up);
11541
11542                 if (bnx2x_test_registers(bp) != 0) {
11543                         buf[0] = 1;
11544                         etest->flags |= ETH_TEST_FL_FAILED;
11545                 }
11546                 if (bnx2x_test_memory(bp) != 0) {
11547                         buf[1] = 1;
11548                         etest->flags |= ETH_TEST_FL_FAILED;
11549                 }
11550                 buf[2] = bnx2x_test_loopback(bp, link_up);
11551                 if (buf[2] != 0)
11552                         etest->flags |= ETH_TEST_FL_FAILED;
11553
11554                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11555
11556                 /* restore input for TX port IF */
11557                 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
11558
11559                 bnx2x_nic_load(bp, LOAD_NORMAL);
11560                 /* wait until link state is restored */
11561                 bnx2x_wait_for_link(bp, link_up);
11562         }
11563         if (bnx2x_test_nvram(bp) != 0) {
11564                 buf[3] = 1;
11565                 etest->flags |= ETH_TEST_FL_FAILED;
11566         }
11567         if (bnx2x_test_intr(bp) != 0) {
11568                 buf[4] = 1;
11569                 etest->flags |= ETH_TEST_FL_FAILED;
11570         }
11571         if (bp->port.pmf)
11572                 if (bnx2x_link_test(bp) != 0) {
11573                         buf[5] = 1;
11574                         etest->flags |= ETH_TEST_FL_FAILED;
11575                 }
11576
11577 #ifdef BNX2X_EXTRA_DEBUG
11578         bnx2x_panic_dump(bp);
11579 #endif
11580 }
11581
11582 static const struct {
11583         long offset;
11584         int size;
11585         u8 string[ETH_GSTRING_LEN];
11586 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
11587 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
11588         { Q_STATS_OFFSET32(error_bytes_received_hi),
11589                                                 8, "[%d]: rx_error_bytes" },
11590         { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
11591                                                 8, "[%d]: rx_ucast_packets" },
11592         { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
11593                                                 8, "[%d]: rx_mcast_packets" },
11594         { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
11595                                                 8, "[%d]: rx_bcast_packets" },
11596         { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
11597         { Q_STATS_OFFSET32(rx_err_discard_pkt),
11598                                          4, "[%d]: rx_phy_ip_err_discards"},
11599         { Q_STATS_OFFSET32(rx_skb_alloc_failed),
11600                                          4, "[%d]: rx_skb_alloc_discard" },
11601         { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
11602
11603 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
11604         { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11605                                                 8, "[%d]: tx_ucast_packets" },
11606         { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11607                                                 8, "[%d]: tx_mcast_packets" },
11608         { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11609                                                 8, "[%d]: tx_bcast_packets" }
11610 };
11611
11612 static const struct {
11613         long offset;
11614         int size;
11615         u32 flags;
11616 #define STATS_FLAGS_PORT                1
11617 #define STATS_FLAGS_FUNC                2
11618 #define STATS_FLAGS_BOTH                (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
11619         u8 string[ETH_GSTRING_LEN];
11620 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
11621 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
11622                                 8, STATS_FLAGS_BOTH, "rx_bytes" },
11623         { STATS_OFFSET32(error_bytes_received_hi),
11624                                 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
11625         { STATS_OFFSET32(total_unicast_packets_received_hi),
11626                                 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
11627         { STATS_OFFSET32(total_multicast_packets_received_hi),
11628                                 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
11629         { STATS_OFFSET32(total_broadcast_packets_received_hi),
11630                                 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
11631         { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
11632                                 8, STATS_FLAGS_PORT, "rx_crc_errors" },
11633         { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
11634                                 8, STATS_FLAGS_PORT, "rx_align_errors" },
11635         { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
11636                                 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
11637         { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
11638                                 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
11639 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
11640                                 8, STATS_FLAGS_PORT, "rx_fragments" },
11641         { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
11642                                 8, STATS_FLAGS_PORT, "rx_jabbers" },
11643         { STATS_OFFSET32(no_buff_discard_hi),
11644                                 8, STATS_FLAGS_BOTH, "rx_discards" },
11645         { STATS_OFFSET32(mac_filter_discard),
11646                                 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
11647         { STATS_OFFSET32(xxoverflow_discard),
11648                                 4, STATS_FLAGS_PORT, "rx_fw_discards" },
11649         { STATS_OFFSET32(brb_drop_hi),
11650                                 8, STATS_FLAGS_PORT, "rx_brb_discard" },
11651         { STATS_OFFSET32(brb_truncate_hi),
11652                                 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
11653         { STATS_OFFSET32(pause_frames_received_hi),
11654                                 8, STATS_FLAGS_PORT, "rx_pause_frames" },
11655         { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
11656                                 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
11657         { STATS_OFFSET32(nig_timer_max),
11658                         4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
11659 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
11660                                 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
11661         { STATS_OFFSET32(rx_skb_alloc_failed),
11662                                 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
11663         { STATS_OFFSET32(hw_csum_err),
11664                                 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
11665
11666         { STATS_OFFSET32(total_bytes_transmitted_hi),
11667                                 8, STATS_FLAGS_BOTH, "tx_bytes" },
11668         { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
11669                                 8, STATS_FLAGS_PORT, "tx_error_bytes" },
11670         { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
11671                                 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
11672         { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
11673                                 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
11674         { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
11675                                 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
11676         { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
11677                                 8, STATS_FLAGS_PORT, "tx_mac_errors" },
11678         { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
11679                                 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
11680 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
11681                                 8, STATS_FLAGS_PORT, "tx_single_collisions" },
11682         { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
11683                                 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
11684         { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
11685                                 8, STATS_FLAGS_PORT, "tx_deferred" },
11686         { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
11687                                 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
11688         { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
11689                                 8, STATS_FLAGS_PORT, "tx_late_collisions" },
11690         { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
11691                                 8, STATS_FLAGS_PORT, "tx_total_collisions" },
11692         { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
11693                                 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
11694         { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
11695                         8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
11696         { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
11697                         8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
11698         { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
11699                         8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
11700 /* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
11701                         8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
11702         { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
11703                         8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
11704         { STATS_OFFSET32(etherstatspktsover1522octets_hi),
11705                         8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
11706         { STATS_OFFSET32(pause_frames_sent_hi),
11707                                 8, STATS_FLAGS_PORT, "tx_pause_frames" }
11708 };
11709
11710 #define IS_PORT_STAT(i) \
11711         ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
11712 #define IS_FUNC_STAT(i)         (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
11713 #define IS_E1HMF_MODE_STAT(bp) \
11714                         (IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
11715
11716 static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
11717 {
11718         struct bnx2x *bp = netdev_priv(dev);
11719         int i, num_stats;
11720
11721         switch (stringset) {
11722         case ETH_SS_STATS:
11723                 if (is_multi(bp)) {
11724                         num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
11725                         if (!IS_E1HMF_MODE_STAT(bp))
11726                                 num_stats += BNX2X_NUM_STATS;
11727                 } else {
11728                         if (IS_E1HMF_MODE_STAT(bp)) {
11729                                 num_stats = 0;
11730                                 for (i = 0; i < BNX2X_NUM_STATS; i++)
11731                                         if (IS_FUNC_STAT(i))
11732                                                 num_stats++;
11733                         } else
11734                                 num_stats = BNX2X_NUM_STATS;
11735                 }
11736                 return num_stats;
11737
11738         case ETH_SS_TEST:
11739                 return BNX2X_NUM_TESTS;
11740
11741         default:
11742                 return -EINVAL;
11743         }
11744 }
11745
11746 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
11747 {
11748         struct bnx2x *bp = netdev_priv(dev);
11749         int i, j, k;
11750
11751         switch (stringset) {
11752         case ETH_SS_STATS:
11753                 if (is_multi(bp)) {
11754                         k = 0;
11755                         for_each_queue(bp, i) {
11756                                 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
11757                                         sprintf(buf + (k + j)*ETH_GSTRING_LEN,
11758                                                 bnx2x_q_stats_arr[j].string, i);
11759                                 k += BNX2X_NUM_Q_STATS;
11760                         }
11761                         if (IS_E1HMF_MODE_STAT(bp))
11762                                 break;
11763                         for (j = 0; j < BNX2X_NUM_STATS; j++)
11764                                 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
11765                                        bnx2x_stats_arr[j].string);
11766                 } else {
11767                         for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11768                                 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11769                                         continue;
11770                                 strcpy(buf + j*ETH_GSTRING_LEN,
11771                                        bnx2x_stats_arr[i].string);
11772                                 j++;
11773                         }
11774                 }
11775                 break;
11776
11777         case ETH_SS_TEST:
11778                 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
11779                 break;
11780         }
11781 }
11782
11783 static void bnx2x_get_ethtool_stats(struct net_device *dev,
11784                                     struct ethtool_stats *stats, u64 *buf)
11785 {
11786         struct bnx2x *bp = netdev_priv(dev);
11787         u32 *hw_stats, *offset;
11788         int i, j, k;
11789
11790         if (is_multi(bp)) {
11791                 k = 0;
11792                 for_each_queue(bp, i) {
11793                         hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
11794                         for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
11795                                 if (bnx2x_q_stats_arr[j].size == 0) {
11796                                         /* skip this counter */
11797                                         buf[k + j] = 0;
11798                                         continue;
11799                                 }
11800                                 offset = (hw_stats +
11801                                           bnx2x_q_stats_arr[j].offset);
11802                                 if (bnx2x_q_stats_arr[j].size == 4) {
11803                                         /* 4-byte counter */
11804                                         buf[k + j] = (u64) *offset;
11805                                         continue;
11806                                 }
11807                                 /* 8-byte counter */
11808                                 buf[k + j] = HILO_U64(*offset, *(offset + 1));
11809                         }
11810                         k += BNX2X_NUM_Q_STATS;
11811                 }
11812                 if (IS_E1HMF_MODE_STAT(bp))
11813                         return;
11814                 hw_stats = (u32 *)&bp->eth_stats;
11815                 for (j = 0; j < BNX2X_NUM_STATS; j++) {
11816                         if (bnx2x_stats_arr[j].size == 0) {
11817                                 /* skip this counter */
11818                                 buf[k + j] = 0;
11819                                 continue;
11820                         }
11821                         offset = (hw_stats + bnx2x_stats_arr[j].offset);
11822                         if (bnx2x_stats_arr[j].size == 4) {
11823                                 /* 4-byte counter */
11824                                 buf[k + j] = (u64) *offset;
11825                                 continue;
11826                         }
11827                         /* 8-byte counter */
11828                         buf[k + j] = HILO_U64(*offset, *(offset + 1));
11829                 }
11830         } else {
11831                 hw_stats = (u32 *)&bp->eth_stats;
11832                 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
11833                         if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
11834                                 continue;
11835                         if (bnx2x_stats_arr[i].size == 0) {
11836                                 /* skip this counter */
11837                                 buf[j] = 0;
11838                                 j++;
11839                                 continue;
11840                         }
11841                         offset = (hw_stats + bnx2x_stats_arr[i].offset);
11842                         if (bnx2x_stats_arr[i].size == 4) {
11843                                 /* 4-byte counter */
11844                                 buf[j] = (u64) *offset;
11845                                 j++;
11846                                 continue;
11847                         }
11848                         /* 8-byte counter */
11849                         buf[j] = HILO_U64(*offset, *(offset + 1));
11850                         j++;
11851                 }
11852         }
11853 }
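
/* Illustrative sketch only: the 8-byte firmware counters are stored as
 * a pair of 32-bit words with the high word first (hence the *_hi names
 * in the stats tables), and HILO_U64() above glues them back together.
 */
static u64 __maybe_unused bnx2x_hilo_example(void)
{
	u32 hi = 0x00000001, lo = 0x00000002;	/* hypothetical counter */

	return HILO_U64(hi, lo);	/* 0x0000000100000002 */
}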
11854
11855 static int bnx2x_phys_id(struct net_device *dev, u32 data)
11856 {
11857         struct bnx2x *bp = netdev_priv(dev);
11858         int i;
11859
11860         if (!netif_running(dev))
11861                 return 0;
11862
11863         if (!bp->port.pmf)
11864                 return 0;
11865
11866         if (data == 0)
11867                 data = 2;
11868
11869         for (i = 0; i < (data * 2); i++) {
11870                 if ((i % 2) == 0)
11871                         bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11872                                       SPEED_1000);
11873                 else
11874                         bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
11875
11876                 msleep_interruptible(500);
11877                 if (signal_pending(current))
11878                         break;
11879         }
11880
11881         if (bp->link_vars.link_up)
11882                 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
11883                               bp->link_vars.line_speed);
11884
11885         return 0;
11886 }
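
/* Illustrative note: "ethtool -p <dev> N" lands here with data == N
 * seconds; each second gives one 500 ms on / 500 ms off LED cycle, and
 * data == 0 falls back to the 2 second default above.
 */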
11887
11888 static const struct ethtool_ops bnx2x_ethtool_ops = {
11889         .get_settings           = bnx2x_get_settings,
11890         .set_settings           = bnx2x_set_settings,
11891         .get_drvinfo            = bnx2x_get_drvinfo,
11892         .get_regs_len           = bnx2x_get_regs_len,
11893         .get_regs               = bnx2x_get_regs,
11894         .get_wol                = bnx2x_get_wol,
11895         .set_wol                = bnx2x_set_wol,
11896         .get_msglevel           = bnx2x_get_msglevel,
11897         .set_msglevel           = bnx2x_set_msglevel,
11898         .nway_reset             = bnx2x_nway_reset,
11899         .get_link               = bnx2x_get_link,
11900         .get_eeprom_len         = bnx2x_get_eeprom_len,
11901         .get_eeprom             = bnx2x_get_eeprom,
11902         .set_eeprom             = bnx2x_set_eeprom,
11903         .get_coalesce           = bnx2x_get_coalesce,
11904         .set_coalesce           = bnx2x_set_coalesce,
11905         .get_ringparam          = bnx2x_get_ringparam,
11906         .set_ringparam          = bnx2x_set_ringparam,
11907         .get_pauseparam         = bnx2x_get_pauseparam,
11908         .set_pauseparam         = bnx2x_set_pauseparam,
11909         .get_rx_csum            = bnx2x_get_rx_csum,
11910         .set_rx_csum            = bnx2x_set_rx_csum,
11911         .get_tx_csum            = ethtool_op_get_tx_csum,
11912         .set_tx_csum            = ethtool_op_set_tx_hw_csum,
11913         .set_flags              = bnx2x_set_flags,
11914         .get_flags              = ethtool_op_get_flags,
11915         .get_sg                 = ethtool_op_get_sg,
11916         .set_sg                 = ethtool_op_set_sg,
11917         .get_tso                = ethtool_op_get_tso,
11918         .set_tso                = bnx2x_set_tso,
11919         .self_test              = bnx2x_self_test,
11920         .get_sset_count         = bnx2x_get_sset_count,
11921         .get_strings            = bnx2x_get_strings,
11922         .phys_id                = bnx2x_phys_id,
11923         .get_ethtool_stats      = bnx2x_get_ethtool_stats,
11924 };
11925
11926 /* end of ethtool_ops */
11927
11928 /****************************************************************************
11929 * General service functions
11930 ****************************************************************************/
11931
11932 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11933 {
11934         u16 pmcsr;
11935
11936         pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11937
11938         switch (state) {
11939         case PCI_D0:
11940                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11941                                       ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11942                                        PCI_PM_CTRL_PME_STATUS));
11943
11944                 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
11945                         /* delay required during transition out of D3hot */
11946                         msleep(20);
11947                 break;
11948
11949         case PCI_D3hot:
11950                 /* If there are other clients above, don't
11951                    shut down the power */
11952                 if (atomic_read(&bp->pdev->enable_cnt) != 1)
11953                         return 0;
11954                 /* Don't shut down the power for emulation and FPGA */
11955                 if (CHIP_REV_IS_SLOW(bp))
11956                         return 0;
11957
11958                 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11959                 pmcsr |= 3;
11960
11961                 if (bp->wol)
11962                         pmcsr |= PCI_PM_CTRL_PME_ENABLE;
11963
11964                 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11965                                       pmcsr);
11966
11967                 /* No more memory access after this point until
11968                  * device is brought back to D0.
11969                  */
11970                 break;
11971
11972         default:
11973                 return -EINVAL;
11974         }
11975         return 0;
11976 }
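
/* Illustrative note: "pmcsr |= 3" above selects the D3hot encoding in
 * the low two PowerState bits of the PCI PM control/status register
 * (the same bits cleared via PCI_PM_CTRL_STATE_MASK for D0).
 */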
11977
11978 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
11979 {
11980         u16 rx_cons_sb;
11981
11982         /* Tell compiler that status block fields can change */
11983         barrier();
11984         rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
11985         if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
11986                 rx_cons_sb++;
11987         return (fp->rx_comp_cons != rx_cons_sb);
11988 }
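
/* Illustrative note: the last descriptor of each RCQ page is reserved
 * as a "next page" pointer, so a status-block consumer value that lands
 * on that boundary (rx_cons_sb & MAX_RCQ_DESC_CNT == MAX_RCQ_DESC_CNT)
 * is bumped past it before being compared with rx_comp_cons.
 */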
11989
11990 /*
11991  * net_device service functions
11992  */
11993
11994 static int bnx2x_poll(struct napi_struct *napi, int budget)
11995 {
11996         int work_done = 0;
11997         struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
11998                                                  napi);
11999         struct bnx2x *bp = fp->bp;
12000
12001         while (1) {
12002 #ifdef BNX2X_STOP_ON_ERROR
12003                 if (unlikely(bp->panic)) {
12004                         napi_complete(napi);
12005                         return 0;
12006                 }
12007 #endif
12008
12009                 if (bnx2x_has_tx_work(fp))
12010                         bnx2x_tx_int(fp);
12011
12012                 if (bnx2x_has_rx_work(fp)) {
12013                         work_done += bnx2x_rx_int(fp, budget - work_done);
12014
12015                         /* must not complete if we consumed full budget */
12016                         if (work_done >= budget)
12017                                 break;
12018                 }
12019
12020                 /* Fall out from the NAPI loop if needed */
12021                 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12022                         bnx2x_update_fpsb_idx(fp);
12023                 /* bnx2x_has_rx_work() reads the status block, thus we need
12024                  * to ensure that status block indices have been actually read
12025                  * (bnx2x_update_fpsb_idx) prior to this check
12026                  * (bnx2x_has_rx_work) so that we won't write the "newer"
12027                  * value of the status block to IGU (if there was a DMA right
12028                  * after bnx2x_has_rx_work and if there is no rmb, the memory
12029                  * reading (bnx2x_update_fpsb_idx) may be postponed to right
12030                  * before bnx2x_ack_sb). In this case there will never be
12031                  * another interrupt until there is another update of the
12032                  * status block, while there is still unhandled work.
12033                  */
12034                         rmb();
12035
12036                         if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
12037                                 napi_complete(napi);
12038                                 /* Re-enable interrupts */
12039                                 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
12040                                              le16_to_cpu(fp->fp_c_idx),
12041                                              IGU_INT_NOP, 1);
12042                                 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
12043                                              le16_to_cpu(fp->fp_u_idx),
12044                                              IGU_INT_ENABLE, 1);
12045                                 break;
12046                         }
12047                 }
12048         }
12049
12050         return work_done;
12051 }
12052
12053
12054 /* We split the first BD into header and data BDs
12055  * to ease the pain of our fellow microcode engineers;
12056  * we use one mapping for both BDs.
12057  * So far this has only been observed to happen
12058  * in Other Operating Systems(TM).
12059  */
12060 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
12061                                    struct bnx2x_fastpath *fp,
12062                                    struct sw_tx_bd *tx_buf,
12063                                    struct eth_tx_start_bd **tx_bd, u16 hlen,
12064                                    u16 bd_prod, int nbd)
12065 {
12066         struct eth_tx_start_bd *h_tx_bd = *tx_bd;
12067         struct eth_tx_bd *d_tx_bd;
12068         dma_addr_t mapping;
12069         int old_len = le16_to_cpu(h_tx_bd->nbytes);
12070
        /* first, fix the first BD */
12072         h_tx_bd->nbd = cpu_to_le16(nbd);
12073         h_tx_bd->nbytes = cpu_to_le16(hlen);
12074
        DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
           "(%x:%x) nbd %d\n", le16_to_cpu(h_tx_bd->nbytes),
           le32_to_cpu(h_tx_bd->addr_hi), le32_to_cpu(h_tx_bd->addr_lo),
           le16_to_cpu(h_tx_bd->nbd));
12078
12079         /* now get a new data BD
12080          * (after the pbd) and fill it */
12081         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12082         d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12083
12084         mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
12085                            le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
12086
12087         d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12088         d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12089         d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
12090
12091         /* this marks the BD as one that has no individual mapping */
12092         tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
12093
        DP(NETIF_MSG_TX_QUEUED,
           "TSO split data size is %d (%x:%x)\n",
           le16_to_cpu(d_tx_bd->nbytes), le32_to_cpu(d_tx_bd->addr_hi),
           le32_to_cpu(d_tx_bd->addr_lo));
12097
12098         /* update tx_bd */
12099         *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
12100
12101         return bd_prod;
12102 }
12103
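/* Adjust a checksum that the HW computed starting "fix" bytes away from
 * the transport header: fold the checksum of the skipped (fix > 0) or
 * extra (fix < 0) bytes out of / into the sum and return it byte-swapped,
 * as the parsing BD expects.
 */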
12104 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
12105 {
12106         if (fix > 0)
12107                 csum = (u16) ~csum_fold(csum_sub(csum,
12108                                 csum_partial(t_header - fix, fix, 0)));
12109
12110         else if (fix < 0)
12111                 csum = (u16) ~csum_fold(csum_add(csum,
12112                                 csum_partial(t_header, -fix, 0)));
12113
12114         return swab16(csum);
12115 }
12116
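/* Classify an skb for transmission: return a bitmask of XMIT_PLAIN,
 * XMIT_CSUM_V4/V6, XMIT_CSUM_TCP and XMIT_GSO_V4/V6 flags derived from
 * ip_summed, the L3 protocol and the GSO type.
 */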
12117 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
12118 {
12119         u32 rc;
12120
12121         if (skb->ip_summed != CHECKSUM_PARTIAL)
12122                 rc = XMIT_PLAIN;
12123
12124         else {
12125                 if (skb->protocol == htons(ETH_P_IPV6)) {
12126                         rc = XMIT_CSUM_V6;
12127                         if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
12128                                 rc |= XMIT_CSUM_TCP;
12129
12130                 } else {
12131                         rc = XMIT_CSUM_V4;
12132                         if (ip_hdr(skb)->protocol == IPPROTO_TCP)
12133                                 rc |= XMIT_CSUM_TCP;
12134                 }
12135         }
12136
12137         if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
12138                 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
12139
12140         else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
12141                 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
12142
12143         return rc;
12144 }
12145
12146 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if the packet requires linearization (i.e. it is too fragmented);
   no need to check fragmentation if the page size is above 8K (there will
   be no violation of the FW restrictions in that case) */
12150 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
12151                              u32 xmit_type)
12152 {
12153         int to_copy = 0;
12154         int hlen = 0;
12155         int first_bd_sz = 0;
12156
12157         /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
12158         if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
12159
12160                 if (xmit_type & XMIT_GSO) {
12161                         unsigned short lso_mss = skb_shinfo(skb)->gso_size;
12162                         /* Check if LSO packet needs to be copied:
12163                            3 = 1 (for headers BD) + 2 (for PBD and last BD) */
12164                         int wnd_size = MAX_FETCH_BD - 3;
12165                         /* Number of windows to check */
12166                         int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
12167                         int wnd_idx = 0;
12168                         int frag_idx = 0;
12169                         u32 wnd_sum = 0;
12170
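                        /* The FW fetches at most MAX_FETCH_BD BDs at once,
                         * so every window of wnd_size consecutive BDs must
                         * carry at least one full MSS worth of data;
                         * otherwise the packet has to be linearized.
                         */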
12171                         /* Headers length */
12172                         hlen = (int)(skb_transport_header(skb) - skb->data) +
12173                                 tcp_hdrlen(skb);
12174
                        /* Amount of data (w/o headers) on the linear part
                         * of the SKB */
12176                         first_bd_sz = skb_headlen(skb) - hlen;
12177
12178                         wnd_sum  = first_bd_sz;
12179
12180                         /* Calculate the first sum - it's special */
12181                         for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
12182                                 wnd_sum +=
12183                                         skb_shinfo(skb)->frags[frag_idx].size;
12184
                        /* If there was data in the linear part - check it */
12186                         if (first_bd_sz > 0) {
12187                                 if (unlikely(wnd_sum < lso_mss)) {
12188                                         to_copy = 1;
12189                                         goto exit_lbl;
12190                                 }
12191
12192                                 wnd_sum -= first_bd_sz;
12193                         }
12194
12195                         /* Others are easier: run through the frag list and
12196                            check all windows */
12197                         for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
12198                                 wnd_sum +=
12199                           skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
12200
12201                                 if (unlikely(wnd_sum < lso_mss)) {
12202                                         to_copy = 1;
12203                                         break;
12204                                 }
12205                                 wnd_sum -=
12206                                         skb_shinfo(skb)->frags[wnd_idx].size;
12207                         }
12208                 } else {
                        /* a non-LSO packet that is too fragmented
                           must always be linearized */
12211                         to_copy = 1;
12212                 }
12213         }
12214
12215 exit_lbl:
12216         if (unlikely(to_copy))
12217                 DP(NETIF_MSG_TX_QUEUED,
12218                    "Linearization IS REQUIRED for %s packet. "
12219                    "num_frags %d  hlen %d  first_bd_sz %d\n",
12220                    (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
12221                    skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
12222
12223         return to_copy;
12224 }
12225 #endif
12226
12227 /* called with netif_tx_lock
12228  * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
12229  * netif_wake_queue()
12230  */
12231 static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
12232 {
12233         struct bnx2x *bp = netdev_priv(dev);
12234         struct bnx2x_fastpath *fp;
12235         struct netdev_queue *txq;
12236         struct sw_tx_bd *tx_buf;
12237         struct eth_tx_start_bd *tx_start_bd;
12238         struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
12239         struct eth_tx_parse_bd *pbd = NULL;
12240         u16 pkt_prod, bd_prod;
12241         int nbd, fp_index;
12242         dma_addr_t mapping;
12243         u32 xmit_type = bnx2x_xmit_type(bp, skb);
12244         int i;
12245         u8 hlen = 0;
12246         __le16 pkt_size = 0;
12247         struct ethhdr *eth;
12248         u8 mac_type = UNICAST_ADDRESS;
12249
12250 #ifdef BNX2X_STOP_ON_ERROR
12251         if (unlikely(bp->panic))
12252                 return NETDEV_TX_BUSY;
12253 #endif
12254
12255         fp_index = skb_get_queue_mapping(skb);
12256         txq = netdev_get_tx_queue(dev, fp_index);
12257
12258         fp = &bp->fp[fp_index];
12259
12260         if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
12261                 fp->eth_q_stats.driver_xoff++;
12262                 netif_tx_stop_queue(txq);
12263                 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
12264                 return NETDEV_TX_BUSY;
12265         }
12266
12267         DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
12268            "  gso type %x  xmit_type %x\n",
12269            skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
12270            ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
12271
12272         eth = (struct ethhdr *)skb->data;
12273
        /* set flag according to packet type (UNICAST_ADDRESS is default) */
12275         if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
12276                 if (is_broadcast_ether_addr(eth->h_dest))
12277                         mac_type = BROADCAST_ADDRESS;
12278                 else
12279                         mac_type = MULTICAST_ADDRESS;
12280         }
12281
12282 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
        /* First, check if we need to linearize the skb (due to FW
           restrictions). No need to check fragmentation if the page size
           is above 8K (there will be no violation of FW restrictions) */
12286         if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
12287                 /* Statistics of linearization */
12288                 bp->lin_cnt++;
12289                 if (skb_linearize(skb) != 0) {
12290                         DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
12291                            "silently dropping this SKB\n");
12292                         dev_kfree_skb_any(skb);
12293                         return NETDEV_TX_OK;
12294                 }
12295         }
12296 #endif
12297
        /*
         * Please read carefully.  First we use one BD which we mark as the
         * start BD, then we have a parsing info BD (used for TSO or xsum),
         * and only then we have the rest of the TSO BDs.
         * (Don't forget to mark the last one as last, and to unmap only
         * AFTER you write to the BD ...)
         * And above all, all PBD sizes are in 16-bit words - NOT DWORDS!
         */
12306
12307         pkt_prod = fp->tx_pkt_prod++;
12308         bd_prod = TX_BD(fp->tx_bd_prod);
12309
12310         /* get a tx_buf and first BD */
12311         tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
12312         tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
12313
12314         tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
12315         tx_start_bd->general_data =  (mac_type <<
12316                                         ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
12317         /* header nbd */
12318         tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
12319
12320         /* remember the first BD of the packet */
12321         tx_buf->first_bd = fp->tx_bd_prod;
12322         tx_buf->skb = skb;
12323         tx_buf->flags = 0;
12324
12325         DP(NETIF_MSG_TX_QUEUED,
12326            "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
12327            pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
12328
12329 #ifdef BCM_VLAN
12330         if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
12331             (bp->flags & HW_VLAN_TX_FLAG)) {
12332                 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
12333                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
12334         } else
12335 #endif
12336                 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
12337
12338         /* turn on parsing and get a BD */
12339         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12340         pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
12341
12342         memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
12343
12344         if (xmit_type & XMIT_CSUM) {
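                /* all lengths passed to the parsing BD below are in
                 * 16-bit words, hence the divisions by 2 */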
12345                 hlen = (skb_network_header(skb) - skb->data) / 2;
12346
12347                 /* for now NS flag is not used in Linux */
12348                 pbd->global_data =
12349                         (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
12350                                  ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
12351
12352                 pbd->ip_hlen = (skb_transport_header(skb) -
12353                                 skb_network_header(skb)) / 2;
12354
12355                 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
12356
12357                 pbd->total_hlen = cpu_to_le16(hlen);
12358                 hlen = hlen*2;
12359
12360                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
12361
12362                 if (xmit_type & XMIT_CSUM_V4)
12363                         tx_start_bd->bd_flags.as_bitfield |=
12364                                                 ETH_TX_BD_FLAGS_IP_CSUM;
12365                 else
12366                         tx_start_bd->bd_flags.as_bitfield |=
12367                                                 ETH_TX_BD_FLAGS_IPV6;
12368
12369                 if (xmit_type & XMIT_CSUM_TCP) {
12370                         pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
12371
12372                 } else {
12373                         s8 fix = SKB_CS_OFF(skb); /* signed! */
12374
12375                         pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
12376
12377                         DP(NETIF_MSG_TX_QUEUED,
12378                            "hlen %d  fix %d  csum before fix %x\n",
12379                            le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
12380
12381                         /* HW bug: fixup the CSUM */
12382                         pbd->tcp_pseudo_csum =
12383                                 bnx2x_csum_fix(skb_transport_header(skb),
12384                                                SKB_CS(skb), fix);
12385
12386                         DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
12387                            pbd->tcp_pseudo_csum);
12388                 }
12389         }
12390
12391         mapping = dma_map_single(&bp->pdev->dev, skb->data,
12392                                  skb_headlen(skb), DMA_TO_DEVICE);
12393
12394         tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12395         tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12396         nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
12397         tx_start_bd->nbd = cpu_to_le16(nbd);
12398         tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
12399         pkt_size = tx_start_bd->nbytes;
12400
12401         DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
12402            "  nbytes %d  flags %x  vlan %x\n",
12403            tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
12404            le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
12405            tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
12406
12407         if (xmit_type & XMIT_GSO) {
12408
12409                 DP(NETIF_MSG_TX_QUEUED,
12410                    "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
12411                    skb->len, hlen, skb_headlen(skb),
12412                    skb_shinfo(skb)->gso_size);
12413
12414                 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
12415
12416                 if (unlikely(skb_headlen(skb) > hlen))
12417                         bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
12418                                                  hlen, bd_prod, ++nbd);
12419
12420                 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
12421                 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
12422                 pbd->tcp_flags = pbd_tcp_flags(skb);
12423
12424                 if (xmit_type & XMIT_GSO_V4) {
12425                         pbd->ip_id = swab16(ip_hdr(skb)->id);
12426                         pbd->tcp_pseudo_csum =
12427                                 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
12428                                                           ip_hdr(skb)->daddr,
12429                                                           0, IPPROTO_TCP, 0));
12430
12431                 } else
12432                         pbd->tcp_pseudo_csum =
12433                                 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
12434                                                         &ipv6_hdr(skb)->daddr,
12435                                                         0, IPPROTO_TCP, 0));
12436
12437                 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
12438         }
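        /* From here on treat the first BD as a data BD: tx_data_bd tracks
         * the last BD written so it can be reported below even when there
         * are no frags.
         */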
12439         tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
12440
12441         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
12442                 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
12443
12444                 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12445                 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12446                 if (total_pkt_bd == NULL)
12447                         total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
12448
12449                 mapping = dma_map_page(&bp->pdev->dev, frag->page,
12450                                        frag->page_offset,
12451                                        frag->size, DMA_TO_DEVICE);
12452
12453                 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
12454                 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
12455                 tx_data_bd->nbytes = cpu_to_le16(frag->size);
12456                 le16_add_cpu(&pkt_size, frag->size);
12457
12458                 DP(NETIF_MSG_TX_QUEUED,
12459                    "frag %d  bd @%p  addr (%x:%x)  nbytes %d\n",
12460                    i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
12461                    le16_to_cpu(tx_data_bd->nbytes));
12462         }
12463
12464         DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
12465
12466         bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
12467
        /* Now send a tx doorbell.  The last BD of each BD page is a
         * "next page" pointer which the FW fetches as well, so count it
         * in nbd if the packet's BD chain contains or ends with it.
         */
12471         if (TX_BD_POFF(bd_prod) < nbd)
12472                 nbd++;
12473
12474         if (total_pkt_bd != NULL)
12475                 total_pkt_bd->total_pkt_bytes = pkt_size;
12476
12477         if (pbd)
12478                 DP(NETIF_MSG_TX_QUEUED,
12479                    "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
12480                    "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
12481                    pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
12482                    pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
12483                    pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
12484
12485         DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);
12486
        /*
         * Make sure that the BD data is updated before updating the producer
         * since the FW might read the BD right after the producer is updated.
         * This is only applicable to weakly-ordered memory-model archs such
         * as IA-64.  The following barrier is also mandatory since the FW
         * assumes packets must have BDs.
         */
12494         wmb();
12495
12496         fp->tx_db.data.prod += nbd;
12497         barrier();
12498         DOORBELL(bp, fp->index, fp->tx_db.raw);
12499
12500         mmiowb();
12501
12502         fp->tx_bd_prod += nbd;
12503
12504         if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
12505                 netif_tx_stop_queue(txq);
12506
                /* The paired memory barrier is in bnx2x_tx_int(); we must
                 * keep the ordering between the set_bit() in
                 * netif_tx_stop_queue() and the read of fp->tx_bd_cons. */
12510                 smp_mb();
12511
12512                 fp->eth_q_stats.driver_xoff++;
12513                 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
12514                         netif_tx_wake_queue(txq);
12515         }
12516         fp->tx_pkt++;
12517
12518         return NETDEV_TX_OK;
12519 }
12520
12521 /* called with rtnl_lock */
12522 static int bnx2x_open(struct net_device *dev)
12523 {
12524         struct bnx2x *bp = netdev_priv(dev);
12525
12526         netif_carrier_off(dev);
12527
12528         bnx2x_set_power_state(bp, PCI_D0);
12529
12530         if (!bnx2x_reset_is_done(bp)) {
12531                 do {
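                        /* the do { } while (0) block exists only so that
                         * "break" can leave it once recovery has succeeded */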
                        /* Reset the MCP mailbox sequence if there is an
                         * ongoing recovery
                         */
12535                         bp->fw_seq = 0;
12536
                        /* If this is the first function to load and "reset
                         * done" is still not cleared, it may mean that a
                         * previous recovery was interrupted.  We don't check
                         * the attention state here because it may have
                         * already been cleared by a "common" reset, but we
                         * shall proceed with "process kill" anyway.
                         */
12543                         if ((bnx2x_get_load_cnt(bp) == 0) &&
12544                                 bnx2x_trylock_hw_lock(bp,
12545                                 HW_LOCK_RESOURCE_RESERVED_08) &&
12546                                 (!bnx2x_leader_reset(bp))) {
12547                                 DP(NETIF_MSG_HW, "Recovered in open\n");
12548                                 break;
12549                         }
12550
12551                         bnx2x_set_power_state(bp, PCI_D3hot);
12552
                        printk(KERN_ERR "%s: Recovery flow hasn't been"
                               " properly completed yet. Try again later."
                               " If you still see this message after a few"
                               " retries then a power cycle is required.\n",
                               bp->dev->name);
12557
12558                         return -EAGAIN;
12559                 } while (0);
12560         }
12561
12562         bp->recovery_state = BNX2X_RECOVERY_DONE;
12563
12564         return bnx2x_nic_load(bp, LOAD_OPEN);
12565 }
12566
12567 /* called with rtnl_lock */
12568 static int bnx2x_close(struct net_device *dev)
12569 {
12570         struct bnx2x *bp = netdev_priv(dev);
12571
12572         /* Unload the driver, release IRQs */
12573         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
12574         bnx2x_set_power_state(bp, PCI_D3hot);
12575
12576         return 0;
12577 }
12578
12579 /* called with netif_tx_lock from dev_mcast.c */
12580 static void bnx2x_set_rx_mode(struct net_device *dev)
12581 {
12582         struct bnx2x *bp = netdev_priv(dev);
12583         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
12584         int port = BP_PORT(bp);
12585
12586         if (bp->state != BNX2X_STATE_OPEN) {
12587                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
12588                 return;
12589         }
12590
12591         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
12592
12593         if (dev->flags & IFF_PROMISC)
12594                 rx_mode = BNX2X_RX_MODE_PROMISC;
12595
12596         else if ((dev->flags & IFF_ALLMULTI) ||
12597                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
12598                   CHIP_IS_E1(bp)))
12599                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
12600
12601         else { /* some multicasts */
12602                 if (CHIP_IS_E1(bp)) {
12603                         int i, old, offset;
12604                         struct netdev_hw_addr *ha;
12605                         struct mac_configuration_cmd *config =
12606                                                 bnx2x_sp(bp, mcast_config);
12607
12608                         i = 0;
12609                         netdev_for_each_mc_addr(ha, dev) {
12610                                 config->config_table[i].
12611                                         cam_entry.msb_mac_addr =
12612                                         swab16(*(u16 *)&ha->addr[0]);
12613                                 config->config_table[i].
12614                                         cam_entry.middle_mac_addr =
12615                                         swab16(*(u16 *)&ha->addr[2]);
12616                                 config->config_table[i].
12617                                         cam_entry.lsb_mac_addr =
12618                                         swab16(*(u16 *)&ha->addr[4]);
12619                                 config->config_table[i].cam_entry.flags =
12620                                                         cpu_to_le16(port);
12621                                 config->config_table[i].
12622                                         target_table_entry.flags = 0;
12623                                 config->config_table[i].target_table_entry.
12624                                         clients_bit_vector =
12625                                                 cpu_to_le32(1 << BP_L_ID(bp));
12626                                 config->config_table[i].
12627                                         target_table_entry.vlan_id = 0;
12628
12629                                 DP(NETIF_MSG_IFUP,
12630                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
12631                                    config->config_table[i].
12632                                                 cam_entry.msb_mac_addr,
12633                                    config->config_table[i].
12634                                                 cam_entry.middle_mac_addr,
12635                                    config->config_table[i].
12636                                                 cam_entry.lsb_mac_addr);
12637                                 i++;
12638                         }
12639                         old = config->hdr.length;
12640                         if (old > i) {
12641                                 for (; i < old; i++) {
12642                                         if (CAM_IS_INVALID(config->
12643                                                            config_table[i])) {
12644                                                 /* already invalidated */
12645                                                 break;
12646                                         }
12647                                         /* invalidate */
12648                                         CAM_INVALIDATE(config->
12649                                                        config_table[i]);
12650                                 }
12651                         }
12652
12653                         if (CHIP_REV_IS_SLOW(bp))
12654                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
12655                         else
12656                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
12657
12658                         config->hdr.length = i;
12659                         config->hdr.offset = offset;
12660                         config->hdr.client_id = bp->fp->cl_id;
12661                         config->hdr.reserved1 = 0;
12662
12663                         bp->set_mac_pending++;
12664                         smp_wmb();
12665
12666                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
12667                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
12668                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
12669                                       0);
12670                 } else { /* E1H */
12671                         /* Accept one or more multicasts */
12672                         struct netdev_hw_addr *ha;
12673                         u32 mc_filter[MC_HASH_SIZE];
12674                         u32 crc, bit, regidx;
12675                         int i;
12676
12677                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
12678
12679                         netdev_for_each_mc_addr(ha, dev) {
12680                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
12681                                    ha->addr);
12682
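                                /* The top byte of the CRC32c selects one of
                                 * 256 hash-filter bits: bits 7:5 pick the
                                 * MC_HASH register, bits 4:0 the bit within
                                 * it.
                                 */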
12683                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
12684                                 bit = (crc >> 24) & 0xff;
12685                                 regidx = bit >> 5;
12686                                 bit &= 0x1f;
12687                                 mc_filter[regidx] |= (1 << bit);
12688                         }
12689
12690                         for (i = 0; i < MC_HASH_SIZE; i++)
12691                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
12692                                        mc_filter[i]);
12693                 }
12694         }
12695
12696         bp->rx_mode = rx_mode;
12697         bnx2x_set_storm_rx_mode(bp);
12698 }
12699
12700 /* called with rtnl_lock */
12701 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
12702 {
12703         struct sockaddr *addr = p;
12704         struct bnx2x *bp = netdev_priv(dev);
12705
12706         if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
12707                 return -EINVAL;
12708
12709         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
12710         if (netif_running(dev)) {
12711                 if (CHIP_IS_E1(bp))
12712                         bnx2x_set_eth_mac_addr_e1(bp, 1);
12713                 else
12714                         bnx2x_set_eth_mac_addr_e1h(bp, 1);
12715         }
12716
12717         return 0;
12718 }
12719
12720 /* called with rtnl_lock */
12721 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
12722                            int devad, u16 addr)
12723 {
12724         struct bnx2x *bp = netdev_priv(netdev);
12725         u16 value;
12726         int rc;
12727         u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12728
12729         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
12730            prtad, devad, addr);
12731
12732         if (prtad != bp->mdio.prtad) {
                DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12734                    prtad, bp->mdio.prtad);
12735                 return -EINVAL;
12736         }
12737
12738         /* The HW expects different devad if CL22 is used */
12739         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12740
12741         bnx2x_acquire_phy_lock(bp);
12742         rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
12743                              devad, addr, &value);
12744         bnx2x_release_phy_lock(bp);
12745         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
12746
12747         if (!rc)
12748                 rc = value;
12749         return rc;
12750 }
12751
12752 /* called with rtnl_lock */
12753 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
12754                             u16 addr, u16 value)
12755 {
12756         struct bnx2x *bp = netdev_priv(netdev);
12757         u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
12758         int rc;
12759
12760         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
12761                            " value 0x%x\n", prtad, devad, addr, value);
12762
12763         if (prtad != bp->mdio.prtad) {
                DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
12765                    prtad, bp->mdio.prtad);
12766                 return -EINVAL;
12767         }
12768
12769         /* The HW expects different devad if CL22 is used */
12770         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
12771
12772         bnx2x_acquire_phy_lock(bp);
12773         rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
12774                               devad, addr, value);
12775         bnx2x_release_phy_lock(bp);
12776         return rc;
12777 }
12778
12779 /* called with rtnl_lock */
12780 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12781 {
12782         struct bnx2x *bp = netdev_priv(dev);
12783         struct mii_ioctl_data *mdio = if_mii(ifr);
12784
12785         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
12786            mdio->phy_id, mdio->reg_num, mdio->val_in);
12787
12788         if (!netif_running(dev))
12789                 return -EAGAIN;
12790
12791         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
12792 }
12793
12794 /* called with rtnl_lock */
12795 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
12796 {
12797         struct bnx2x *bp = netdev_priv(dev);
12798         int rc = 0;
12799
12800         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
12801                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
12802                 return -EAGAIN;
12803         }
12804
12805         if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
12806             ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
12807                 return -EINVAL;
12808
12809         /* This does not race with packet allocation
12810          * because the actual alloc size is
12811          * only updated as part of load
12812          */
12813         dev->mtu = new_mtu;
12814
12815         if (netif_running(dev)) {
12816                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
12817                 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
12818         }
12819
12820         return rc;
12821 }
12822
12823 static void bnx2x_tx_timeout(struct net_device *dev)
12824 {
12825         struct bnx2x *bp = netdev_priv(dev);
12826
12827 #ifdef BNX2X_STOP_ON_ERROR
12828         if (!bp->panic)
12829                 bnx2x_panic();
12830 #endif
12831         /* This allows the netif to be shutdown gracefully before resetting */
12832         schedule_delayed_work(&bp->reset_task, 0);
12833 }
12834
12835 #ifdef BCM_VLAN
12836 /* called with rtnl_lock */
12837 static void bnx2x_vlan_rx_register(struct net_device *dev,
12838                                    struct vlan_group *vlgrp)
12839 {
12840         struct bnx2x *bp = netdev_priv(dev);
12841
12842         bp->vlgrp = vlgrp;
12843
12844         /* Set flags according to the required capabilities */
12845         bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
12846
12847         if (dev->features & NETIF_F_HW_VLAN_TX)
12848                 bp->flags |= HW_VLAN_TX_FLAG;
12849
12850         if (dev->features & NETIF_F_HW_VLAN_RX)
12851                 bp->flags |= HW_VLAN_RX_FLAG;
12852
12853         if (netif_running(dev))
12854                 bnx2x_set_client_config(bp);
12855 }
12856
12857 #endif
12858
12859 #ifdef CONFIG_NET_POLL_CONTROLLER
12860 static void poll_bnx2x(struct net_device *dev)
12861 {
12862         struct bnx2x *bp = netdev_priv(dev);
12863
12864         disable_irq(bp->pdev->irq);
12865         bnx2x_interrupt(bp->pdev->irq, dev);
12866         enable_irq(bp->pdev->irq);
12867 }
12868 #endif
12869
12870 static const struct net_device_ops bnx2x_netdev_ops = {
12871         .ndo_open               = bnx2x_open,
12872         .ndo_stop               = bnx2x_close,
12873         .ndo_start_xmit         = bnx2x_start_xmit,
12874         .ndo_set_multicast_list = bnx2x_set_rx_mode,
12875         .ndo_set_mac_address    = bnx2x_change_mac_addr,
12876         .ndo_validate_addr      = eth_validate_addr,
12877         .ndo_do_ioctl           = bnx2x_ioctl,
12878         .ndo_change_mtu         = bnx2x_change_mtu,
12879         .ndo_tx_timeout         = bnx2x_tx_timeout,
12880 #ifdef BCM_VLAN
12881         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
12882 #endif
12883 #ifdef CONFIG_NET_POLL_CONTROLLER
12884         .ndo_poll_controller    = poll_bnx2x,
12885 #endif
12886 };
12887
12888 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
12889                                     struct net_device *dev)
12890 {
12891         struct bnx2x *bp;
12892         int rc;
12893
12894         SET_NETDEV_DEV(dev, &pdev->dev);
12895         bp = netdev_priv(dev);
12896
12897         bp->dev = dev;
12898         bp->pdev = pdev;
12899         bp->flags = 0;
12900         bp->func = PCI_FUNC(pdev->devfn);
12901
12902         rc = pci_enable_device(pdev);
12903         if (rc) {
12904                 dev_err(&bp->pdev->dev,
12905                         "Cannot enable PCI device, aborting\n");
12906                 goto err_out;
12907         }
12908
12909         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
12910                 dev_err(&bp->pdev->dev,
12911                         "Cannot find PCI device base address, aborting\n");
12912                 rc = -ENODEV;
12913                 goto err_out_disable;
12914         }
12915
12916         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
12917                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
12918                        " base address, aborting\n");
12919                 rc = -ENODEV;
12920                 goto err_out_disable;
12921         }
12922
12923         if (atomic_read(&pdev->enable_cnt) == 1) {
12924                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12925                 if (rc) {
12926                         dev_err(&bp->pdev->dev,
12927                                 "Cannot obtain PCI resources, aborting\n");
12928                         goto err_out_disable;
12929                 }
12930
12931                 pci_set_master(pdev);
12932                 pci_save_state(pdev);
12933         }
12934
12935         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12936         if (bp->pm_cap == 0) {
12937                 dev_err(&bp->pdev->dev,
12938                         "Cannot find power management capability, aborting\n");
12939                 rc = -EIO;
12940                 goto err_out_release;
12941         }
12942
12943         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
12944         if (bp->pcie_cap == 0) {
12945                 dev_err(&bp->pdev->dev,
12946                         "Cannot find PCI Express capability, aborting\n");
12947                 rc = -EIO;
12948                 goto err_out_release;
12949         }
12950
12951         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
12952                 bp->flags |= USING_DAC_FLAG;
12953                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
12954                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
12955                                " failed, aborting\n");
12956                         rc = -EIO;
12957                         goto err_out_release;
12958                 }
12959
12960         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
12961                 dev_err(&bp->pdev->dev,
12962                         "System does not support DMA, aborting\n");
12963                 rc = -EIO;
12964                 goto err_out_release;
12965         }
12966
12967         dev->mem_start = pci_resource_start(pdev, 0);
12968         dev->base_addr = dev->mem_start;
12969         dev->mem_end = pci_resource_end(pdev, 0);
12970
12971         dev->irq = pdev->irq;
12972
12973         bp->regview = pci_ioremap_bar(pdev, 0);
12974         if (!bp->regview) {
12975                 dev_err(&bp->pdev->dev,
12976                         "Cannot map register space, aborting\n");
12977                 rc = -ENOMEM;
12978                 goto err_out_release;
12979         }
12980
12981         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12982                                         min_t(u64, BNX2X_DB_SIZE,
12983                                               pci_resource_len(pdev, 2)));
12984         if (!bp->doorbells) {
12985                 dev_err(&bp->pdev->dev,
12986                         "Cannot map doorbell space, aborting\n");
12987                 rc = -ENOMEM;
12988                 goto err_out_unmap;
12989         }
12990
12991         bnx2x_set_power_state(bp, PCI_D0);
12992
12993         /* clean indirect addresses */
12994         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12995                                PCICFG_VENDOR_ID_OFFSET);
12996         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
12997         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
12998         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
12999         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
13000
13001         /* Reset the load counter */
13002         bnx2x_clear_load_cnt(bp);
13003
13004         dev->watchdog_timeo = TX_TIMEOUT;
13005
13006         dev->netdev_ops = &bnx2x_netdev_ops;
13007         dev->ethtool_ops = &bnx2x_ethtool_ops;
13008         dev->features |= NETIF_F_SG;
13009         dev->features |= NETIF_F_HW_CSUM;
13010         if (bp->flags & USING_DAC_FLAG)
13011                 dev->features |= NETIF_F_HIGHDMA;
13012         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13013         dev->features |= NETIF_F_TSO6;
13014 #ifdef BCM_VLAN
13015         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
13016         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
13017
13018         dev->vlan_features |= NETIF_F_SG;
13019         dev->vlan_features |= NETIF_F_HW_CSUM;
13020         if (bp->flags & USING_DAC_FLAG)
13021                 dev->vlan_features |= NETIF_F_HIGHDMA;
13022         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
13023         dev->vlan_features |= NETIF_F_TSO6;
13024 #endif
13025
13026         /* get_port_hwinfo() will set prtad and mmds properly */
13027         bp->mdio.prtad = MDIO_PRTAD_NONE;
13028         bp->mdio.mmds = 0;
13029         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
13030         bp->mdio.dev = dev;
13031         bp->mdio.mdio_read = bnx2x_mdio_read;
13032         bp->mdio.mdio_write = bnx2x_mdio_write;
13033
13034         return 0;
13035
13036 err_out_unmap:
13037         if (bp->regview) {
13038                 iounmap(bp->regview);
13039                 bp->regview = NULL;
13040         }
13041         if (bp->doorbells) {
13042                 iounmap(bp->doorbells);
13043                 bp->doorbells = NULL;
13044         }
13045
13046 err_out_release:
13047         if (atomic_read(&pdev->enable_cnt) == 1)
13048                 pci_release_regions(pdev);
13049
13050 err_out_disable:
13051         pci_disable_device(pdev);
13052         pci_set_drvdata(pdev, NULL);
13053
13054 err_out:
13055         return rc;
13056 }
13057
13058 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
13059                                                  int *width, int *speed)
13060 {
13061         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
13062
13063         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
13064
        /* returned speed value: 1 = 2.5GHz, 2 = 5GHz */
13066         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
13067 }
13068
13069 static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
13070 {
13071         const struct firmware *firmware = bp->firmware;
13072         struct bnx2x_fw_file_hdr *fw_hdr;
13073         struct bnx2x_fw_file_section *sections;
13074         u32 offset, len, num_ops;
13075         u16 *ops_offsets;
13076         int i;
13077         const u8 *fw_ver;
13078
13079         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
13080                 return -EINVAL;
13081
13082         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
13083         sections = (struct bnx2x_fw_file_section *)fw_hdr;
13084
13085         /* Make sure none of the offsets and sizes make us read beyond
13086          * the end of the firmware data */
13087         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
13088                 offset = be32_to_cpu(sections[i].offset);
13089                 len = be32_to_cpu(sections[i].len);
13090                 if (offset + len > firmware->size) {
13091                         dev_err(&bp->pdev->dev,
13092                                 "Section %d length is out of bounds\n", i);
13093                         return -EINVAL;
13094                 }
13095         }
13096
13097         /* Likewise for the init_ops offsets */
13098         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
13099         ops_offsets = (u16 *)(firmware->data + offset);
13100         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
13101
13102         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
13103                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
13104                         dev_err(&bp->pdev->dev,
13105                                 "Section offset %d is out of bounds\n", i);
13106                         return -EINVAL;
13107                 }
13108         }
13109
13110         /* Check FW version */
13111         offset = be32_to_cpu(fw_hdr->fw_version.offset);
13112         fw_ver = firmware->data + offset;
13113         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
13114             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
13115             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
13116             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
13117                 dev_err(&bp->pdev->dev,
13118                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
13119                        fw_ver[0], fw_ver[1], fw_ver[2],
13120                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
13121                        BCM_5710_FW_MINOR_VERSION,
13122                        BCM_5710_FW_REVISION_VERSION,
13123                        BCM_5710_FW_ENGINEERING_VERSION);
13124                 return -EINVAL;
13125         }
13126
13127         return 0;
13128 }
13129
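/* Copy n bytes from _source to _target, converting each 32-bit word from
 * big-endian (the firmware file byte order) to CPU byte order.
 */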
13130 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13131 {
13132         const __be32 *source = (const __be32 *)_source;
13133         u32 *target = (u32 *)_target;
13134         u32 i;
13135
13136         for (i = 0; i < n/4; i++)
13137                 target[i] = be32_to_cpu(source[i]);
13138 }
13139
13140 /*
13141    Ops array is stored in the following format:
13142    {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
13143  */
13144 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
13145 {
13146         const __be32 *source = (const __be32 *)_source;
13147         struct raw_op *target = (struct raw_op *)_target;
13148         u32 i, j, tmp;
13149
13150         for (i = 0, j = 0; i < n/8; i++, j += 2) {
13151                 tmp = be32_to_cpu(source[j]);
13152                 target[i].op = (tmp >> 24) & 0xff;
13153                 target[i].offset = tmp & 0xffffff;
13154                 target[i].raw_data = be32_to_cpu(source[j + 1]);
13155         }
13156 }
13157
13158 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
13159 {
13160         const __be16 *source = (const __be16 *)_source;
13161         u16 *target = (u16 *)_target;
13162         u32 i;
13163
13164         for (i = 0; i < n/2; i++)
13165                 target[i] = be16_to_cpu(source[i]);
13166 }
13167
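/* Allocate bp->arr and fill it from the matching firmware file section,
 * converting from the file's big-endian layout with "func"; jump to "lbl"
 * on allocation failure.
 */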
13168 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
13169 do {                                                                    \
13170         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
13171         bp->arr = kmalloc(len, GFP_KERNEL);                             \
13172         if (!bp->arr) {                                                 \
13173                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
13174                 goto lbl;                                               \
13175         }                                                               \
13176         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
13177              (u8 *)bp->arr, len);                                       \
13178 } while (0)
13179
13180 static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
13181 {
13182         const char *fw_file_name;
13183         struct bnx2x_fw_file_hdr *fw_hdr;
13184         int rc;
13185
13186         if (CHIP_IS_E1(bp))
13187                 fw_file_name = FW_FILE_NAME_E1;
13188         else if (CHIP_IS_E1H(bp))
13189                 fw_file_name = FW_FILE_NAME_E1H;
13190         else {
13191                 dev_err(dev, "Unsupported chip revision\n");
13192                 return -EINVAL;
13193         }
13194
13195         dev_info(dev, "Loading %s\n", fw_file_name);
13196
13197         rc = request_firmware(&bp->firmware, fw_file_name, dev);
13198         if (rc) {
13199                 dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
13200                 goto request_firmware_exit;
13201         }
13202
13203         rc = bnx2x_check_firmware(bp);
13204         if (rc) {
13205                 dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
13206                 goto request_firmware_exit;
13207         }
13208
13209         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
13210
13211         /* Initialize the pointers to the init arrays */
13212         /* Blob */
13213         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
13214
13215         /* Opcodes */
13216         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
13217
13218         /* Offsets */
13219         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
13220                             be16_to_cpu_n);
13221
13222         /* STORMs firmware */
13223         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13224                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
13225         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
13226                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
13227         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13228                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
13229         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
13230                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
13231         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13232                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
13233         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
13234                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
13235         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
13236                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
13237         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
13238                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
13239
13240         return 0;
13241
13242 init_offsets_alloc_err:
13243         kfree(bp->init_ops);
13244 init_ops_alloc_err:
13245         kfree(bp->init_data);
13246 request_firmware_exit:
13247         release_firmware(bp->firmware);
13248
13249         return rc;
13250 }
13251
13252
13253 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
13254                                     const struct pci_device_id *ent)
13255 {
13256         struct net_device *dev = NULL;
13257         struct bnx2x *bp;
13258         int pcie_width, pcie_speed;
13259         int rc;
13260
        /* dev zeroed in alloc_etherdev_mq */
13262         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
13263         if (!dev) {
13264                 dev_err(&pdev->dev, "Cannot allocate net device\n");
13265                 return -ENOMEM;
13266         }
13267
13268         bp = netdev_priv(dev);
13269         bp->msg_enable = debug;
13270
13271         pci_set_drvdata(pdev, dev);
13272
13273         rc = bnx2x_init_dev(pdev, dev);
13274         if (rc < 0) {
13275                 free_netdev(dev);
13276                 return rc;
13277         }
13278
13279         rc = bnx2x_init_bp(bp);
13280         if (rc)
13281                 goto init_one_exit;
13282
13283         /* Set init arrays */
13284         rc = bnx2x_init_firmware(bp, &pdev->dev);
13285         if (rc) {
13286                 dev_err(&pdev->dev, "Error loading firmware\n");
13287                 goto init_one_exit;
13288         }
13289
13290         rc = register_netdev(dev);
13291         if (rc) {
13292                 dev_err(&pdev->dev, "Cannot register net device\n");
13293                 goto init_one_exit;
13294         }
13295
13296         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
13297         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
13298                " IRQ %d, ", board_info[ent->driver_data].name,
13299                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
13300                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
13301                dev->base_addr, bp->pdev->irq);
13302         pr_cont("node addr %pM\n", dev->dev_addr);
13303
13304         return 0;
13305
13306 init_one_exit:
13307         if (bp->regview)
13308                 iounmap(bp->regview);
13309
13310         if (bp->doorbells)
13311                 iounmap(bp->doorbells);
13312
13313         free_netdev(dev);
13314
13315         if (atomic_read(&pdev->enable_cnt) == 1)
13316                 pci_release_regions(pdev);
13317
13318         pci_disable_device(pdev);
13319         pci_set_drvdata(pdev, NULL);
13320
13321         return rc;
13322 }
13323
13324 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
13325 {
13326         struct net_device *dev = pci_get_drvdata(pdev);
13327         struct bnx2x *bp;
13328
13329         if (!dev) {
13330                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13331                 return;
13332         }
13333         bp = netdev_priv(dev);
13334
13335         unregister_netdev(dev);
13336
13337         /* Make sure RESET task is not scheduled before continuing */
13338         cancel_delayed_work_sync(&bp->reset_task);
13339
13340         kfree(bp->init_ops_offsets);
13341         kfree(bp->init_ops);
13342         kfree(bp->init_data);
13343         release_firmware(bp->firmware);
13344
13345         if (bp->regview)
13346                 iounmap(bp->regview);
13347
13348         if (bp->doorbells)
13349                 iounmap(bp->doorbells);
13350
13351         free_netdev(dev);
13352
13353         if (atomic_read(&pdev->enable_cnt) == 1)
13354                 pci_release_regions(pdev);
13355
13356         pci_disable_device(pdev);
13357         pci_set_drvdata(pdev, NULL);
13358 }
13359
13360 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
13361 {
13362         struct net_device *dev = pci_get_drvdata(pdev);
13363         struct bnx2x *bp;
13364
13365         if (!dev) {
13366                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13367                 return -ENODEV;
13368         }
13369         bp = netdev_priv(dev);
13370
13371         rtnl_lock();
13372
13373         pci_save_state(pdev);
13374
13375         if (!netif_running(dev)) {
13376                 rtnl_unlock();
13377                 return 0;
13378         }
13379
13380         netif_device_detach(dev);
13381
13382         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
13383
13384         bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
13385
13386         rtnl_unlock();
13387
13388         return 0;
13389 }
13390
13391 static int bnx2x_resume(struct pci_dev *pdev)
13392 {
13393         struct net_device *dev = pci_get_drvdata(pdev);
13394         struct bnx2x *bp;
13395         int rc;
13396
13397         if (!dev) {
13398                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
13399                 return -ENODEV;
13400         }
13401         bp = netdev_priv(dev);
13402
13403         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13404                 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
13405                 return -EAGAIN;
13406         }
13407
13408         rtnl_lock();
13409
13410         pci_restore_state(pdev);
13411
13412         if (!netif_running(dev)) {
13413                 rtnl_unlock();
13414                 return 0;
13415         }
13416
13417         bnx2x_set_power_state(bp, PCI_D0);
13418         netif_device_attach(dev);
13419
13420         rc = bnx2x_nic_load(bp, LOAD_OPEN);
13421
13422         rtnl_unlock();
13423
13424         return rc;
13425 }
13426
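/*
 * Stripped-down unload path for the EEH handler: stop the interface
 * and statistics, release IRQs, invalidate the E1 multicast CAM shadow
 * and free SKBs, SGEs and driver memory without the usual chip
 * shutdown handshake (the PCI channel may already be dead).
 */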
13427 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
13428 {
13429         int i;
13430
13431         bp->state = BNX2X_STATE_ERROR;
13432
13433         bp->rx_mode = BNX2X_RX_MODE_NONE;
13434
13435         bnx2x_netif_stop(bp, 0);
13436         netif_carrier_off(bp->dev);
13437
13438         del_timer_sync(&bp->timer);
13439         bp->stats_state = STATS_STATE_DISABLED;
13440         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
13441
13442         /* Release IRQs */
13443         bnx2x_free_irq(bp, false);
13444
13445         if (CHIP_IS_E1(bp)) {
13446                 struct mac_configuration_cmd *config =
13447                                                 bnx2x_sp(bp, mcast_config);
13448
13449                 for (i = 0; i < config->hdr.length; i++)
13450                         CAM_INVALIDATE(config->config_table[i]);
13451         }
13452
13453         /* Free SKBs, SGEs, TPA pool and driver internals */
13454         bnx2x_free_skbs(bp);
13455         for_each_queue(bp, i)
13456                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
13457         for_each_queue(bp, i)
13458                 netif_napi_del(&bnx2x_fp(bp, i, napi));
13459         bnx2x_free_mem(bp);
13460
13461         bp->state = BNX2X_STATE_CLOSED;
13462
13463         return 0;
13464 }
13465
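/*
 * After a slot reset, re-read the shared memory base and re-sync the
 * driver/MCP mailbox sequence; if shmem looks bogus, assume the MCP is
 * not active and set NO_MCP_FLAG.
 */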
13466 static void bnx2x_eeh_recover(struct bnx2x *bp)
13467 {
13468         u32 val;
13469
13470         mutex_init(&bp->port.phy_mutex);
13471
13472         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
13473         bp->link_params.shmem_base = bp->common.shmem_base;
13474         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
13475
13476         if (!bp->common.shmem_base ||
13477             (bp->common.shmem_base < 0xA0000) ||
13478             (bp->common.shmem_base >= 0xC0000)) {
13479                 BNX2X_DEV_INFO("MCP not active\n");
13480                 bp->flags |= NO_MCP_FLAG;
13481                 return;
13482         }
13483
13484         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
13485         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13486                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
13487                 BNX2X_ERR("BAD MCP validity signature\n");
13488
13489         if (!BP_NOMCP(bp)) {
13490                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
13491                               & DRV_MSG_SEQ_NUMBER_MASK);
13492                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
13493         }
13494 }
13495
13496 /**
13497  * bnx2x_io_error_detected - called when PCI error is detected
13498  * @pdev: Pointer to PCI device
13499  * @state: The current PCI connection state
13500  *
13501  * This function is called after a PCI bus error affecting
13502  * this device has been detected.
13503  */
13504 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
13505                                                 pci_channel_state_t state)
13506 {
13507         struct net_device *dev = pci_get_drvdata(pdev);
13508         struct bnx2x *bp = netdev_priv(dev);
13509
13510         rtnl_lock();
13511
13512         netif_device_detach(dev);
13513
13514         if (state == pci_channel_io_perm_failure) {
13515                 rtnl_unlock();
13516                 return PCI_ERS_RESULT_DISCONNECT;
13517         }
13518
13519         if (netif_running(dev))
13520                 bnx2x_eeh_nic_unload(bp);
13521
13522         pci_disable_device(pdev);
13523
13524         rtnl_unlock();
13525
13526         /* Request a slot reset */
13527         return PCI_ERS_RESULT_NEED_RESET;
13528 }
13529
13530 /**
13531  * bnx2x_io_slot_reset - called after the PCI bus has been reset
13532  * @pdev: Pointer to PCI device
13533  *
13534  * Restart the card from scratch, as if from a cold boot.
13535  */
13536 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
13537 {
13538         struct net_device *dev = pci_get_drvdata(pdev);
13539         struct bnx2x *bp = netdev_priv(dev);
13540
13541         rtnl_lock();
13542
13543         if (pci_enable_device(pdev)) {
13544                 dev_err(&pdev->dev,
13545                         "Cannot re-enable PCI device after reset\n");
13546                 rtnl_unlock();
13547                 return PCI_ERS_RESULT_DISCONNECT;
13548         }
13549
13550         pci_set_master(pdev);
13551         pci_restore_state(pdev);
13552
13553         if (netif_running(dev))
13554                 bnx2x_set_power_state(bp, PCI_D0);
13555
13556         rtnl_unlock();
13557
13558         return PCI_ERS_RESULT_RECOVERED;
13559 }
13560
13561 /**
13562  * bnx2x_io_resume - called when traffic can start flowing again
13563  * @pdev: Pointer to PCI device
13564  *
13565  * This callback is called when the error recovery driver tells us that
13566  * it's OK to resume normal operation.
13567  */
13568 static void bnx2x_io_resume(struct pci_dev *pdev)
13569 {
13570         struct net_device *dev = pci_get_drvdata(pdev);
13571         struct bnx2x *bp = netdev_priv(dev);
13572
13573         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
13574                 dev_err(&pdev->dev, "Handling parity error recovery. Try again later\n");
13575                 return;
13576         }
13577
13578         rtnl_lock();
13579
13580         bnx2x_eeh_recover(bp);
13581
13582         if (netif_running(dev))
13583                 bnx2x_nic_load(bp, LOAD_NORMAL);
13584
13585         netif_device_attach(dev);
13586
13587         rtnl_unlock();
13588 }
13589
13590 static struct pci_error_handlers bnx2x_err_handler = {
13591         .error_detected = bnx2x_io_error_detected,
13592         .slot_reset     = bnx2x_io_slot_reset,
13593         .resume         = bnx2x_io_resume,
13594 };
13595
13596 static struct pci_driver bnx2x_pci_driver = {
13597         .name        = DRV_MODULE_NAME,
13598         .id_table    = bnx2x_pci_tbl,
13599         .probe       = bnx2x_init_one,
13600         .remove      = __devexit_p(bnx2x_remove_one),
13601         .suspend     = bnx2x_suspend,
13602         .resume      = bnx2x_resume,
13603         .err_handler = &bnx2x_err_handler,
13604 };
13605
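/* Module init: create the slowpath workqueue and register the PCI driver. */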
13606 static int __init bnx2x_init(void)
13607 {
13608         int ret;
13609
13610         pr_info("%s", version);
13611
13612         bnx2x_wq = create_singlethread_workqueue("bnx2x");
13613         if (!bnx2x_wq) {
13614                 pr_err("Cannot create workqueue\n");
13615                 return -ENOMEM;
13616         }
13617
13618         ret = pci_register_driver(&bnx2x_pci_driver);
13619         if (ret) {
13620                 pr_err("Cannot register driver\n");
13621                 destroy_workqueue(bnx2x_wq);
13622         }
13623         return ret;
13624 }
13625
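/* Module exit: unregister the PCI driver and destroy the workqueue. */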
13626 static void __exit bnx2x_cleanup(void)
13627 {
13628         pci_unregister_driver(&bnx2x_pci_driver);
13629
13630         destroy_workqueue(bnx2x_wq);
13631 }
13632
13633 module_init(bnx2x_init);
13634 module_exit(bnx2x_cleanup);
13635
13636 #ifdef BCM_CNIC
13637
13638 /* count denotes the number of new completions we have seen */
13639 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13640 {
13641         struct eth_spe *spe;
13642
13643 #ifdef BNX2X_STOP_ON_ERROR
13644         if (unlikely(bp->panic))
13645                 return;
13646 #endif
13647
13648         spin_lock_bh(&bp->spq_lock);
13649         bp->cnic_spq_pending -= count;
13650
13651         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
13652              bp->cnic_spq_pending++) {
13653
13654                 if (!bp->cnic_kwq_pending)
13655                         break;
13656
13657                 spe = bnx2x_sp_get_next(bp);
13658                 *spe = *bp->cnic_kwq_cons;
13659
13660                 bp->cnic_kwq_pending--;
13661
13662                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
13663                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13664
13665                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13666                         bp->cnic_kwq_cons = bp->cnic_kwq;
13667                 else
13668                         bp->cnic_kwq_cons++;
13669         }
13670         bnx2x_sp_prod_update(bp);
13671         spin_unlock_bh(&bp->spq_lock);
13672 }
13673
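/*
 * Called by CNIC to submit up to @count 16-byte KWQEs: copy as many as
 * fit into the driver's KWQE ring, then let bnx2x_cnic_sp_post() move
 * them onto the SPQ if there is room.  Returns the number of entries
 * accepted.
 */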
13674 static int bnx2x_cnic_sp_queue(struct net_device *dev,
13675                                struct kwqe_16 *kwqes[], u32 count)
13676 {
13677         struct bnx2x *bp = netdev_priv(dev);
13678         int i;
13679
13680 #ifdef BNX2X_STOP_ON_ERROR
13681         if (unlikely(bp->panic))
13682                 return -EIO;
13683 #endif
13684
13685         spin_lock_bh(&bp->spq_lock);
13686
13687         for (i = 0; i < count; i++) {
13688                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13689
13690                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13691                         break;
13692
13693                 *bp->cnic_kwq_prod = *spe;
13694
13695                 bp->cnic_kwq_pending++;
13696
13697                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
13698                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
13699                    spe->data.mac_config_addr.hi,
13700                    spe->data.mac_config_addr.lo,
13701                    bp->cnic_kwq_pending);
13702
13703                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13704                         bp->cnic_kwq_prod = bp->cnic_kwq;
13705                 else
13706                         bp->cnic_kwq_prod++;
13707         }
13708
13709         spin_unlock_bh(&bp->spq_lock);
13710
13711         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13712                 bnx2x_cnic_sp_post(bp, 0);
13713
13714         return i;
13715 }
13716
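/* Deliver a control event to CNIC in process context (may sleep). */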
13717 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13718 {
13719         struct cnic_ops *c_ops;
13720         int rc = 0;
13721
13722         mutex_lock(&bp->cnic_mutex);
13723         c_ops = bp->cnic_ops;
13724         if (c_ops)
13725                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13726         mutex_unlock(&bp->cnic_mutex);
13727
13728         return rc;
13729 }
13730
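/* As above, but callable from BH context: uses RCU instead of the mutex. */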
13731 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13732 {
13733         struct cnic_ops *c_ops;
13734         int rc = 0;
13735
13736         rcu_read_lock();
13737         c_ops = rcu_dereference(bp->cnic_ops);
13738         if (c_ops)
13739                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13740         rcu_read_unlock();
13741
13742         return rc;
13743 }
13744
13745 /*
13746  * for commands that have no data
13747  */
13748 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
13749 {
13750         struct cnic_ctl_info ctl = {0};
13751
13752         ctl.cmd = cmd;
13753
13754         return bnx2x_cnic_ctl_send(bp, &ctl);
13755 }
13756
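/* Forward a CFC-delete completion for @cid to CNIC, then count it on the SPQ. */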
13757 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
13758 {
13759         struct cnic_ctl_info ctl;
13760
13761         /* first we tell CNIC and only then we count this as a completion */
13762         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13763         ctl.data.comp.cid = cid;
13764
13765         bnx2x_cnic_ctl_send_bh(bp, &ctl);
13766         bnx2x_cnic_sp_post(bp, 1);
13767 }
13768
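/*
 * Control hook exposed to CNIC: handles context-table writes, SPQ
 * completion credits and starting/stopping rx-mode for an L2 client.
 */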
13769 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13770 {
13771         struct bnx2x *bp = netdev_priv(dev);
13772         int rc = 0;
13773
13774         switch (ctl->cmd) {
13775         case DRV_CTL_CTXTBL_WR_CMD: {
13776                 u32 index = ctl->data.io.offset;
13777                 dma_addr_t addr = ctl->data.io.dma_addr;
13778
13779                 bnx2x_ilt_wr(bp, index, addr);
13780                 break;
13781         }
13782
13783         case DRV_CTL_COMPLETION_CMD: {
13784                 int count = ctl->data.comp.comp_count;
13785
13786                 bnx2x_cnic_sp_post(bp, count);
13787                 break;
13788         }
13789
13790         /* rtnl_lock is held.  */
13791         case DRV_CTL_START_L2_CMD: {
13792                 u32 cli = ctl->data.ring.client_id;
13793
13794                 bp->rx_mode_cl_mask |= (1 << cli);
13795                 bnx2x_set_storm_rx_mode(bp);
13796                 break;
13797         }
13798
13799         /* rtnl_lock is held.  */
13800         case DRV_CTL_STOP_L2_CMD: {
13801                 u32 cli = ctl->data.ring.client_id;
13802
13803                 bp->rx_mode_cl_mask &= ~(1 << cli);
13804                 bnx2x_set_storm_rx_mode(bp);
13805                 break;
13806         }
13807
13808         default:
13809                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13810                 rc = -EINVAL;
13811         }
13812
13813         return rc;
13814 }
13815
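/*
 * Describe our interrupt resources to CNIC: the vector to use (MSI-X
 * table slot 1 when MSI-X is on) plus the CNIC and default status
 * blocks.
 */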
13816 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
13817 {
13818         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13819
13820         if (bp->flags & USING_MSIX_FLAG) {
13821                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13822                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13823                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13824         } else {
13825                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13826                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13827         }
13828         cp->irq_arr[0].status_blk = bp->cnic_sb;
13829         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
13830         cp->irq_arr[1].status_blk = bp->def_status_blk;
13831         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
13832
13833         cp->num_irq = 2;
13834 }
13835
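/*
 * Called by CNIC to attach: allocate the KWQE ring, set up the CNIC
 * status block, publish IRQ info and the iSCSI MAC, and finally expose
 * @ops via RCU.
 */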
13836 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13837                                void *data)
13838 {
13839         struct bnx2x *bp = netdev_priv(dev);
13840         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13841
13842         if (!ops)
13843                 return -EINVAL;
13844
13845         if (atomic_read(&bp->intr_sem) != 0)
13846                 return -EBUSY;
13847
13848         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13849         if (!bp->cnic_kwq)
13850                 return -ENOMEM;
13851
13852         bp->cnic_kwq_cons = bp->cnic_kwq;
13853         bp->cnic_kwq_prod = bp->cnic_kwq;
13854         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13855
13856         bp->cnic_spq_pending = 0;
13857         bp->cnic_kwq_pending = 0;
13858
13859         bp->cnic_data = data;
13860
13861         cp->num_irq = 0;
13862         cp->drv_state = CNIC_DRV_STATE_REGD;
13863
13864         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
13865
13866         bnx2x_setup_cnic_irq_info(bp);
13867         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
13868         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
13869         rcu_assign_pointer(bp->cnic_ops, ops);
13870
13871         return 0;
13872 }
13873
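/*
 * Detach CNIC: clear the iSCSI MAC, drop the ops pointer under the
 * mutex, wait for RCU readers to finish and free the KWQE ring.
 */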
13874 static int bnx2x_unregister_cnic(struct net_device *dev)
13875 {
13876         struct bnx2x *bp = netdev_priv(dev);
13877         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13878
13879         mutex_lock(&bp->cnic_mutex);
13880         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
13881                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
13882                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
13883         }
13884         cp->drv_state = 0;
13885         rcu_assign_pointer(bp->cnic_ops, NULL);
13886         mutex_unlock(&bp->cnic_mutex);
13887         synchronize_rcu();
13888         kfree(bp->cnic_kwq);
13889         bp->cnic_kwq = NULL;
13890
13891         return 0;
13892 }
13893
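/*
 * Fill in and return the cnic_eth_dev descriptor that CNIC uses to
 * drive this device (register windows, context table geometry and the
 * driver callbacks above).
 */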
13894 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13895 {
13896         struct bnx2x *bp = netdev_priv(dev);
13897         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13898
13899         cp->drv_owner = THIS_MODULE;
13900         cp->chip_id = CHIP_ID(bp);
13901         cp->pdev = bp->pdev;
13902         cp->io_base = bp->regview;
13903         cp->io_base2 = bp->doorbells;
13904         cp->max_kwqe_pending = 8;
13905         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
13906         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
13907         cp->ctx_tbl_len = CNIC_ILT_LINES;
13908         cp->starting_cid = BCM_CNIC_CID_START;
13909         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13910         cp->drv_ctl = bnx2x_drv_ctl;
13911         cp->drv_register_cnic = bnx2x_register_cnic;
13912         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
13913
13914         return cp;
13915 }
13916 EXPORT_SYMBOL(bnx2x_cnic_probe);
13917
13918 #endif /* BCM_CNIC */
13919