1 /* bnx2x_main.c: Broadcom Everest network driver.
3 * Copyright (c) 2007-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath and fastpath rework by Vladislav Zolotarov
14 * Statistics and Link management by Yitchak Gertner
18 #include <linux/module.h>
19 #include <linux/moduleparam.h>
20 #include <linux/kernel.h>
21 #include <linux/device.h> /* for dev_info() */
22 #include <linux/timer.h>
23 #include <linux/errno.h>
24 #include <linux/ioport.h>
25 #include <linux/slab.h>
26 #include <linux/vmalloc.h>
27 #include <linux/interrupt.h>
28 #include <linux/pci.h>
29 #include <linux/init.h>
30 #include <linux/netdevice.h>
31 #include <linux/etherdevice.h>
32 #include <linux/skbuff.h>
33 #include <linux/dma-mapping.h>
34 #include <linux/bitops.h>
35 #include <linux/irq.h>
36 #include <linux/delay.h>
37 #include <asm/byteorder.h>
38 #include <linux/time.h>
39 #include <linux/ethtool.h>
40 #include <linux/mii.h>
41 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <net/ip6_checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/crc32c.h>
49 #include <linux/prefetch.h>
50 #include <linux/zlib.h>
52 #include <linux/stringify.h>
56 #include "bnx2x_init.h"
57 #include "bnx2x_init_ops.h"
58 #include "bnx2x_dump.h"
60 #define DRV_MODULE_VERSION "1.52.53-1"
61 #define DRV_MODULE_RELDATE "2010/18/04"
62 #define BNX2X_BC_VER 0x040200
64 #include <linux/firmware.h>
65 #include "bnx2x_fw_file_hdr.h"
67 #define FW_FILE_VERSION \
68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72 #define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73 #define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
75 /* Time in jiffies before concluding the transmitter is hung */
76 #define TX_TIMEOUT (5*HZ)
78 static char version[] __devinitdata =
79 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
80 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
82 MODULE_AUTHOR("Eliezer Tamir");
83 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
84 MODULE_LICENSE("GPL");
85 MODULE_VERSION(DRV_MODULE_VERSION);
86 MODULE_FIRMWARE(FW_FILE_NAME_E1);
87 MODULE_FIRMWARE(FW_FILE_NAME_E1H);
89 static int multi_mode = 1;
90 module_param(multi_mode, int, 0);
91 MODULE_PARM_DESC(multi_mode, " Multi queue mode "
92 "(0 Disable; 1 Enable (default))");
94 static int num_queues;
95 module_param(num_queues, int, 0);
96 MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
97 " (default is as a number of CPUs)");
99 static int disable_tpa;
100 module_param(disable_tpa, int, 0);
101 MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
103 static int int_mode;
104 module_param(int_mode, int, 0);
105 MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
106 "(1 INT#x; 2 MSI)");
108 static int dropless_fc;
109 module_param(dropless_fc, int, 0);
110 MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
112 static int poll;
113 module_param(poll, int, 0);
114 MODULE_PARM_DESC(poll, " Use polling (for debug)");
116 static int mrrs = -1;
117 module_param(mrrs, int, 0);
118 MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120 static int debug;
121 module_param(debug, int, 0);
122 MODULE_PARM_DESC(debug, " Default debug msglevel");
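/* Usage sketch (illustrative, not part of the original source): the
 * parameters above are given at load time, e.g.
 *	modprobe bnx2x multi_mode=1 num_queues=4 disable_tpa=1
 * All of them use permission 0, so none appear under sysfs after load.
 */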
124 static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
126 static struct workqueue_struct *bnx2x_wq;
128 enum bnx2x_board_type {
129 BCM57710 = 0,
130 BCM57711 = 1,
131 BCM57711E = 2,
132 };
134 /* indexed by board_type, above */
135 static struct {
136 char *name;
137 } board_info[] __devinitdata = {
138 { "Broadcom NetXtreme II BCM57710 XGb" },
139 { "Broadcom NetXtreme II BCM57711 XGb" },
140 { "Broadcom NetXtreme II BCM57711E XGb" }
144 static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
145 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
146 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
147 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
148 { 0 }
149 };
151 MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
153 /****************************************************************************
154 * General service functions
155 ****************************************************************************/
157 /* used only at init
158  * locking is done by mcp
159  */
160 void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
162 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
163 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
164 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
165 PCICFG_VENDOR_ID_OFFSET);
168 static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
172 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
173 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
174 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
175 PCICFG_VENDOR_ID_OFFSET);
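/* Illustrative sketch (not in the original driver): a read-modify-write
 * through the indirect PCI config window simply combines the two helpers
 * above, under the same MCP-provided locking assumptions.
 */
static inline void bnx2x_reg_rmw_ind(struct bnx2x *bp, u32 addr,
				     u32 clear_bits, u32 set_bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);

	val &= ~clear_bits;	/* drop the bits the caller wants cleared */
	val |= set_bits;	/* then raise the requested ones */
	bnx2x_reg_wr_ind(bp, addr, val);
}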
180 static const u32 dmae_reg_go_c[] = {
181 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
182 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
183 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
184 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
187 /* copy command into DMAE command memory and set DMAE command go */
188 static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
194 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
195 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
196 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
198 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
199 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
201 REG_WR(bp, dmae_reg_go_c[idx], 1);
204 void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
207 struct dmae_command dmae;
208 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
211 if (!bp->dmae_ready) {
212 u32 *data = bnx2x_sp(bp, wb_data[0]);
214 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
215 " using indirect\n", dst_addr, len32);
216 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
220 memset(&dmae, 0, sizeof(struct dmae_command));
222 dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
223 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
224 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
226 DMAE_CMD_ENDIANITY_B_DW_SWAP |
228 DMAE_CMD_ENDIANITY_DW_SWAP |
230 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
231 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
232 dmae.src_addr_lo = U64_LO(dma_addr);
233 dmae.src_addr_hi = U64_HI(dma_addr);
234 dmae.dst_addr_lo = dst_addr >> 2;
235 dmae.dst_addr_hi = 0;
237 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
238 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
239 dmae.comp_val = DMAE_COMP_VAL;
241 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
242 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
243 "dst_addr [%x:%08x (%08x)]\n"
244 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
245 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
246 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
247 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
248 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
249 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
250 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
252 mutex_lock(&bp->dmae_mutex);
256 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
260 while (*wb_comp != DMAE_COMP_VAL) {
261 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
264 BNX2X_ERR("DMAE timeout!\n");
268 /* adjust delay for emulation/FPGA */
269 if (CHIP_REV_IS_SLOW(bp))
275 mutex_unlock(&bp->dmae_mutex);
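/* Usage sketch (illustrative): callers stage the payload in the slowpath
 * write-back area and hand its DMA address to the function above, e.g.
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), src, 2 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), dst_addr, 2);
 *
 * which is essentially what the REG_WR_DMAE() wrapper used by
 * bnx2x_wb_wr() further below does.
 */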
278 void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
280 struct dmae_command dmae;
281 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
284 if (!bp->dmae_ready) {
285 u32 *data = bnx2x_sp(bp, wb_data[0]);
288 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
289 " using indirect\n", src_addr, len32);
290 for (i = 0; i < len32; i++)
291 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
295 memset(&dmae, 0, sizeof(struct dmae_command));
297 dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
298 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
299 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
301 DMAE_CMD_ENDIANITY_B_DW_SWAP |
303 DMAE_CMD_ENDIANITY_DW_SWAP |
305 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
306 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
307 dmae.src_addr_lo = src_addr >> 2;
308 dmae.src_addr_hi = 0;
309 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
310 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
312 dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
313 dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
314 dmae.comp_val = DMAE_COMP_VAL;
316 DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
317 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
318 "dst_addr [%x:%08x (%08x)]\n"
319 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
320 dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
321 dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
322 dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
324 mutex_lock(&bp->dmae_mutex);
326 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
329 bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));
333 while (*wb_comp != DMAE_COMP_VAL) {
336 BNX2X_ERR("DMAE timeout!\n");
340 /* adjust delay for emulation/FPGA */
341 if (CHIP_REV_IS_SLOW(bp))
346 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
347 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
348 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
350 mutex_unlock(&bp->dmae_mutex);
353 void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
356 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
359 while (len > dmae_wr_max) {
360 bnx2x_write_dmae(bp, phys_addr + offset,
361 addr + offset, dmae_wr_max);
362 offset += dmae_wr_max * 4;
363 len -= dmae_wr_max;
364 }
366 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
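/* Illustrative: a request of (2 * dmae_wr_max + 3) dwords makes two
 * maximal passes through the loop above plus the trailing 3-dword write;
 * note that `offset` advances in bytes (dwords * 4) while `len` counts
 * dwords.
 */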
369 /* used only for slowpath so not inlined */
370 static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
374 wb_write[0] = val_hi;
375 wb_write[1] = val_lo;
376 REG_WR_DMAE(bp, reg, wb_write, 2);
380 static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
384 REG_RD_DMAE(bp, reg, wb_data, 2);
386 return HILO_U64(wb_data[0], wb_data[1]);
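/* Illustrative: the two dwords read back above form one wide register;
 * HILO_U64(hi, lo) effectively reassembles them as ((u64)hi << 32) | lo,
 * with wb_data[0] holding the high half.
 */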
390 static int bnx2x_mc_assert(struct bnx2x *bp)
394 u32 row0, row1, row2, row3;
397 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
398 XSTORM_ASSERT_LIST_INDEX_OFFSET);
400 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
402 /* print the asserts */
403 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
405 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
406 XSTORM_ASSERT_LIST_OFFSET(i));
407 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
408 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
409 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
410 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
411 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
412 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
414 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
415 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
416 " 0x%08x 0x%08x 0x%08x\n",
417 i, row3, row2, row1, row0);
425 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
426 TSTORM_ASSERT_LIST_INDEX_OFFSET);
428 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
430 /* print the asserts */
431 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
433 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
434 TSTORM_ASSERT_LIST_OFFSET(i));
435 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
436 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
437 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
438 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
439 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
440 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
442 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
443 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
444 " 0x%08x 0x%08x 0x%08x\n",
445 i, row3, row2, row1, row0);
453 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
454 CSTORM_ASSERT_LIST_INDEX_OFFSET);
456 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
458 /* print the asserts */
459 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
461 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
462 CSTORM_ASSERT_LIST_OFFSET(i));
463 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
464 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
465 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
466 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
467 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
468 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
470 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
471 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
472 " 0x%08x 0x%08x 0x%08x\n",
473 i, row3, row2, row1, row0);
481 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
482 USTORM_ASSERT_LIST_INDEX_OFFSET);
484 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
486 /* print the asserts */
487 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
489 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
490 USTORM_ASSERT_LIST_OFFSET(i));
491 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
492 USTORM_ASSERT_LIST_OFFSET(i) + 4);
493 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
494 USTORM_ASSERT_LIST_OFFSET(i) + 8);
495 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
496 USTORM_ASSERT_LIST_OFFSET(i) + 12);
498 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
499 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
500 " 0x%08x 0x%08x 0x%08x\n",
501 i, row3, row2, row1, row0);
511 static void bnx2x_fw_dump(struct bnx2x *bp)
519 BNX2X_ERR("NO MCP - can not dump\n");
523 addr = bp->common.shmem_base - 0x0800 + 4;
524 mark = REG_RD(bp, addr);
525 mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
526 pr_err("begin fw dump (mark 0x%x)\n", mark);
529 for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
530 for (word = 0; word < 8; word++)
531 data[word] = htonl(REG_RD(bp, offset + 4*word));
533 pr_cont("%s", (char *)data);
535 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
536 for (word = 0; word < 8; word++)
537 data[word] = htonl(REG_RD(bp, offset + 4*word));
539 pr_cont("%s", (char *)data);
541 pr_err("end of fw dump\n");
544 static void bnx2x_panic_dump(struct bnx2x *bp)
549 bp->stats_state = STATS_STATE_DISABLED;
550 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
552 BNX2X_ERR("begin crash dump -----------------\n");
556 BNX2X_ERR("def_c_idx(0x%x) def_u_idx(0x%x) def_x_idx(0x%x)"
557 " def_t_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
558 " spq_prod_idx(0x%x)\n",
559 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
560 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
563 for_each_queue(bp, i) {
564 struct bnx2x_fastpath *fp = &bp->fp[i];
566 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
567 " *rx_bd_cons_sb(0x%x) rx_comp_prod(0x%x)"
568 " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
569 i, fp->rx_bd_prod, fp->rx_bd_cons,
570 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
571 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
572 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
573 " fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
574 fp->rx_sge_prod, fp->last_max_sge,
575 le16_to_cpu(fp->fp_u_idx),
576 fp->status_blk->u_status_block.status_block_index);
580 for_each_queue(bp, i) {
581 struct bnx2x_fastpath *fp = &bp->fp[i];
583 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
584 " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
585 " *tx_cons_sb(0x%x)\n",
586 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
587 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
588 BNX2X_ERR(" fp_c_idx(0x%x) *sb_c_idx(0x%x)"
589 " tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
590 fp->status_blk->c_status_block.status_block_index,
591 fp->tx_db.data.prod);
596 for_each_queue(bp, i) {
597 struct bnx2x_fastpath *fp = &bp->fp[i];
599 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
600 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
601 for (j = start; j != end; j = RX_BD(j + 1)) {
602 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
603 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
605 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
606 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
609 start = RX_SGE(fp->rx_sge_prod);
610 end = RX_SGE(fp->last_max_sge);
611 for (j = start; j != end; j = RX_SGE(j + 1)) {
612 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
613 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
615 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
616 i, j, rx_sge[1], rx_sge[0], sw_page->page);
619 start = RCQ_BD(fp->rx_comp_cons - 10);
620 end = RCQ_BD(fp->rx_comp_cons + 503);
621 for (j = start; j != end; j = RCQ_BD(j + 1)) {
622 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
624 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
625 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
630 for_each_queue(bp, i) {
631 struct bnx2x_fastpath *fp = &bp->fp[i];
633 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
634 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
635 for (j = start; j != end; j = TX_BD(j + 1)) {
636 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
638 BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
639 i, j, sw_bd->skb, sw_bd->first_bd);
642 start = TX_BD(fp->tx_bd_cons - 10);
643 end = TX_BD(fp->tx_bd_cons + 254);
644 for (j = start; j != end; j = TX_BD(j + 1)) {
645 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
647 BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
648 i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
654 BNX2X_ERR("end crash dump -----------------\n");
657 static void bnx2x_int_enable(struct bnx2x *bp)
659 int port = BP_PORT(bp);
660 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
661 u32 val = REG_RD(bp, addr);
662 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
663 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
666 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
667 HC_CONFIG_0_REG_INT_LINE_EN_0);
668 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
669 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
671 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
672 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
673 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
674 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
676 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
677 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
678 HC_CONFIG_0_REG_INT_LINE_EN_0 |
679 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
681 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
684 REG_WR(bp, addr, val);
686 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
689 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
690 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
692 REG_WR(bp, addr, val);
693 /*
694 * Ensure that HC_CONFIG is written before leading/trailing edge config
695 */
696 mmiowb();
697 barrier();
699 if (CHIP_IS_E1H(bp)) {
700 /* init leading/trailing edge */
702 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
704 /* enable nig and gpio3 attention */
709 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
713 /* Make sure that interrupts are indeed enabled from here on */
714 mmiowb();
717 static void bnx2x_int_disable(struct bnx2x *bp)
719 int port = BP_PORT(bp);
720 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
721 u32 val = REG_RD(bp, addr);
723 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
724 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
725 HC_CONFIG_0_REG_INT_LINE_EN_0 |
726 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
728 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
731 /* flush all outstanding writes */
734 REG_WR(bp, addr, val);
735 if (REG_RD(bp, addr) != val)
736 BNX2X_ERR("BUG! proper val not read from IGU!\n");
739 static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
741 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
744 /* disable interrupt handling */
745 atomic_inc(&bp->intr_sem);
746 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
749 /* prevent the HW from sending interrupts */
750 bnx2x_int_disable(bp);
752 /* make sure all ISRs are done */
754 synchronize_irq(bp->msix_table[0].vector);
759 for_each_queue(bp, i)
760 synchronize_irq(bp->msix_table[i + offset].vector);
762 synchronize_irq(bp->pdev->irq);
764 /* make sure sp_task is not running */
765 cancel_delayed_work(&bp->sp_task);
766 flush_workqueue(bnx2x_wq);
771 /*
772 * General service functions
773 */
775 /* Return true if succeeded to acquire the lock */
776 static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
779 u32 resource_bit = (1 << resource);
780 int func = BP_FUNC(bp);
781 u32 hw_lock_control_reg;
783 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
785 /* Validating that the resource is within range */
786 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
788 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
789 resource, HW_LOCK_MAX_RESOURCE_VALUE);
794 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
796 hw_lock_control_reg =
797 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
799 /* Try to acquire the lock */
800 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
801 lock_status = REG_RD(bp, hw_lock_control_reg);
802 if (lock_status & resource_bit)
805 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
809 static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
810 u8 storm, u16 index, u8 op, u8 update)
812 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
813 COMMAND_REG_INT_ACK);
814 struct igu_ack_register igu_ack;
816 igu_ack.status_block_index = index;
817 igu_ack.sb_id_and_flags =
818 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
819 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
820 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
821 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
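/* Illustrative: struct igu_ack_register packs into a single dword — the
 * new status block index plus the sb id/storm/update/op fields composed
 * above — so the ack can be posted to the HC with one register write
 * below.
 */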
823 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
824 (*(u32 *)&igu_ack), hc_addr);
825 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
827 /* Make sure that ACK is written */
828 mmiowb();
829 barrier();
832 static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
834 struct host_status_block *fpsb = fp->status_blk;
836 barrier(); /* status block is written to by the chip */
837 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
838 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
841 static u16 bnx2x_ack_int(struct bnx2x *bp)
843 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
844 COMMAND_REG_SIMD_MASK);
845 u32 result = REG_RD(bp, hc_addr);
847 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
854 /*
855 * fast path service functions
856 */
858 static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
860 /* Tell compiler that consumer and producer can change */
862 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
865 /* free skb in the packet ring at pos idx
866 * return idx of last bd freed
867 */
868 static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
871 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
872 struct eth_tx_start_bd *tx_start_bd;
873 struct eth_tx_bd *tx_data_bd;
874 struct sk_buff *skb = tx_buf->skb;
875 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
878 /* prefetch skb end pointer to speedup dev_kfree_skb() */
881 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
885 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
886 tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
887 dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
888 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);
890 nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
891 #ifdef BNX2X_STOP_ON_ERROR
892 if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
893 BNX2X_ERR("BAD nbd!\n");
897 new_cons = nbd + tx_buf->first_bd;
899 /* Get the next bd */
900 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
902 /* Skip a parse bd... */
904 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
906 /* ...and the TSO split header bd since they have no mapping */
907 if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
909 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
915 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
916 tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
917 dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
918 BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
920 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
926 tx_buf->first_bd = 0;
932 static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
938 prod = fp->tx_bd_prod;
939 cons = fp->tx_bd_cons;
941 /* NUM_TX_RINGS = number of "next-page" entries
942 It will be used as a threshold */
943 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
945 #ifdef BNX2X_STOP_ON_ERROR
947 WARN_ON(used > fp->bp->tx_ring_size);
948 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
951 return (s16)(fp->bp->tx_ring_size) - used;
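/* Worked example (illustrative): with prod = 100 and cons = 90, used is
 * 10 + NUM_TX_RINGS, i.e. the "next page" BDs are permanently accounted
 * as busy, so the value returned never hands them out to the stack.
 */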
954 static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
958 /* Tell compiler that status block fields can change */
960 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
961 return hw_cons != fp->tx_pkt_cons;
964 static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
966 struct bnx2x *bp = fp->bp;
967 struct netdev_queue *txq;
968 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
970 #ifdef BNX2X_STOP_ON_ERROR
971 if (unlikely(bp->panic))
975 txq = netdev_get_tx_queue(bp->dev, fp->index);
976 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
977 sw_cons = fp->tx_pkt_cons;
979 while (sw_cons != hw_cons) {
982 pkt_cons = TX_BD(sw_cons);
984 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
986 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
987 hw_cons, sw_cons, pkt_cons);
989 /* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
991 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
994 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
998 fp->tx_pkt_cons = sw_cons;
999 fp->tx_bd_cons = bd_cons;
1001 /* Need to make the tx_bd_cons update visible to start_xmit()
1002 * before checking for netif_tx_queue_stopped(). Without the
1003 * memory barrier, there is a small possibility that
1004 * start_xmit() will miss it and cause the queue to be stopped
1005 * forever.
1006 */
1007 smp_mb();
1009 /* TBD need a thresh? */
1010 if (unlikely(netif_tx_queue_stopped(txq))) {
1011 /* Taking tx_lock() is needed to prevent reenabling the queue
1012 * while it's empty. This could have happened if rx_action() gets
1013 * suspended in bnx2x_tx_int() after the condition before
1014 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1016 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1017 * sends some packets consuming the whole queue again->
1018 * stops the queue
1019 */
1021 __netif_tx_lock(txq, smp_processor_id());
1023 if ((netif_tx_queue_stopped(txq)) &&
1024 (bp->state == BNX2X_STATE_OPEN) &&
1025 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
1026 netif_tx_wake_queue(txq);
1028 __netif_tx_unlock(txq);
1034 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1037 static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1038 union eth_rx_cqe *rr_cqe)
1040 struct bnx2x *bp = fp->bp;
1041 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1042 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1045 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
1046 fp->index, cid, command, bp->state,
1047 rr_cqe->ramrod_cqe.ramrod_type);
1052 switch (command | fp->state) {
1053 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
1054 BNX2X_FP_STATE_OPENING):
1055 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
1057 fp->state = BNX2X_FP_STATE_OPEN;
1060 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1061 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
1063 fp->state = BNX2X_FP_STATE_HALTED;
1067 BNX2X_ERR("unexpected MC reply (%d) "
1068 "fp[%d] state is %x\n",
1069 command, fp->index, fp->state);
1072 mb(); /* force bnx2x_wait_ramrod() to see the change */
1076 switch (command | bp->state) {
1077 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
1078 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
1079 bp->state = BNX2X_STATE_OPEN;
1082 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
1083 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
1084 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
1085 fp->state = BNX2X_FP_STATE_HALTED;
1088 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
1089 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
1090 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1094 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1095 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1096 bnx2x_cnic_cfc_comp(bp, cid);
1100 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1101 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1102 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1103 bp->set_mac_pending--;
1107 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1108 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1109 bp->set_mac_pending--;
1114 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
1115 command, bp->state);
1118 mb(); /* force bnx2x_wait_ramrod() to see the change */
1121 static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
1122 struct bnx2x_fastpath *fp, u16 index)
1124 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1125 struct page *page = sw_buf->page;
1126 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1128 /* Skip "next page" elements */
1132 dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
1133 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
1134 __free_pages(page, PAGES_PER_SGE_SHIFT);
1136 sw_buf->page = NULL;
1141 static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1142 struct bnx2x_fastpath *fp, int last)
1146 for (i = 0; i < last; i++)
1147 bnx2x_free_rx_sge(bp, fp, i);
1150 static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1151 struct bnx2x_fastpath *fp, u16 index)
1153 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1154 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1155 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1158 if (unlikely(page == NULL))
1161 mapping = dma_map_page(&bp->pdev->dev, page, 0,
1162 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1163 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1164 __free_pages(page, PAGES_PER_SGE_SHIFT);
1168 sw_buf->page = page;
1169 dma_unmap_addr_set(sw_buf, mapping, mapping);
1171 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1172 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1177 static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1178 struct bnx2x_fastpath *fp, u16 index)
1180 struct sk_buff *skb;
1181 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1182 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1185 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1186 if (unlikely(skb == NULL))
1189 mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
1191 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
1197 dma_unmap_addr_set(rx_buf, mapping, mapping);
1199 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1200 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1205 /* note that we are not allocating a new skb,
1206 * we are just moving one from cons to prod
1207 * we are not creating a new mapping,
1208 so there is no need to check for dma_mapping_error().
1209 */
1210 static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1211 struct sk_buff *skb, u16 cons, u16 prod)
1213 struct bnx2x *bp = fp->bp;
1214 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1215 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1216 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1217 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1219 dma_sync_single_for_device(&bp->pdev->dev,
1220 dma_unmap_addr(cons_rx_buf, mapping),
1221 RX_COPY_THRESH, DMA_FROM_DEVICE);
1223 prod_rx_buf->skb = cons_rx_buf->skb;
1224 dma_unmap_addr_set(prod_rx_buf, mapping,
1225 dma_unmap_addr(cons_rx_buf, mapping));
1226 *prod_bd = *cons_bd;
1229 static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1232 u16 last_max = fp->last_max_sge;
1234 if (SUB_S16(idx, last_max) > 0)
1235 fp->last_max_sge = idx;
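/* Illustrative: SUB_S16() keeps the ordering test correct across the u16
 * wrap-around, e.g. idx = 2 vs. last_max = 0xfffe yields +4, so 2 is
 * treated as newer than 0xfffe.
 */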
1238 static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1242 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1243 int idx = RX_SGE_CNT * i - 1;
1245 for (j = 0; j < 2; j++) {
1246 SGE_MASK_CLEAR_BIT(fp, idx);
1252 static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1253 struct eth_fast_path_rx_cqe *fp_cqe)
1255 struct bnx2x *bp = fp->bp;
1256 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
1257 le16_to_cpu(fp_cqe->len_on_bd)) >>
1259 u16 last_max, last_elem, first_elem;
1266 /* First mark all used pages */
1267 for (i = 0; i < sge_len; i++)
1268 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1270 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1271 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1273 /* Here we assume that the last SGE index is the biggest */
1274 prefetch((void *)(fp->sge_mask));
1275 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1277 last_max = RX_SGE(fp->last_max_sge);
1278 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1279 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1281 /* If ring is not full */
1282 if (last_elem + 1 != first_elem)
1285 /* Now update the prod */
1286 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1287 if (likely(fp->sge_mask[i]))
1290 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1291 delta += RX_SGE_MASK_ELEM_SZ;
1295 fp->rx_sge_prod += delta;
1296 /* clear page-end entries */
1297 bnx2x_clear_sge_mask_next_elems(fp);
1300 DP(NETIF_MSG_RX_STATUS,
1301 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1302 fp->last_max_sge, fp->rx_sge_prod);
1305 static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1307 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1308 memset(fp->sge_mask, 0xff,
1309 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1311 /* Clear the last two indices in each page to 0:
1312 these are the indices that correspond to the "next" element,
1313 hence will never be indicated and should be removed from
1314 the calculations. */
1315 bnx2x_clear_sge_mask_next_elems(fp);
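/* Illustrative: a set bit in sge_mask means "SGE free". Forcing the two
 * "next page" slots of each page to 0 lets a mask word collapse to
 * exactly zero once all of its real SGEs are consumed, which is the
 * condition bnx2x_update_sge_prod() tests when advancing the producer.
 */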
1318 static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1319 struct sk_buff *skb, u16 cons, u16 prod)
1321 struct bnx2x *bp = fp->bp;
1322 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1323 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1324 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1327 /* move empty skb from pool to prod and map it */
1328 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1329 mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
1330 bp->rx_buf_size, DMA_FROM_DEVICE);
1331 dma_unmap_addr_set(prod_rx_buf, mapping, mapping);
1333 /* move partial skb from cons to pool (don't unmap yet) */
1334 fp->tpa_pool[queue] = *cons_rx_buf;
1336 /* mark bin state as start - print error if current state != stop */
1337 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1338 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1340 fp->tpa_state[queue] = BNX2X_TPA_START;
1342 /* point prod_bd to new skb */
1343 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1344 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1346 #ifdef BNX2X_STOP_ON_ERROR
1347 fp->tpa_queue_used |= (1 << queue);
1348 #ifdef _ASM_GENERIC_INT_L64_H
1349 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1351 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1353 fp->tpa_queue_used);
1357 static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1358 struct sk_buff *skb,
1359 struct eth_fast_path_rx_cqe *fp_cqe,
1362 struct sw_rx_page *rx_pg, old_rx_pg;
1363 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1364 u32 i, frag_len, frag_size, pages;
1368 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
1369 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
1371 /* This is needed in order to enable forwarding support */
1373 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
1374 max(frag_size, (u32)len_on_bd));
1376 #ifdef BNX2X_STOP_ON_ERROR
1377 if (pages > min_t(u32, 8, MAX_SKB_FRAGS)*SGE_PAGE_SIZE*PAGES_PER_SGE) {
1378 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1380 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1381 fp_cqe->pkt_len, len_on_bd);
1387 /* Run through the SGL and compose the fragmented skb */
1388 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1389 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1391 /* FW gives the indices of the SGE as if the ring is an array
1392 (meaning that "next" element will consume 2 indices) */
1393 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
1394 rx_pg = &fp->rx_page_ring[sge_idx];
1397 /* If we fail to allocate a substitute page, we simply stop
1398 where we are and drop the whole packet */
1399 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1400 if (unlikely(err)) {
1401 fp->eth_q_stats.rx_skb_alloc_failed++;
1405 /* Unmap the page as we are going to pass it to the stack */
1406 dma_unmap_page(&bp->pdev->dev,
1407 dma_unmap_addr(&old_rx_pg, mapping),
1408 SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
1410 /* Add one frag and update the appropriate fields in the skb */
1411 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1413 skb->data_len += frag_len;
1414 skb->truesize += frag_len;
1415 skb->len += frag_len;
1417 frag_size -= frag_len;
1423 static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1424 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1427 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1428 struct sk_buff *skb = rx_buf->skb;
1430 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1432 /* Unmap skb in the pool anyway, as we are going to change
1433 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1434 fails. */
1435 dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
1436 bp->rx_buf_size, DMA_FROM_DEVICE);
1438 if (likely(new_skb)) {
1439 /* fix ip xsum and give it to the stack */
1440 /* (no need to map the new skb) */
1443 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1444 PARSING_FLAGS_VLAN);
1445 int is_not_hwaccel_vlan_cqe =
1446 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1450 prefetch(((char *)(skb)) + 128);
1452 #ifdef BNX2X_STOP_ON_ERROR
1453 if (pad + len > bp->rx_buf_size) {
1454 BNX2X_ERR("skb_put is about to fail... "
1455 "pad %d len %d rx_buf_size %d\n",
1456 pad, len, bp->rx_buf_size);
1462 skb_reserve(skb, pad);
1465 skb->protocol = eth_type_trans(skb, bp->dev);
1466 skb->ip_summed = CHECKSUM_UNNECESSARY;
1471 iph = (struct iphdr *)skb->data;
1473 /* If there is no Rx VLAN offloading -
1474 take VLAN tag into an account */
1475 if (unlikely(is_not_hwaccel_vlan_cqe))
1476 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1479 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1482 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1483 &cqe->fast_path_cqe, cqe_idx)) {
1485 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1486 (!is_not_hwaccel_vlan_cqe))
1487 vlan_gro_receive(&fp->napi, bp->vlgrp,
1488 le16_to_cpu(cqe->fast_path_cqe.
1492 napi_gro_receive(&fp->napi, skb);
1494 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1495 " - dropping packet!\n");
1500 /* put new skb in bin */
1501 fp->tpa_pool[queue].skb = new_skb;
1504 /* else drop the packet and keep the buffer in the bin */
1505 DP(NETIF_MSG_RX_STATUS,
1506 "Failed to allocate new skb - dropping packet!\n");
1507 fp->eth_q_stats.rx_skb_alloc_failed++;
1510 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1513 static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1514 struct bnx2x_fastpath *fp,
1515 u16 bd_prod, u16 rx_comp_prod,
1518 struct ustorm_eth_rx_producers rx_prods = {0};
1521 /* Update producers */
1522 rx_prods.bd_prod = bd_prod;
1523 rx_prods.cqe_prod = rx_comp_prod;
1524 rx_prods.sge_prod = rx_sge_prod;
1527 * Make sure that the BD and SGE data is updated before updating the
1528 * producers since FW might read the BD/SGE right after the producer
1530 * This is only applicable for weak-ordered memory model archs such
1531 * as IA-64. The following barrier is also mandatory since FW
1532 * assumes BDs must have buffers.
1536 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1537 REG_WR(bp, BAR_USTRORM_INTMEM +
1538 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
1539 ((u32 *)&rx_prods)[i]);
1541 mmiowb(); /* keep prod updates ordered */
1543 DP(NETIF_MSG_RX_STATUS,
1544 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1545 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
1548 static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1550 struct bnx2x *bp = fp->bp;
1551 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
1552 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1555 #ifdef BNX2X_STOP_ON_ERROR
1556 if (unlikely(bp->panic))
1560 /* CQ "next element" is of the size of the regular element,
1561 that's why it's ok here */
1562 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1563 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1566 bd_cons = fp->rx_bd_cons;
1567 bd_prod = fp->rx_bd_prod;
1568 bd_prod_fw = bd_prod;
1569 sw_comp_cons = fp->rx_comp_cons;
1570 sw_comp_prod = fp->rx_comp_prod;
1572 /* Memory barrier necessary as speculative reads of the rx
1573 * buffer can be ahead of the index in the status block
1574 */
1575 rmb();
1577 DP(NETIF_MSG_RX_STATUS,
1578 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
1579 fp->index, hw_comp_cons, sw_comp_cons);
1581 while (sw_comp_cons != hw_comp_cons) {
1582 struct sw_rx_bd *rx_buf = NULL;
1583 struct sk_buff *skb;
1584 union eth_rx_cqe *cqe;
1585 u8 cqe_fp_flags, cqe_fp_status_flags;
1588 comp_ring_cons = RCQ_BD(sw_comp_cons);
1589 bd_prod = RX_BD(bd_prod);
1590 bd_cons = RX_BD(bd_cons);
1592 /* Prefetch the page containing the BD descriptor
1593 at producer's index. It will be needed when new skb is
1594 allocated */
1595 prefetch((void *)(PAGE_ALIGN((unsigned long)
1596 (&fp->rx_desc_ring[bd_prod])) -
1599 cqe = &fp->rx_comp_ring[comp_ring_cons];
1600 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
1601 cqe_fp_status_flags = cqe->fast_path_cqe.status_flags;
1603 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
1604 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1605 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
1606 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
1607 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1608 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
1610 /* is this a slowpath msg? */
1611 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
1612 bnx2x_sp_event(fp, cqe);
1615 /* this is an rx packet */
1617 rx_buf = &fp->rx_buf_ring[bd_cons];
1620 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1621 pad = cqe->fast_path_cqe.placement_offset;
1623 /* If CQE is marked both TPA_START and TPA_END
1624 it is a non-TPA CQE */
1625 if ((!fp->disable_tpa) &&
1626 (TPA_TYPE(cqe_fp_flags) !=
1627 (TPA_TYPE_START | TPA_TYPE_END))) {
1628 u16 queue = cqe->fast_path_cqe.queue_index;
1630 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1631 DP(NETIF_MSG_RX_STATUS,
1632 "calling tpa_start on queue %d\n",
1635 bnx2x_tpa_start(fp, queue, skb,
1640 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1641 DP(NETIF_MSG_RX_STATUS,
1642 "calling tpa_stop on queue %d\n",
1645 if (!BNX2X_RX_SUM_FIX(cqe))
1646 BNX2X_ERR("STOP on none TCP "
1649 /* This is the size of the linear data
1650 on this skb */
1651 len = le16_to_cpu(cqe->fast_path_cqe.
1652 len_on_bd);
1653 bnx2x_tpa_stop(bp, fp, queue, pad,
1654 len, cqe, comp_ring_cons);
1655 #ifdef BNX2X_STOP_ON_ERROR
1660 bnx2x_update_sge_prod(fp,
1661 &cqe->fast_path_cqe);
1666 dma_sync_single_for_device(&bp->pdev->dev,
1667 dma_unmap_addr(rx_buf, mapping),
1668 pad + RX_COPY_THRESH,
1670 prefetch(((char *)(skb)) + 128);
1672 /* is this an error packet? */
1673 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
1674 DP(NETIF_MSG_RX_ERR,
1675 "ERROR flags %x rx packet %u\n",
1676 cqe_fp_flags, sw_comp_cons);
1677 fp->eth_q_stats.rx_err_discard_pkt++;
1681 /* Since we don't have a jumbo ring
1682 * copy small packets if mtu > 1500
1683 */
1684 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1685 (len <= RX_COPY_THRESH)) {
1686 struct sk_buff *new_skb;
1688 new_skb = netdev_alloc_skb(bp->dev,
1690 if (new_skb == NULL) {
1691 DP(NETIF_MSG_RX_ERR,
1692 "ERROR packet dropped "
1693 "because of alloc failure\n");
1694 fp->eth_q_stats.rx_skb_alloc_failed++;
1699 skb_copy_from_linear_data_offset(skb, pad,
1700 new_skb->data + pad, len);
1701 skb_reserve(new_skb, pad);
1702 skb_put(new_skb, len);
1704 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1709 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1710 dma_unmap_single(&bp->pdev->dev,
1711 dma_unmap_addr(rx_buf, mapping),
1714 skb_reserve(skb, pad);
1718 DP(NETIF_MSG_RX_ERR,
1719 "ERROR packet dropped because "
1720 "of alloc failure\n");
1721 fp->eth_q_stats.rx_skb_alloc_failed++;
1723 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1727 skb->protocol = eth_type_trans(skb, bp->dev);
1729 if ((bp->dev->features & NETIF_F_RXHASH) &&
1730 (cqe_fp_status_flags &
1731 ETH_FAST_PATH_RX_CQE_RSS_HASH_FLG))
1732 skb->rxhash = le32_to_cpu(
1733 cqe->fast_path_cqe.rss_hash_result);
1735 skb->ip_summed = CHECKSUM_NONE;
1737 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1738 skb->ip_summed = CHECKSUM_UNNECESSARY;
1739 else
1740 fp->eth_q_stats.hw_csum_err++;
1744 skb_record_rx_queue(skb, fp->index);
1747 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
1748 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1749 PARSING_FLAGS_VLAN))
1750 vlan_gro_receive(&fp->napi, bp->vlgrp,
1751 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
1754 napi_gro_receive(&fp->napi, skb);
1760 bd_cons = NEXT_RX_IDX(bd_cons);
1761 bd_prod = NEXT_RX_IDX(bd_prod);
1762 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1765 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1766 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
1768 if (rx_pkt == budget)
1772 fp->rx_bd_cons = bd_cons;
1773 fp->rx_bd_prod = bd_prod_fw;
1774 fp->rx_comp_cons = sw_comp_cons;
1775 fp->rx_comp_prod = sw_comp_prod;
1777 /* Update producers */
1778 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1781 fp->rx_pkt += rx_pkt;
1787 static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1789 struct bnx2x_fastpath *fp = fp_cookie;
1790 struct bnx2x *bp = fp->bp;
1792 /* Return here if interrupt is disabled */
1793 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1794 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1798 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1799 fp->index, fp->sb_id);
1800 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
1802 #ifdef BNX2X_STOP_ON_ERROR
1803 if (unlikely(bp->panic))
1807 /* Handle Rx and Tx according to MSI-X vector */
1808 prefetch(fp->rx_cons_sb);
1809 prefetch(fp->tx_cons_sb);
1810 prefetch(&fp->status_blk->u_status_block.status_block_index);
1811 prefetch(&fp->status_blk->c_status_block.status_block_index);
1812 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1817 static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1819 struct bnx2x *bp = netdev_priv(dev_instance);
1820 u16 status = bnx2x_ack_int(bp);
1824 /* Return here if interrupt is shared and it's not for us */
1825 if (unlikely(status == 0)) {
1826 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1829 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
1831 /* Return here if interrupt is disabled */
1832 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1833 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1837 #ifdef BNX2X_STOP_ON_ERROR
1838 if (unlikely(bp->panic))
1842 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1843 struct bnx2x_fastpath *fp = &bp->fp[i];
1845 mask = 0x2 << fp->sb_id;
1846 if (status & mask) {
1847 /* Handle Rx and Tx according to SB id */
1848 prefetch(fp->rx_cons_sb);
1849 prefetch(&fp->status_blk->u_status_block.
1850 status_block_index);
1851 prefetch(fp->tx_cons_sb);
1852 prefetch(&fp->status_blk->c_status_block.
1853 status_block_index);
1854 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
1860 mask = 0x2 << CNIC_SB_ID(bp);
1861 if (status & (mask | 0x1)) {
1862 struct cnic_ops *c_ops = NULL;
1865 c_ops = rcu_dereference(bp->cnic_ops);
1867 c_ops->cnic_handler(bp->cnic_data, NULL);
1874 if (unlikely(status & 0x1)) {
1875 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1882 if (unlikely(status))
1883 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
1889 /* end of fast path */
1891 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
1895 /*
1896 * General service functions
1897 */
1899 static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1902 u32 resource_bit = (1 << resource);
1903 int func = BP_FUNC(bp);
1904 u32 hw_lock_control_reg;
1907 /* Validating that the resource is within range */
1908 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1910 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1911 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1916 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1918 hw_lock_control_reg =
1919 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1922 /* Validating that the resource is not already taken */
1923 lock_status = REG_RD(bp, hw_lock_control_reg);
1924 if (lock_status & resource_bit) {
1925 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1926 lock_status, resource_bit);
1930 /* Try for 5 seconds, every 5ms */
1931 for (cnt = 0; cnt < 1000; cnt++) {
1932 /* Try to acquire the lock */
1933 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1934 lock_status = REG_RD(bp, hw_lock_control_reg);
1935 if (lock_status & resource_bit)
1940 DP(NETIF_MSG_HW, "Timeout\n");
1944 static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1947 u32 resource_bit = (1 << resource);
1948 int func = BP_FUNC(bp);
1949 u32 hw_lock_control_reg;
1951 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1953 /* Validating that the resource is within range */
1954 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1956 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1957 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1962 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1964 hw_lock_control_reg =
1965 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1968 /* Validating that the resource is currently taken */
1969 lock_status = REG_RD(bp, hw_lock_control_reg);
1970 if (!(lock_status & resource_bit)) {
1971 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1972 lock_status, resource_bit);
1976 REG_WR(bp, hw_lock_control_reg, resource_bit);
1980 /* HW Lock for shared dual port PHYs */
1981 static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1983 mutex_lock(&bp->port.phy_mutex);
1985 if (bp->port.need_hw_lock)
1986 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1989 static void bnx2x_release_phy_lock(struct bnx2x *bp)
1991 if (bp->port.need_hw_lock)
1992 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
1994 mutex_unlock(&bp->port.phy_mutex);
1997 int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1999 /* The GPIO should be swapped if swap register is set and active */
2000 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2001 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2002 int gpio_shift = gpio_num +
2003 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2004 u32 gpio_mask = (1 << gpio_shift);
2008 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2009 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2013 /* read GPIO value */
2014 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2016 /* get the requested pin value */
2017 if ((gpio_reg & gpio_mask) == gpio_mask)
2018 value = 1;
2019 else
2020 value = 0;
2022 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2027 int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2029 /* The GPIO should be swapped if swap register is set and active */
2030 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2031 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2032 int gpio_shift = gpio_num +
2033 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2034 u32 gpio_mask = (1 << gpio_shift);
2037 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2038 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2042 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2043 /* read GPIO and mask except the float bits */
2044 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
2047 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2048 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2049 gpio_num, gpio_shift);
2050 /* clear FLOAT and set CLR */
2051 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2052 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2055 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2056 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2057 gpio_num, gpio_shift);
2058 /* clear FLOAT and set SET */
2059 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2063 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2064 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2065 gpio_num, gpio_shift);
2067 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2074 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2075 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
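/* Usage sketch (illustrative, the pin number is hypothetical): driving a
 * pin low on the current port would be
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 */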
2080 int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2082 /* The GPIO should be swapped if swap register is set and active */
2083 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2084 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2085 int gpio_shift = gpio_num +
2086 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2087 u32 gpio_mask = (1 << gpio_shift);
2090 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2091 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2095 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2097 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2100 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2101 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2102 "output low\n", gpio_num, gpio_shift);
2103 /* clear SET and set CLR */
2104 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2105 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2108 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2109 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2110 "output high\n", gpio_num, gpio_shift);
2111 /* clear CLR and set SET */
2112 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2113 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2120 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2121 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2126 static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
2128 u32 spio_mask = (1 << spio_num);
2131 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2132 (spio_num > MISC_REGISTERS_SPIO_7)) {
2133 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2137 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2138 /* read SPIO and mask except the float bits */
2139 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
2142 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2143 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2144 /* clear FLOAT and set CLR */
2145 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2146 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2149 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2150 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2151 /* clear FLOAT and set SET */
2152 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2153 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2156 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2157 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2159 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2166 REG_WR(bp, MISC_REG_SPIO, spio_reg);
2167 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2172 static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2174 switch (bp->link_vars.ieee_fc &
2175 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
2176 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
2177 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2181 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
2182 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2186 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
2187 bp->port.advertising |= ADVERTISED_Asym_Pause;
2191 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2197 static void bnx2x_link_report(struct bnx2x *bp)
2199 if (bp->flags & MF_FUNC_DIS) {
2200 netif_carrier_off(bp->dev);
2201 netdev_err(bp->dev, "NIC Link is Down\n");
2205 if (bp->link_vars.link_up) {
2208 if (bp->state == BNX2X_STATE_OPEN)
2209 netif_carrier_on(bp->dev);
2210 netdev_info(bp->dev, "NIC Link is Up, ");
2212 line_speed = bp->link_vars.line_speed;
2217 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2218 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2219 if (vn_max_rate < line_speed)
2220 line_speed = vn_max_rate;
2222 pr_cont("%d Mbps ", line_speed);
2224 if (bp->link_vars.duplex == DUPLEX_FULL)
2225 pr_cont("full duplex");
2227 pr_cont("half duplex");
2229 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2230 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
2231 pr_cont(", receive ");
2232 if (bp->link_vars.flow_ctrl &
2234 pr_cont("& transmit ");
2236 pr_cont(", transmit ");
2238 pr_cont("flow control ON");
2242 } else { /* link_down */
2243 netif_carrier_off(bp->dev);
2244 netdev_err(bp->dev, "NIC Link is Down\n");
2248 static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2250 if (!BP_NOMCP(bp)) {
2253 /* Initialize link parameters structure variables */
2254 /* It is recommended to turn off RX FC for jumbo frames
2255 for better performance */
2256 if (bp->dev->mtu > 5000)
2257 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2259 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2261 bnx2x_acquire_phy_lock(bp);
2263 if (load_mode == LOAD_DIAG)
2264 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2266 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2268 bnx2x_release_phy_lock(bp);
2270 bnx2x_calc_fc_adv(bp);
2272 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2273 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2274 bnx2x_link_report(bp);
2279 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
2283 static void bnx2x_link_set(struct bnx2x *bp)
2285 if (!BP_NOMCP(bp)) {
2286 bnx2x_acquire_phy_lock(bp);
2287 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
2288 bnx2x_release_phy_lock(bp);
2290 bnx2x_calc_fc_adv(bp);
2292 BNX2X_ERR("Bootcode is missing - can not set link\n");
2295 static void bnx2x__link_reset(struct bnx2x *bp)
2297 if (!BP_NOMCP(bp)) {
2298 bnx2x_acquire_phy_lock(bp);
2299 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2300 bnx2x_release_phy_lock(bp);
2302 BNX2X_ERR("Bootcode is missing - can not reset link\n");
2305 static u8 bnx2x_link_test(struct bnx2x *bp)
2309 if (!BP_NOMCP(bp)) {
2310 bnx2x_acquire_phy_lock(bp);
2311 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
2312 bnx2x_release_phy_lock(bp);
2314 BNX2X_ERR("Bootcode is missing - can not test link\n");
2319 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2321 u32 r_param = bp->link_vars.line_speed / 8;
2322 u32 fair_periodic_timeout_usec;
2325 memset(&(bp->cmng.rs_vars), 0,
2326 sizeof(struct rate_shaping_vars_per_port));
2327 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2329 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2330 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2332 /* this is the threshold below which no timer arming will occur;
2333 the 1.25 coefficient makes the threshold a little bigger than
2334 the real time, to compensate for timer inaccuracy */
2335 bp->cmng.rs_vars.rs_threshold =
2336 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2338 /* resolution of fairness timer */
2339 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2340 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2341 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2343 /* this is the threshold below which we won't arm the timer anymore */
2344 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2346 /* we multiply by 1e3/8 to get bytes/msec.
2347 We don't want the credit to exceed
2348 t_fair*FAIR_MEM (the algorithm resolution) */
2349 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2350 /* since each tick is 4 usec */
2351 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
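/* Worked example (illustrative), assuming a 10000 Mbps link and the
 * constants implied by the comments above (RS_PERIODIC_TIMEOUT_USEC =
 * 100, T_FAIR_COEF = 10000000):
 *	r_param             = 10000 / 8        = 1250 bytes/usec
 *	rs_periodic_timeout = 100 / 4          = 25 SDM ticks
 *	rs_threshold        = 100*1250*5/4     = 156250 bytes
 *	t_fair              = 10000000 / 10000 = 1000 usec
 */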
2354 /* Calculates the sum of vn_min_rates.
2355 It's needed for further normalizing of the min_rates.
2357 Returns: the sum of vn_min_rates, or
2359 0 - if all the min_rates are 0.
2360 In the latter case the fairness algorithm should be deactivated.
2361 If not all min_rates are zero then those that are zero will be set to 1.
2363 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2366 int port = BP_PORT(bp);
2369 bp->vn_weight_sum = 0;
2370 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2371 int func = 2*vn + port;
2372 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2373 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2374 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2376 /* Skip hidden vns */
2377 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2380 /* If min rate is zero - set it to 1 */
2382 vn_min_rate = DEF_MIN_RATE;
2386 bp->vn_weight_sum += vn_min_rate;
2389 /* ... only if all min rates are zeros - disable fairness */
2391 bp->cmng.flags.cmng_enables &=
2392 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2393 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2394 " fairness will be disabled\n");
2396 bp->cmng.flags.cmng_enables |=
2397 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
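/* Illustrative example: the MF min-bandwidth field appears to be a
 * percentage, so two visible VNs configured at 25% and 75% yield
 * vn_min_rate values of 2500 and 7500 after the *100 scaling, giving
 * vn_weight_sum = 10000 - consistent with the 10000 upper bound noted
 * in bnx2x_init_vn_minmax() below. */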
2400 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2402 struct rate_shaping_vars_per_vn m_rs_vn;
2403 struct fairness_vars_per_vn m_fair_vn;
2404 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2405 u16 vn_min_rate, vn_max_rate;
2408 /* If function is hidden - set min and max to zeroes */
2409 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2414 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2415 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2416 /* If min rate is zero - set it to 1 */
2418 vn_min_rate = DEF_MIN_RATE;
2419 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2420 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2423 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2424 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2426 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2427 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2429 /* global vn counter - maximal Mbps for this vn */
2430 m_rs_vn.vn_counter.rate = vn_max_rate;
2432 /* quota - number of bytes transmitted in this period */
2433 m_rs_vn.vn_counter.quota =
2434 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2436 if (bp->vn_weight_sum) {
2437 /* credit for each period of the fairness algorithm:
2438 number of bytes in T_FAIR (the vn share the port rate).
2439 vn_weight_sum should not be larger than 10000, thus
2440 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater than zero */
2442 m_fair_vn.vn_credit_delta =
2443 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2444 (8 * bp->vn_weight_sum))),
2445 (bp->cmng.fair_vars.fair_threshold * 2));
2446 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
2447 m_fair_vn.vn_credit_delta);
2450 /* Store it to internal memory */
2451 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2452 REG_WR(bp, BAR_XSTRORM_INTMEM +
2453 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2454 ((u32 *)(&m_rs_vn))[i]);
2456 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2457 REG_WR(bp, BAR_XSTRORM_INTMEM +
2458 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2459 ((u32 *)(&m_fair_vn))[i]);
2463 /* This function is called upon link interrupt */
2464 static void bnx2x_link_attn(struct bnx2x *bp)
2466 u32 prev_link_status = bp->link_vars.link_status;
2467 /* Make sure that we are synced with the current statistics */
2468 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2470 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2472 if (bp->link_vars.link_up) {
2474 /* dropless flow control */
2475 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2476 int port = BP_PORT(bp);
2477 u32 pause_enabled = 0;
2479 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2482 REG_WR(bp, BAR_USTRORM_INTMEM +
2483 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2487 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2488 struct host_port_stats *pstats;
2490 pstats = bnx2x_sp(bp, port_stats);
2491 /* reset old bmac stats */
2492 memset(&(pstats->mac_stx[0]), 0,
2493 sizeof(struct mac_stx));
2495 if (bp->state == BNX2X_STATE_OPEN)
2496 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2499 /* indicate link status only if link status actually changed */
2500 if (prev_link_status != bp->link_vars.link_status)
2501 bnx2x_link_report(bp);
2504 int port = BP_PORT(bp);
2508 /* Set the attention towards other drivers on the same port */
2509 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2510 if (vn == BP_E1HVN(bp))
2513 func = ((vn << 1) | port);
2514 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2515 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2518 if (bp->link_vars.link_up) {
2521 /* Init rate shaping and fairness contexts */
2522 bnx2x_init_port_minmax(bp);
2524 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2525 bnx2x_init_vn_minmax(bp, 2*vn + port);
2527 /* Store it to internal memory */
2529 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2530 REG_WR(bp, BAR_XSTRORM_INTMEM +
2531 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2532 ((u32 *)(&bp->cmng))[i]);
2537 static void bnx2x__link_status_update(struct bnx2x *bp)
2539 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2542 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2544 if (bp->link_vars.link_up)
2545 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2547 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2549 bnx2x_calc_vn_weight_sum(bp);
2551 /* indicate link status */
2552 bnx2x_link_report(bp);
2555 static void bnx2x_pmf_update(struct bnx2x *bp)
2557 int port = BP_PORT(bp);
2561 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2563 /* enable nig attention */
2564 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2565 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2566 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2568 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2576 * General service functions
2579 /* send the MCP a request, block until there is a reply */
2580 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2582 int func = BP_FUNC(bp);
2583 u32 seq = ++bp->fw_seq;
2586 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2588 mutex_lock(&bp->fw_mb_mutex);
2589 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2590 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2593 /* let the FW do its magic ... */
2596 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2598 /* Give the FW up to 5 seconds (500*10ms) */
2599 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2601 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2602 cnt*delay, rc, seq);
2604 /* is this a reply to our command? */
2605 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2606 rc &= FW_MSG_CODE_MASK;
2609 BNX2X_ERR("FW failed to respond!\n");
2613 mutex_unlock(&bp->fw_mb_mutex);
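/* Illustrative exchange for the mailbox handshake above: the driver
 * writes (command | seq) to drv_mb_header and polls fw_mb_header until
 * the low FW_MSG_SEQ_NUMBER_MASK bits echo the same seq; only then is
 * the reply accepted, and the caller sees just the FW_MSG_CODE_MASK
 * part.  A missing or stale reply trips the "FW failed to respond!"
 * path instead. */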
2618 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2619 static void bnx2x_set_rx_mode(struct net_device *dev);
2621 static void bnx2x_e1h_disable(struct bnx2x *bp)
2623 int port = BP_PORT(bp);
2625 netif_tx_disable(bp->dev);
2627 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2629 netif_carrier_off(bp->dev);
2632 static void bnx2x_e1h_enable(struct bnx2x *bp)
2634 int port = BP_PORT(bp);
2636 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2638 /* Tx queues should only be re-enabled */
2639 netif_tx_wake_all_queues(bp->dev);
2642 * Should not call netif_carrier_on since it will be called if the link
2643 * is up when checking for link state
2647 static void bnx2x_update_min_max(struct bnx2x *bp)
2649 int port = BP_PORT(bp);
2652 /* Init rate shaping and fairness contexts */
2653 bnx2x_init_port_minmax(bp);
2655 bnx2x_calc_vn_weight_sum(bp);
2657 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2658 bnx2x_init_vn_minmax(bp, 2*vn + port);
2663 /* Set the attention towards other drivers on the same port */
2664 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2665 if (vn == BP_E1HVN(bp))
2668 func = ((vn << 1) | port);
2669 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2670 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2673 /* Store it to internal memory */
2674 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2675 REG_WR(bp, BAR_XSTRORM_INTMEM +
2676 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2677 ((u32 *)(&bp->cmng))[i]);
2681 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2683 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2685 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2688 * This is the only place besides the function initialization
2689 * where the bp->flags can change so it is done without any
2692 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2693 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2694 bp->flags |= MF_FUNC_DIS;
2696 bnx2x_e1h_disable(bp);
2698 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2699 bp->flags &= ~MF_FUNC_DIS;
2701 bnx2x_e1h_enable(bp);
2703 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2705 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2707 bnx2x_update_min_max(bp);
2708 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2711 /* Report results to MCP */
2713 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2715 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2718 /* must be called under the spq lock */
2719 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2721 struct eth_spe *next_spe = bp->spq_prod_bd;
2723 if (bp->spq_prod_bd == bp->spq_last_bd) {
2724 bp->spq_prod_bd = bp->spq;
2725 bp->spq_prod_idx = 0;
2726 DP(NETIF_MSG_TIMER, "end of spq\n");
} else {
bp->spq_prod_bd++;
bp->spq_prod_idx++;
}
return next_spe;
2734 /* must be called under the spq lock */
2735 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2737 int func = BP_FUNC(bp);
2739 /* Make sure that BD data is updated before writing the producer */
2742 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2747 /* the slow path queue is odd since completions arrive on the fastpath ring */
2748 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2749 u32 data_hi, u32 data_lo, int common)
2751 struct eth_spe *spe;
2753 #ifdef BNX2X_STOP_ON_ERROR
2754 if (unlikely(bp->panic))
2758 spin_lock_bh(&bp->spq_lock);
2760 if (!bp->spq_left) {
2761 BNX2X_ERR("BUG! SPQ ring full!\n");
2762 spin_unlock_bh(&bp->spq_lock);
2767 spe = bnx2x_sp_get_next(bp);
2769 /* CID needs port number to be encoded in it */
2770 spe->hdr.conn_and_cmd_data =
2771 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2773 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2774 if (common)
2775 spe->hdr.type |=
2776 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2778 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2779 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2783 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2784 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2785 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2786 (u32)(U64_LO(bp->spq_mapping) +
2787 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2788 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2790 bnx2x_sp_prod_update(bp);
2791 spin_unlock_bh(&bp->spq_lock);
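/* Usage sketch (mirrors the real call site in bnx2x_storm_stats_post()
 * further down in this file):
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 */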
2795 /* acquire split MCP access lock register */
2796 static int bnx2x_acquire_alr(struct bnx2x *bp)
2802 for (j = 0; j < 1000; j++) {
2804 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2805 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2806 if (val & (1L << 31))
2811 if (!(val & (1L << 31))) {
2812 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2819 /* release split MCP access lock register */
2820 static void bnx2x_release_alr(struct bnx2x *bp)
2822 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
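/* The ALR protocol used by the two helpers above (sketch): each
 * contender writes the lock bit (1 << 31) to GRCBASE_MCP + 0x9c and
 * reads it back; the register is expected to latch the bit for only
 * one owner at a time.  Acquisition is retried up to 1000 times before
 * giving up, and release is a plain write of 0. */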
2825 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2827 struct host_def_status_block *def_sb = bp->def_status_blk;
2830 barrier(); /* status block is written to by the chip */
2831 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2832 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2835 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2836 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2839 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2840 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2843 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2844 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2847 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2848 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
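/* Each index that moved contributes a bit to the mask this function
 * returns; bnx2x_sp_task() below uses that mask to decide which storm
 * events to handle before acking the new indices back to the IGU. */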
2855 * slow path service functions
2858 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2860 int port = BP_PORT(bp);
2861 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2862 COMMAND_REG_ATTN_BITS_SET);
2863 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2864 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2865 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2866 NIG_REG_MASK_INTERRUPT_PORT0;
2870 if (bp->attn_state & asserted)
2871 BNX2X_ERR("IGU ERROR\n");
2873 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2874 aeu_mask = REG_RD(bp, aeu_addr);
2876 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2877 aeu_mask, asserted);
2878 aeu_mask &= ~(asserted & 0x3ff);
2879 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2881 REG_WR(bp, aeu_addr, aeu_mask);
2882 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2884 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2885 bp->attn_state |= asserted;
2886 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2888 if (asserted & ATTN_HARD_WIRED_MASK) {
2889 if (asserted & ATTN_NIG_FOR_FUNC) {
2891 bnx2x_acquire_phy_lock(bp);
2893 /* save nig interrupt mask */
2894 nig_mask = REG_RD(bp, nig_int_mask_addr);
2895 REG_WR(bp, nig_int_mask_addr, 0);
2897 bnx2x_link_attn(bp);
2899 /* handle unicore attn? */
2901 if (asserted & ATTN_SW_TIMER_4_FUNC)
2902 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2904 if (asserted & GPIO_2_FUNC)
2905 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2907 if (asserted & GPIO_3_FUNC)
2908 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2910 if (asserted & GPIO_4_FUNC)
2911 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2914 if (asserted & ATTN_GENERAL_ATTN_1) {
2915 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2916 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2918 if (asserted & ATTN_GENERAL_ATTN_2) {
2919 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2922 if (asserted & ATTN_GENERAL_ATTN_3) {
2923 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2924 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2927 if (asserted & ATTN_GENERAL_ATTN_4) {
2928 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2929 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2931 if (asserted & ATTN_GENERAL_ATTN_5) {
2932 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2933 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2935 if (asserted & ATTN_GENERAL_ATTN_6) {
2936 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2937 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2941 } /* if hardwired */
2943 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2945 REG_WR(bp, hc_addr, asserted);
2947 /* now set back the mask */
2948 if (asserted & ATTN_NIG_FOR_FUNC) {
2949 REG_WR(bp, nig_int_mask_addr, nig_mask);
2950 bnx2x_release_phy_lock(bp);
2954 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2956 int port = BP_PORT(bp);
2958 /* mark the failure */
2959 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2960 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2961 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2962 bp->link_params.ext_phy_config);
2964 /* log the failure */
2965 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2966 " the driver to shutdown the card to prevent permanent"
2967 " damage. Please contact OEM Support for assistance\n");
2970 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2972 int port = BP_PORT(bp);
2974 u32 val, swap_val, swap_override;
2976 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2977 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2979 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2981 val = REG_RD(bp, reg_offset);
2982 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2983 REG_WR(bp, reg_offset, val);
2985 BNX2X_ERR("SPIO5 hw attention\n");
2987 /* Fan failure attention */
2988 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2989 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2990 /* Low power mode is controlled by GPIO 2 */
2991 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2992 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2993 /* The PHY reset is controlled by GPIO 1 */
2994 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2995 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2998 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2999 /* The PHY reset is controlled by GPIO 1 */
3000 /* fake the port number to cancel the swap done in set_gpio() */
3002 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
3003 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
3004 port = (swap_val && swap_override) ^ 1;
3005 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3006 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
3012 bnx2x_fan_failure(bp);
3015 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3016 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3017 bnx2x_acquire_phy_lock(bp);
3018 bnx2x_handle_module_detect_int(&bp->link_params);
3019 bnx2x_release_phy_lock(bp);
3022 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3024 val = REG_RD(bp, reg_offset);
3025 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3026 REG_WR(bp, reg_offset, val);
3028 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
3029 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
3034 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3038 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
3040 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3041 BNX2X_ERR("DB hw attention 0x%x\n", val);
3042 /* DORQ discard attention */
3044 BNX2X_ERR("FATAL error from DORQ\n");
3047 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3049 int port = BP_PORT(bp);
3052 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3053 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3055 val = REG_RD(bp, reg_offset);
3056 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3057 REG_WR(bp, reg_offset, val);
3059 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
3060 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
3065 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3069 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3071 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3072 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3073 /* CFC error attention */
3075 BNX2X_ERR("FATAL error from CFC\n");
3078 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3080 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3081 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3082 /* RQ_USDMDP_FIFO_OVERFLOW */
3084 BNX2X_ERR("FATAL error from PXP\n");
3087 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3089 int port = BP_PORT(bp);
3092 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3093 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3095 val = REG_RD(bp, reg_offset);
3096 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3097 REG_WR(bp, reg_offset, val);
3099 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
3100 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3105 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3109 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3111 if (attn & BNX2X_PMF_LINK_ASSERT) {
3112 int func = BP_FUNC(bp);
3114 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3115 bp->mf_config = SHMEM_RD(bp,
3116 mf_cfg.func_mf_config[func].config);
3117 val = SHMEM_RD(bp, func_mb[func].drv_status);
3118 if (val & DRV_STATUS_DCC_EVENT_MASK)
3120 (val & DRV_STATUS_DCC_EVENT_MASK));
3121 bnx2x__link_status_update(bp);
3122 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3123 bnx2x_pmf_update(bp);
3125 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3127 BNX2X_ERR("MC assert!\n");
3128 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3129 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3131 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3134 } else if (attn & BNX2X_MCP_ASSERT) {
3136 BNX2X_ERR("MCP assert!\n");
3137 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
3141 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3144 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3145 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3146 if (attn & BNX2X_GRC_TIMEOUT) {
3147 val = CHIP_IS_E1H(bp) ?
3148 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3149 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3151 if (attn & BNX2X_GRC_RSV) {
3152 val = CHIP_IS_E1H(bp) ?
3153 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3154 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3156 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3160 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3161 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3164 #define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3165 #define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3166 #define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3167 #define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3168 #define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3169 #define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
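/* Layout of BNX2X_MISC_GEN_REG as used by the recovery helpers below
 * (illustrative): bits [15:0] hold the load counter, bit 16
 * (RESET_DONE_FLAG_SHIFT) is set while a global reset is in progress
 * and cleared again by bnx2x_set_reset_done().  E.g. 0x00010002 reads
 * as "recovery in progress, two driver instances loaded". */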
3171 * should be run under rtnl lock
3173 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3175 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3176 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3177 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3183 * should be run under rtnl lock
3185 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3187 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3188 val |= (1 << RESET_DONE_FLAG_SHIFT);
3189 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3195 * should be run under rtnl lock
3197 static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3201 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3205 * should be run under rtnl lock
3207 static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3209 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3211 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3213 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3214 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3220 * should be run under rtnl lock
3222 static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3224 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3226 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3228 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3229 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3237 * should be run under rtnl lock
3239 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3241 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3244 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3246 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3247 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3250 static inline void _print_next_block(int idx, const char *blk)
pr_cont("%s%s", idx ? ", " : "", blk);
3257 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3261 for (i = 0; sig; i++) {
3262 cur_bit = ((u32)0x1 << i);
3263 if (sig & cur_bit) {
3265 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3266 _print_next_block(par_num++, "BRB");
3268 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3269 _print_next_block(par_num++, "PARSER");
3271 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3272 _print_next_block(par_num++, "TSDM");
3274 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3275 _print_next_block(par_num++, "SEARCHER");
3277 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3278 _print_next_block(par_num++, "TSEMI");
3290 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3294 for (i = 0; sig; i++) {
3295 cur_bit = ((u32)0x1 << i);
3296 if (sig & cur_bit) {
3298 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3299 _print_next_block(par_num++, "PBCLIENT");
3301 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3302 _print_next_block(par_num++, "QM");
3304 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3305 _print_next_block(par_num++, "XSDM");
3307 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3308 _print_next_block(par_num++, "XSEMI");
3310 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3311 _print_next_block(par_num++, "DOORBELLQ");
3313 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3314 _print_next_block(par_num++, "VAUX PCI CORE");
3316 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3317 _print_next_block(par_num++, "DEBUG");
3319 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3320 _print_next_block(par_num++, "USDM");
3322 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3323 _print_next_block(par_num++, "USEMI");
3325 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3326 _print_next_block(par_num++, "UPB");
3328 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3329 _print_next_block(par_num++, "CSDM");
3341 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3345 for (i = 0; sig; i++) {
3346 cur_bit = ((u32)0x1 << i);
3347 if (sig & cur_bit) {
3349 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3350 _print_next_block(par_num++, "CSEMI");
3352 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3353 _print_next_block(par_num++, "PXP");
3355 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3356 _print_next_block(par_num++,
3357 "PXPPCICLOCKCLIENT");
3359 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3360 _print_next_block(par_num++, "CFC");
3362 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3363 _print_next_block(par_num++, "CDU");
3365 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3366 _print_next_block(par_num++, "IGU");
3368 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3369 _print_next_block(par_num++, "MISC");
3381 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3385 for (i = 0; sig; i++) {
3386 cur_bit = ((u32)0x1 << i);
3387 if (sig & cur_bit) {
3389 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3390 _print_next_block(par_num++, "MCP ROM");
3392 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3393 _print_next_block(par_num++, "MCP UMP RX");
3395 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3396 _print_next_block(par_num++, "MCP UMP TX");
3398 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3399 _print_next_block(par_num++, "MCP SCPAD");
3411 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3414 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3415 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3417 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3418 "[0]:0x%08x [1]:0x%08x "
3419 "[2]:0x%08x [3]:0x%08x\n",
3420 sig0 & HW_PRTY_ASSERT_SET_0,
3421 sig1 & HW_PRTY_ASSERT_SET_1,
3422 sig2 & HW_PRTY_ASSERT_SET_2,
3423 sig3 & HW_PRTY_ASSERT_SET_3);
3424 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3426 par_num = bnx2x_print_blocks_with_parity0(
3427 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3428 par_num = bnx2x_print_blocks_with_parity1(
3429 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3430 par_num = bnx2x_print_blocks_with_parity2(
3431 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3432 par_num = bnx2x_print_blocks_with_parity3(
3433 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
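/* Example of the resulting console line (illustrative):
 *   "eth0: Parity errors detected in blocks: BRB, QM, MCP ROM"
 * - the printk above opens the line and each _print_next_block() call
 * appends one comma-separated block name. */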
3440 static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
3442 struct attn_route attn;
3443 int port = BP_PORT(bp);
3445 attn.sig[0] = REG_RD(bp,
3446 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3448 attn.sig[1] = REG_RD(bp,
3449 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3451 attn.sig[2] = REG_RD(bp,
3452 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3454 attn.sig[3] = REG_RD(bp,
3455 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3458 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3462 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3464 struct attn_route attn, *group_mask;
3465 int port = BP_PORT(bp);
3471 /* need to take HW lock because MCP or other port might also
3472 try to handle this event */
3473 bnx2x_acquire_alr(bp);
3475 if (bnx2x_chk_parity_attn(bp)) {
3476 bp->recovery_state = BNX2X_RECOVERY_INIT;
3477 bnx2x_set_reset_in_progress(bp);
3478 schedule_delayed_work(&bp->reset_task, 0);
3479 /* Disable HW interrupts */
3480 bnx2x_int_disable(bp);
3481 bnx2x_release_alr(bp);
3482 /* In case of parity errors don't handle attentions so that
3483 * the other function can also "see" the parity errors.
3488 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3489 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3490 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3491 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3492 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3493 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3495 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3496 if (deasserted & (1 << index)) {
3497 group_mask = &bp->attn_group[index];
3499 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3500 index, group_mask->sig[0], group_mask->sig[1],
3501 group_mask->sig[2], group_mask->sig[3]);
3503 bnx2x_attn_int_deasserted3(bp,
3504 attn.sig[3] & group_mask->sig[3]);
3505 bnx2x_attn_int_deasserted1(bp,
3506 attn.sig[1] & group_mask->sig[1]);
3507 bnx2x_attn_int_deasserted2(bp,
3508 attn.sig[2] & group_mask->sig[2]);
3509 bnx2x_attn_int_deasserted0(bp,
3510 attn.sig[0] & group_mask->sig[0]);
3514 bnx2x_release_alr(bp);
3516 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3519 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3521 REG_WR(bp, reg_addr, val);
3523 if (~bp->attn_state & deasserted)
3524 BNX2X_ERR("IGU ERROR\n");
3526 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3527 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3529 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3530 aeu_mask = REG_RD(bp, reg_addr);
3532 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3533 aeu_mask, deasserted);
3534 aeu_mask |= (deasserted & 0x3ff);
3535 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3537 REG_WR(bp, reg_addr, aeu_mask);
3538 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3540 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3541 bp->attn_state &= ~deasserted;
3542 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3545 static void bnx2x_attn_int(struct bnx2x *bp)
3547 /* read local copy of bits */
3548 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3550 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3552 u32 attn_state = bp->attn_state;
3554 /* look for changed bits */
3555 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3556 u32 deasserted = ~attn_bits & attn_ack & attn_state;
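/* Worked example (illustrative): attn_bits = 0x2, attn_ack = 0x0,
 * attn_state = 0x0 gives asserted = 0x2, deasserted = 0x0 - a new
 * attention to handle.  Later, attn_bits = 0x0 with attn_ack = 0x2 and
 * attn_state = 0x2 gives deasserted = 0x2, completing the cycle. */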
3559 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3560 attn_bits, attn_ack, asserted, deasserted);
3562 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
3563 BNX2X_ERR("BAD attention state\n");
3565 /* handle bits that were raised */
3567 bnx2x_attn_int_asserted(bp, asserted);
3570 bnx2x_attn_int_deasserted(bp, deasserted);
3573 static void bnx2x_sp_task(struct work_struct *work)
3575 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3578 /* Return here if interrupt is disabled */
3579 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3580 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3584 status = bnx2x_update_dsb_idx(bp);
3585 /* if (status == 0) */
3586 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
3588 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
3596 /* CStorm events: STAT_QUERY */
3598 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
3602 if (unlikely(status))
3603 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3606 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3608 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3610 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3612 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3614 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3618 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3620 struct net_device *dev = dev_instance;
3621 struct bnx2x *bp = netdev_priv(dev);
3623 /* Return here if interrupt is disabled */
3624 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3625 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3629 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3631 #ifdef BNX2X_STOP_ON_ERROR
3632 if (unlikely(bp->panic))
3638 struct cnic_ops *c_ops;
3641 c_ops = rcu_dereference(bp->cnic_ops);
3643 c_ops->cnic_handler(bp->cnic_data, NULL);
3647 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3652 /* end of slow path */
3656 /****************************************************************************
3658 ****************************************************************************/
3660 /* sum[hi:lo] += add[hi:lo] */
3661 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3664 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3667 /* difference = minuend - subtrahend */
3668 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3670 if (m_lo < s_lo) { \
3672 d_hi = m_hi - s_hi; \
3674 /* we can 'loan' 1 */ \
3676 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3678 /* m_hi <= s_hi */ \
3683 /* m_lo >= s_lo */ \
3684 if (m_hi < s_hi) { \
3688 /* m_hi >= s_hi */ \
3689 d_hi = m_hi - s_hi; \
3690 d_lo = m_lo - s_lo; \
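/* Borrow example (illustrative) for DIFF_64: 0x00000001_00000000 minus
 * 0x00000000_00000001 takes the m_lo < s_lo path and "loans" 1 from the
 * high word, yielding d_hi = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 =
 * 0xffffffff, i.e. the true 64-bit difference. */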
3695 #define UPDATE_STAT64(s, t) \
3697 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3698 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3699 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3700 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3701 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3702 pstats->mac_stx[1].t##_lo, diff.lo); \
3705 #define UPDATE_STAT64_NIG(s, t) \
3707 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3708 diff.lo, new->s##_lo, old->s##_lo); \
3709 ADD_64(estats->t##_hi, diff.hi, \
3710 estats->t##_lo, diff.lo); \
3713 /* sum[hi:lo] += add */
3714 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3717 s_hi += (s_lo < a) ? 1 : 0; \
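/* Carry example (illustrative) for ADD_EXTEND_64: adding a = 2 to
 * s = 0x00000000_ffffffff wraps s_lo to 1; since 1 < 2 the carry test
 * above fires and the result is 0x00000001_00000001. */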
3720 #define UPDATE_EXTEND_STAT(s) \
3722 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3723 pstats->mac_stx[1].s##_lo, \
3727 #define UPDATE_EXTEND_TSTAT(s, t) \
3729 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3730 old_tclient->s = tclient->s; \
3731 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3734 #define UPDATE_EXTEND_USTAT(s, t) \
3736 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3737 old_uclient->s = uclient->s; \
3738 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3741 #define UPDATE_EXTEND_XSTAT(s, t) \
3743 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3744 old_xclient->s = xclient->s; \
3745 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3748 /* minuend -= subtrahend */
3749 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3751 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3754 /* minuend[hi:lo] -= subtrahend */
3755 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3757 SUB_64(m_hi, 0, m_lo, s); \
3760 #define SUB_EXTEND_USTAT(s, t) \
3762 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3763 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3767 * General service functions
3770 static inline long bnx2x_hilo(u32 *hiref)
3772 u32 lo = *(hiref + 1);
3773 #if (BITS_PER_LONG == 64)
u32 hi = *hiref;
3776 return HILO_U64(hi, lo);
#else
return lo;
#endif
3783 * Init service functions
3786 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3788 if (!bp->stats_pending) {
3789 struct eth_query_ramrod_data ramrod_data = {0};
3792 ramrod_data.drv_counter = bp->stats_counter++;
3793 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3794 for_each_queue(bp, i)
3795 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3797 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3798 ((u32 *)&ramrod_data)[1],
3799 ((u32 *)&ramrod_data)[0], 0);
3801 /* stats ramrod has its own slot on the spq */
3803 bp->stats_pending = 1;
3808 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3810 struct dmae_command *dmae = &bp->stats_dmae;
3811 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3813 *stats_comp = DMAE_COMP_VAL;
3814 if (CHIP_REV_IS_SLOW(bp))
3818 if (bp->executer_idx) {
3819 int loader_idx = PMF_DMAE_C(bp);
3821 memset(dmae, 0, sizeof(struct dmae_command));
3823 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3824 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3825 DMAE_CMD_DST_RESET |
3827 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3829 DMAE_CMD_ENDIANITY_DW_SWAP |
3831 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3833 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3834 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3835 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3836 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3837 sizeof(struct dmae_command) *
3838 (loader_idx + 1)) >> 2;
3839 dmae->dst_addr_hi = 0;
3840 dmae->len = sizeof(struct dmae_command) >> 2;
3843 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3844 dmae->comp_addr_hi = 0;
3848 bnx2x_post_dmae(bp, dmae, loader_idx);
3850 } else if (bp->func_stx) {
3852 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3856 static int bnx2x_stats_comp(struct bnx2x *bp)
3858 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3862 while (*stats_comp != DMAE_COMP_VAL) {
3864 BNX2X_ERR("timeout waiting for stats finished\n");
3874 * Statistics service functions
3877 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3879 struct dmae_command *dmae;
3881 int loader_idx = PMF_DMAE_C(bp);
3882 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3885 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3886 BNX2X_ERR("BUG!\n");
3890 bp->executer_idx = 0;
3892 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3894 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3896 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3898 DMAE_CMD_ENDIANITY_DW_SWAP |
3900 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3901 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3903 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3904 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3905 dmae->src_addr_lo = bp->port.port_stx >> 2;
3906 dmae->src_addr_hi = 0;
3907 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3908 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3909 dmae->len = DMAE_LEN32_RD_MAX;
3910 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3911 dmae->comp_addr_hi = 0;
3914 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3915 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3916 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3917 dmae->src_addr_hi = 0;
3918 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3919 DMAE_LEN32_RD_MAX * 4);
3920 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3921 DMAE_LEN32_RD_MAX * 4);
3922 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3923 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3925 dmae->comp_val = DMAE_COMP_VAL;
3928 bnx2x_hw_stats_post(bp);
3929 bnx2x_stats_comp(bp);
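/* The port stats area exceeds what a single DMAE read may carry, so the
 * transfer above is split at DMAE_LEN32_RD_MAX dwords: the first
 * command completes to GRC and chains to the loader, while only the
 * second completes to PCI memory where bnx2x_stats_comp() polls. */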
3932 static void bnx2x_port_stats_init(struct bnx2x *bp)
3934 struct dmae_command *dmae;
3935 int port = BP_PORT(bp);
3936 int vn = BP_E1HVN(bp);
3938 int loader_idx = PMF_DMAE_C(bp);
3940 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3943 if (!bp->link_vars.link_up || !bp->port.pmf) {
3944 BNX2X_ERR("BUG!\n");
3948 bp->executer_idx = 0;
3951 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3952 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3953 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3955 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3957 DMAE_CMD_ENDIANITY_DW_SWAP |
3959 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3960 (vn << DMAE_CMD_E1HVN_SHIFT));
3962 if (bp->port.port_stx) {
3964 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3965 dmae->opcode = opcode;
3966 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3967 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3968 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3969 dmae->dst_addr_hi = 0;
3970 dmae->len = sizeof(struct host_port_stats) >> 2;
3971 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3972 dmae->comp_addr_hi = 0;
3978 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979 dmae->opcode = opcode;
3980 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982 dmae->dst_addr_lo = bp->func_stx >> 2;
3983 dmae->dst_addr_hi = 0;
3984 dmae->len = sizeof(struct host_func_stats) >> 2;
3985 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3986 dmae->comp_addr_hi = 0;
3991 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3992 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3993 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3995 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3997 DMAE_CMD_ENDIANITY_DW_SWAP |
3999 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4000 (vn << DMAE_CMD_E1HVN_SHIFT));
4002 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
4004 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4005 NIG_REG_INGRESS_BMAC0_MEM);
4007 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4008 BIGMAC_REGISTER_TX_STAT_GTBYT */
4009 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4010 dmae->opcode = opcode;
4011 dmae->src_addr_lo = (mac_addr +
4012 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4013 dmae->src_addr_hi = 0;
4014 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4015 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4016 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4017 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4018 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4019 dmae->comp_addr_hi = 0;
4022 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4023 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4024 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4025 dmae->opcode = opcode;
4026 dmae->src_addr_lo = (mac_addr +
4027 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4028 dmae->src_addr_hi = 0;
4029 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4030 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4031 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4032 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4033 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4034 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4035 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4036 dmae->comp_addr_hi = 0;
4039 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4041 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4043 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4044 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045 dmae->opcode = opcode;
4046 dmae->src_addr_lo = (mac_addr +
4047 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4048 dmae->src_addr_hi = 0;
4049 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4050 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4051 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4052 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4053 dmae->comp_addr_hi = 0;
4056 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4057 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4058 dmae->opcode = opcode;
4059 dmae->src_addr_lo = (mac_addr +
4060 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4061 dmae->src_addr_hi = 0;
4062 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4063 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4064 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4065 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4067 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4068 dmae->comp_addr_hi = 0;
4071 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4072 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073 dmae->opcode = opcode;
4074 dmae->src_addr_lo = (mac_addr +
4075 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4076 dmae->src_addr_hi = 0;
4077 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4078 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4079 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4080 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4081 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4082 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083 dmae->comp_addr_hi = 0;
4088 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4089 dmae->opcode = opcode;
4090 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4091 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4092 dmae->src_addr_hi = 0;
4093 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4094 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4095 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4096 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4097 dmae->comp_addr_hi = 0;
4100 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4101 dmae->opcode = opcode;
4102 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4103 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4104 dmae->src_addr_hi = 0;
4105 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4106 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4107 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4108 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4109 dmae->len = (2*sizeof(u32)) >> 2;
4110 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4111 dmae->comp_addr_hi = 0;
4114 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4115 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4116 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4117 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4119 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4121 DMAE_CMD_ENDIANITY_DW_SWAP |
4123 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4124 (vn << DMAE_CMD_E1HVN_SHIFT));
4125 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4126 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
4127 dmae->src_addr_hi = 0;
4128 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4129 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4130 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4131 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4132 dmae->len = (2*sizeof(u32)) >> 2;
4133 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4134 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4135 dmae->comp_val = DMAE_COMP_VAL;
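/* Note the chaining pattern used throughout bnx2x_port_stats_init():
 * every intermediate command completes to a dmae_reg_go_c[] "go"
 * register so the loader kicks the next command automatically, and only
 * the final command above completes to host memory with DMAE_COMP_VAL
 * for bnx2x_stats_comp() to observe. */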
4140 static void bnx2x_func_stats_init(struct bnx2x *bp)
4142 struct dmae_command *dmae = &bp->stats_dmae;
4143 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4146 if (!bp->func_stx) {
4147 BNX2X_ERR("BUG!\n");
4151 bp->executer_idx = 0;
4152 memset(dmae, 0, sizeof(struct dmae_command));
4154 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4155 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4156 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4158 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4160 DMAE_CMD_ENDIANITY_DW_SWAP |
4162 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4163 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4164 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4165 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4166 dmae->dst_addr_lo = bp->func_stx >> 2;
4167 dmae->dst_addr_hi = 0;
4168 dmae->len = sizeof(struct host_func_stats) >> 2;
4169 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4170 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4171 dmae->comp_val = DMAE_COMP_VAL;
4176 static void bnx2x_stats_start(struct bnx2x *bp)
4179 bnx2x_port_stats_init(bp);
4181 else if (bp->func_stx)
4182 bnx2x_func_stats_init(bp);
4184 bnx2x_hw_stats_post(bp);
4185 bnx2x_storm_stats_post(bp);
4188 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4190 bnx2x_stats_comp(bp);
4191 bnx2x_stats_pmf_update(bp);
4192 bnx2x_stats_start(bp);
4195 static void bnx2x_stats_restart(struct bnx2x *bp)
4197 bnx2x_stats_comp(bp);
4198 bnx2x_stats_start(bp);
4201 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4203 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4204 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4205 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4211 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4212 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4213 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4214 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4215 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4216 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
4217 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
4218 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
4219 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4220 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4221 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4222 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4223 UPDATE_STAT64(tx_stat_gt127,
4224 tx_stat_etherstatspkts65octetsto127octets);
4225 UPDATE_STAT64(tx_stat_gt255,
4226 tx_stat_etherstatspkts128octetsto255octets);
4227 UPDATE_STAT64(tx_stat_gt511,
4228 tx_stat_etherstatspkts256octetsto511octets);
4229 UPDATE_STAT64(tx_stat_gt1023,
4230 tx_stat_etherstatspkts512octetsto1023octets);
4231 UPDATE_STAT64(tx_stat_gt1518,
4232 tx_stat_etherstatspkts1024octetsto1522octets);
4233 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4234 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4235 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4236 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4237 UPDATE_STAT64(tx_stat_gterr,
4238 tx_stat_dot3statsinternalmactransmiterrors);
4239 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4241 estats->pause_frames_received_hi =
4242 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4243 estats->pause_frames_received_lo =
4244 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4246 estats->pause_frames_sent_hi =
4247 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4248 estats->pause_frames_sent_lo =
4249 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
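/* Per the UPDATE_STAT64 macro above, mac_stx[0] caches the raw BMAC
 * counters as last read from hardware while mac_stx[1] accumulates the
 * deltas; the pause-frame totals copied into estats here therefore come
 * from the accumulated mac_stx[1] copy. */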
4252 static void bnx2x_emac_stats_update(struct bnx2x *bp)
4254 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4255 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4256 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4258 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4259 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4260 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4261 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4262 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4263 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4264 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4265 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4266 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4267 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4268 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4269 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4270 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4271 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4272 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4273 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4274 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4275 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4276 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4277 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4278 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4279 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4280 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4281 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4282 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4283 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4284 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4285 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4286 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4287 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4288 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4290 estats->pause_frames_received_hi =
4291 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4292 estats->pause_frames_received_lo =
4293 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4294 ADD_64(estats->pause_frames_received_hi,
4295 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4296 estats->pause_frames_received_lo,
4297 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4299 estats->pause_frames_sent_hi =
4300 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4301 estats->pause_frames_sent_lo =
4302 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4303 ADD_64(estats->pause_frames_sent_hi,
4304 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4305 estats->pause_frames_sent_lo,
4306 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4309 static int bnx2x_hw_stats_update(struct bnx2x *bp)
4311 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4312 struct nig_stats *old = &(bp->port.old_nig_stats);
4313 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4314 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4320 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4321 bnx2x_bmac_stats_update(bp);
4323 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4324 bnx2x_emac_stats_update(bp);
4326 else { /* unreached */
4327 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4331 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4332 new->brb_discard - old->brb_discard);
4333 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4334 new->brb_truncate - old->brb_truncate);
4336 UPDATE_STAT64_NIG(egress_mac_pkt0,
4337 etherstatspkts1024octetsto1522octets);
4338 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
4340 memcpy(old, new, sizeof(struct nig_stats));
4342 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4343 sizeof(struct mac_stx));
4344 estats->brb_drop_hi = pstats->brb_drop_hi;
4345 estats->brb_drop_lo = pstats->brb_drop_lo;
4347 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
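/* A sketch of the intent here (as the surrounding code suggests):
 * host_port_stats_start/_end bracket the snapshot handed to the
 * management firmware; bumping _end and copying it into _start marks
 * the block above as internally consistent for the reader. */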
4349 if (!BP_NOMCP(bp)) {
4351 SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4352 if (nig_timer_max != estats->nig_timer_max) {
4353 estats->nig_timer_max = nig_timer_max;
4354 BNX2X_ERR("NIG timer max (%u)\n",
4355 estats->nig_timer_max);
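/* Fold the per-client x/t/u-storm firmware statistics into the per-queue
 * and per-function totals. Each storm stamps its snapshot with a
 * stats_counter; a snapshot is accepted only when that stamp is exactly one
 * behind bp->stats_counter, i.e. it answers the most recently posted query.
 */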
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (0x%x) != stats_counter (0x%x)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->bcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->mcast_no_buff_bytes.lo));

		SUB_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(uclient->ucast_no_buff_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
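/* Translate the accumulated 64-bit driver statistics into the generic
 * struct net_device_stats counters reported to the stack.
 */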
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}
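/* Periodic statistics update: runs once the previous DMAE pass has
 * completed, folds in MAC/NIG and storm snapshots, refreshes the netdev
 * counters, and finally posts the next hardware and storm queries.
 */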
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (netif_msg_timer(bp)) {
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		int i;

		printk(KERN_DEBUG "%s: brb drops %u  brb truncate %u\n",
		       bp->dev->name,
		       estats->brb_drop_lo, estats->brb_truncate_lo);

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;

			printk(KERN_DEBUG "%s: rx usage(%4u)  *rx_cons_sb(%u)"
					  "  rx pkt(%lu)  rx calls(%lu %lu)\n",
			       fp->name, (le16_to_cpu(*fp->rx_cons_sb) -
			       fp->rx_comp_cons),
			       le16_to_cpu(*fp->rx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_received_hi),
			       fp->rx_calls, fp->rx_pkt);
		}

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];
			struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
			struct netdev_queue *txq =
				netdev_get_tx_queue(bp->dev, i);

			printk(KERN_DEBUG "%s: tx avail(%4u)  *tx_cons_sb(%u)"
					  "  tx pkt(%lu) tx calls (%lu)"
					  "  %s (Xoff events %u)\n",
			       fp->name, bnx2x_tx_avail(fp),
			       le16_to_cpu(*fp->tx_cons_sb),
			       bnx2x_hilo(&qstats->
					  total_unicast_packets_transmitted_hi),
			       fp->tx_pkt,
			       (netif_tx_queue_stopped(txq) ? "Xoff" : "Xon"),
			       qstats->driver_xoff);
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
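/* Flush the final port and function statistics to the shared-memory areas
 * (port_stx/func_stx) via DMAE before statistics collection stops. When
 * both areas exist the two copies are chained through the DMAE loader.
 */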
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}
static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}
static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
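/* Example walk through the table above: in STATS_STATE_ENABLED, a
 * STATS_EVENT_UPDATE runs bnx2x_stats_update() and stays in ENABLED, while
 * STATS_EVENT_STOP runs bnx2x_stats_stop() and moves to STATS_STATE_DISABLED.
 */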
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state;

	if (unlikely(bp->panic))
		return;

	/* Protect a state change flow */
	spin_lock_bh(&bp->stats_lock);
	state = bp->stats_state;
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;
	spin_unlock_bh(&bp->stats_lock);

	bnx2x_stats_stm[state][event].action(bp);

	if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
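/* One-time statistics setup at load: discover the shared-memory statistics
 * addresses, latch the current NIG counters as a baseline and zero all
 * driver-side accumulators before the statistics state machine is armed.
 */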
static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */
static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}
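/* A fastpath status block has a USTORM half (Rx indices) and a CSTORM half
 * (Tx indices); both halves below are pointed at the same DMA mapping and
 * all their HC indices start out disabled.
 */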
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
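/* Interrupt coalescing: rx_ticks/tx_ticks are scaled against the BNX2X_BTR
 * baseline tick and written as per-index HC timeouts; an index whose
 * resulting timeout is zero is marked disabled via the HC_DISABLE write.
 */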
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}
static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
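/* Rx ring setup: one BD ring, one completion (CQE) ring and, when TPA is
 * enabled, an SGE ring per queue. The last entries of each ring page are
 * "next page" pointers chaining the pages into a circular ring.
 */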
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				dma_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min_t(u16, NUM_RCQ_RINGS*RCQ_DESC_CNT,
					 cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}
static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
					   0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}
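/* The RSS indirection table spreads incoming flows across queues by mapping
 * each table slot to a client id in simple round-robin order over
 * bp->num_queues.
 */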
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}
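/* Per-function internal memory init: RSS/TPA configuration, storm rx-mode
 * and per-client statistics reset, statistics DMA addresses, CQE page
 * mappings, dropless flow-control thresholds (E1H) and rate-shaping state.
 */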
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	tstorm_config.config_flags = RSS_FLAGS(bp);

	if (is_multi(bp))
		tstorm_config.rss_result_mask = MULTI_MASK;

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
				   SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store cmng structures to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " un-compression\n");
	return -ENOMEM;
}
static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
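/* Parity mask table: a zero value appears to unmask (enable) every parity
 * source in the block, 0xffffffff leaves the block fully masked, and
 * intermediate values mask individual known bits (noted per entry below).
 */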
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i, mask_arr_len =
		sizeof(bnx2x_parity_mask)/(sizeof(bnx2x_parity_mask[0]));

	for (i = 0; i < mask_arr_len; i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
			bnx2x_parity_mask[i].mask);
}
6398 static void bnx2x_reset_common(struct bnx2x *bp)
6401 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6403 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6406 static void bnx2x_init_pxp(struct bnx2x *bp)
6409 int r_order, w_order;
6411 pci_read_config_word(bp->pdev,
6412 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
6413 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6414 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6416 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6418 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6422 bnx2x_init_pxp_arb(bp, r_order, w_order);
6425 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6435 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6436 SHARED_HW_CFG_FAN_FAILURE_MASK;
6438 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6442 * The fan failure mechanism is usually related to the PHY type since
6443 * the power consumption of the board is affected by the PHY. Currently,
6444 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6446 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6447 for (port = PORT_0; port < PORT_MAX; port++) {
6449 SHMEM_RD(bp, dev_info.port_hw_config[port].
6450 external_phy_config) &
6451 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
6454 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
6456 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
6458 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
6461 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6463 if (is_required == 0)
6466 /* Fan failure is indicated by SPIO 5 */
6467 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6468 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6470 /* set to active low mode */
6471 val = REG_RD(bp, MISC_REG_SPIO_INT);
6472 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6473 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6474 REG_WR(bp, MISC_REG_SPIO_INT, val);
6476 /* enable interrupt to signal the IGU */
6477 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6478 val |= (1 << MISC_REGISTERS_SPIO_5);
6479 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6482 static int bnx2x_init_common(struct bnx2x *bp)
6489 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6491 bnx2x_reset_common(bp);
6492 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
6495 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
6496 if (CHIP_IS_E1H(bp))
6497 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
6499 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6501 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6503 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
6504 if (CHIP_IS_E1(bp)) {
6505 /* enable HW interrupt from PXP on USDM overflow
6506 bit 16 on INT_MASK_0 */
6507 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6510 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
6514 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6515 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6516 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6517 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6518 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6519 /* make sure this value is 0 */
6520 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
6522 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6523 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6524 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6525 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6526 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6529 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6531 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6532 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6533 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6536 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6537 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
6539 /* let the HW do it's magic ... */
6541 /* finish PXP init */
6542 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6544 BNX2X_ERR("PXP2 CFG failed\n");
6547 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6549 BNX2X_ERR("PXP2 RD_INIT failed\n");
6553 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6554 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6556 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
6558 /* clean the DMAE memory */
6560 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6562 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6564 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6565 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
6567 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6568 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6569 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6570 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6572 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6577 for (i = 0; i < 64; i++) {
6578 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6579 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6581 if (CHIP_IS_E1H(bp)) {
6582 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6583 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6588 /* soft reset pulse */
6589 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6590 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6593 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6596 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
6597 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6598 if (!CHIP_REV_IS_SLOW(bp)) {
6599 /* enable hw interrupt from doorbell Q */
6600 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6603 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6604 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6605 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6608 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6610 if (CHIP_IS_E1H(bp))
6611 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6613 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6614 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6615 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6616 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
6618 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6619 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6620 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6621 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6623 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6624 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6625 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6626 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
6629 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6631 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6634 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6635 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6636 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
6638 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6639 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
6640 REG_WR(bp, i, random32());
6641 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6643 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6644 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6645 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6646 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6647 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6648 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6649 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6650 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6651 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6652 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6654 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6656 if (sizeof(union cdu_context) != 1024)
6657 /* we currently assume that a context is 1024 bytes */
6658 dev_alert(&bp->pdev->dev, "please adjust the size "
6659 "of cdu_context(%ld)\n",
6660 (long)sizeof(union cdu_context));
6662 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
6663 val = (4 << 24) + (0 << 12) + 1024;
6664 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6666 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
6667 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6668 /* enable context validation interrupt from CFC */
6669 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6671 /* set the thresholds to prevent CFC/CDU race */
6672 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
6674 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6675 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
6677 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
6678 /* Reset PCIE errors for debug */
6679 REG_WR(bp, 0x2814, 0xffffffff);
6680 REG_WR(bp, 0x3820, 0xffffffff);
6682 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
6683 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
6684 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
6685 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
6687 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
6688 if (CHIP_IS_E1H(bp)) {
6689 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6690 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6693 if (CHIP_REV_IS_SLOW(bp))
6696 /* finish CFC init */
6697 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6699 BNX2X_ERR("CFC LL_INIT failed\n");
6702 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6704 BNX2X_ERR("CFC AC_INIT failed\n");
6707 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6709 BNX2X_ERR("CFC CAM_INIT failed\n");
6712 REG_WR(bp, CFC_REG_DEBUG0, 0);
6714 /* read NIG statistic
6715 to see if this is our first up since powerup */
6716 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6717 val = *bnx2x_sp(bp, wb_data[0]);
6719 /* do internal memory self test */
6720 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6721 BNX2X_ERR("internal mem self test failed\n");
6725 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6726 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6728 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6729 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6730 bp->port.need_hw_lock = 1;
6737 bnx2x_setup_fan_failure_detection(bp);
6739 /* clear PXP2 attentions */
6740 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
6742 enable_blocks_attention(bp);
6743 if (CHIP_PARITY_SUPPORTED(bp))
6744 enable_blocks_parity(bp);
6746 if (!BP_NOMCP(bp)) {
6747 bnx2x_acquire_phy_lock(bp);
6748 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6749 bnx2x_release_phy_lock(bp);
6751 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6756 static int bnx2x_init_port(struct bnx2x *bp)
6758 int port = BP_PORT(bp);
6759 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
6763 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
6765 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6767 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
6768 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
6770 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6771 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6772 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6773 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6776 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6778 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6779 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6780 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6783 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6785 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
6786 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6787 /* no pause for emulation and FPGA */
6792 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6793 else if (bp->dev->mtu > 4096) {
6794 if (bp->flags & ONE_PORT_FLAG)
6798 /* (24*1024 + val*4)/256 */
6799 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6802 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6803 high = low + 56; /* 14*1024/256 */
6805 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6806 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6809 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
6811 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6812 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6813 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6814 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6816 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6817 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6818 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6819 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6821 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6822 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6824 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6826 /* configure PBF to work without PAUSE mtu 9000 */
6827 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6829 /* update threshold */
6830 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6831 /* update init credit */
6832 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
6835 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6837 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6840 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6842 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6843 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6845 if (CHIP_IS_E1(bp)) {
6846 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6847 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6849 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6851 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6852 /* init aeu_mask_attn_func_0/1:
6853 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6854 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6855 * bits 4-7 are used for "per vn group attention" */
6856 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6857 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6859 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6860 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6861 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6862 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6863 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6865 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6867 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6869 if (CHIP_IS_E1H(bp)) {
6870 /* 0x2 disable e1hov, 0x1 enable */
6871 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6872 (IS_E1HMF(bp) ? 0x1 : 0x2));
6875 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6876 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6877 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6881 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6882 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6884 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6885 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6887 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6889 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6890 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6892 /* The GPIO should be swapped if the swap register is
6894 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6895 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6897 /* Select function upon port-swap configuration */
6899 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6900 aeu_gpio_mask = (swap_val && swap_override) ?
6901 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6902 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6904 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6905 aeu_gpio_mask = (swap_val && swap_override) ?
6906 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6907 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6909 val = REG_RD(bp, offset);
6910 /* add GPIO3 to group */
6911 val |= aeu_gpio_mask;
6912 REG_WR(bp, offset, val);
6916 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6917 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6918 /* add SPIO 5 to group 0 */
6920 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6921 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6922 val = REG_RD(bp, reg_addr);
6923 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6924 REG_WR(bp, reg_addr, val);
6932 bnx2x__link_reset(bp);
6937 #define ILT_PER_FUNC (768/2)
6938 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6939 /* the phys address is shifted right 12 bits and has an added
6940 1=valid bit added to the 53rd bit
6941 then since this is a wide register(TM)
6942 we split it into two 32 bit writes
6944 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6945 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6946 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6947 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6950 #define CNIC_ILT_LINES 127
6951 #define CNIC_CTX_PER_ILT 16
6953 #define CNIC_ILT_LINES 0
6956 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6960 if (CHIP_IS_E1H(bp))
6961 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6963 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6965 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6968 static int bnx2x_init_func(struct bnx2x *bp)
6970 int port = BP_PORT(bp);
6971 int func = BP_FUNC(bp);
6975 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
6977 /* set MSI reconfigure capability */
6978 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6979 val = REG_RD(bp, addr);
6980 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6981 REG_WR(bp, addr, val);
6983 i = FUNC_ILT_BASE(func);
6985 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6986 if (CHIP_IS_E1H(bp)) {
6987 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6988 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6990 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6991 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6994 i += 1 + CNIC_ILT_LINES;
6995 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6997 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6999 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
7000 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
7004 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
7006 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
7008 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
7009 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
7013 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
7015 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
7017 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
7018 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
7021 /* tell the searcher where the T2 table is */
7022 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
7024 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
7025 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
7027 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
7028 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
7029 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
7031 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
7034 if (CHIP_IS_E1H(bp)) {
7035 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
7036 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
7037 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
7038 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
7039 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
7040 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
7041 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
7042 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
7043 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
7045 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
7046 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
7049 /* HC init per function */
7050 if (CHIP_IS_E1H(bp)) {
7051 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7053 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7054 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7056 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
7058 /* Reset PCIE errors for debug */
7059 REG_WR(bp, 0x2114, 0xffffffff);
7060 REG_WR(bp, 0x2120, 0xffffffff);
7065 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
7069 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
7070 BP_FUNC(bp), load_code);
7073 mutex_init(&bp->dmae_mutex);
7074 rc = bnx2x_gunzip_init(bp);
7078 switch (load_code) {
7079 case FW_MSG_CODE_DRV_LOAD_COMMON:
7080 rc = bnx2x_init_common(bp);
7085 case FW_MSG_CODE_DRV_LOAD_PORT:
7087 rc = bnx2x_init_port(bp);
7092 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
7094 rc = bnx2x_init_func(bp);
7100 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
7104 if (!BP_NOMCP(bp)) {
7105 int func = BP_FUNC(bp);
7107 bp->fw_drv_pulse_wr_seq =
7108 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
7109 DRV_PULSE_SEQ_MASK);
7110 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
7113 /* this needs to be done before gunzip end */
7114 bnx2x_zero_def_sb(bp);
7115 for_each_queue(bp, i)
7116 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7118 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
7122 bnx2x_gunzip_end(bp);
7127 static void bnx2x_free_mem(struct bnx2x *bp)
7130 #define BNX2X_PCI_FREE(x, y, size) \
7133 dma_free_coherent(&bp->pdev->dev, size, x, y); \
7139 #define BNX2X_FREE(x) \
7151 for_each_queue(bp, i) {
7154 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
7155 bnx2x_fp(bp, i, status_blk_mapping),
7156 sizeof(struct host_status_block));
7159 for_each_queue(bp, i) {
7161 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7162 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
7163 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
7164 bnx2x_fp(bp, i, rx_desc_mapping),
7165 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7167 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
7168 bnx2x_fp(bp, i, rx_comp_mapping),
7169 sizeof(struct eth_fast_path_rx_cqe) *
7173 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7174 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
7175 bnx2x_fp(bp, i, rx_sge_mapping),
7176 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7179 for_each_queue(bp, i) {
7181 /* fastpath tx rings: tx_buf tx_desc */
7182 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
7183 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
7184 bnx2x_fp(bp, i, tx_desc_mapping),
7185 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7187 /* end of fastpath */
7189 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
7190 sizeof(struct host_def_status_block));
7192 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
7193 sizeof(struct bnx2x_slowpath));
7196 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
7197 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
7198 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
7199 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
7200 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
7201 sizeof(struct host_status_block));
7203 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
7205 #undef BNX2X_PCI_FREE
7209 static int bnx2x_alloc_mem(struct bnx2x *bp)
7212 #define BNX2X_PCI_ALLOC(x, y, size) \
7214 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
7216 goto alloc_mem_err; \
7217 memset(x, 0, size); \
7220 #define BNX2X_ALLOC(x, size) \
7222 x = vmalloc(size); \
7224 goto alloc_mem_err; \
7225 memset(x, 0, size); \
7232 for_each_queue(bp, i) {
7233 bnx2x_fp(bp, i, bp) = bp;
7236 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
7237 &bnx2x_fp(bp, i, status_blk_mapping),
7238 sizeof(struct host_status_block));
7241 for_each_queue(bp, i) {
7243 /* fastpath rx rings: rx_buf rx_desc rx_comp */
7244 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
7245 sizeof(struct sw_rx_bd) * NUM_RX_BD);
7246 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
7247 &bnx2x_fp(bp, i, rx_desc_mapping),
7248 sizeof(struct eth_rx_bd) * NUM_RX_BD);
7250 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
7251 &bnx2x_fp(bp, i, rx_comp_mapping),
7252 sizeof(struct eth_fast_path_rx_cqe) *
7256 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
7257 sizeof(struct sw_rx_page) * NUM_RX_SGE);
7258 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
7259 &bnx2x_fp(bp, i, rx_sge_mapping),
7260 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
7263 for_each_queue(bp, i) {
7265 /* fastpath tx rings: tx_buf tx_desc */
7266 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
7267 sizeof(struct sw_tx_bd) * NUM_TX_BD);
7268 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
7269 &bnx2x_fp(bp, i, tx_desc_mapping),
7270 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
7272 /* end of fastpath */
7274 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7275 sizeof(struct host_def_status_block));
7277 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7278 sizeof(struct bnx2x_slowpath));
7281 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
7283 /* allocate searcher T2 table
7284 we allocate 1/4 of alloc num for T2
7285 (which is not entered into the ILT) */
7286 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
7288 /* Initialize T2 (for 1024 connections) */
7289 for (i = 0; i < 16*1024; i += 64)
7290 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
7292 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
7293 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
7295 /* QM queues (128*MAX_CONN) */
7296 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
7298 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
7299 sizeof(struct host_status_block));
7302 /* Slow path ring */
7303 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
7311 #undef BNX2X_PCI_ALLOC
7315 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
7319 for_each_queue(bp, i) {
7320 struct bnx2x_fastpath *fp = &bp->fp[i];
7322 u16 bd_cons = fp->tx_bd_cons;
7323 u16 sw_prod = fp->tx_pkt_prod;
7324 u16 sw_cons = fp->tx_pkt_cons;
7326 while (sw_cons != sw_prod) {
7327 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
7333 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
7337 for_each_queue(bp, j) {
7338 struct bnx2x_fastpath *fp = &bp->fp[j];
7340 for (i = 0; i < NUM_RX_BD; i++) {
7341 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
7342 struct sk_buff *skb = rx_buf->skb;
7347 dma_unmap_single(&bp->pdev->dev,
7348 dma_unmap_addr(rx_buf, mapping),
7349 bp->rx_buf_size, DMA_FROM_DEVICE);
7354 if (!fp->disable_tpa)
7355 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
7356 ETH_MAX_AGGREGATION_QUEUES_E1 :
7357 ETH_MAX_AGGREGATION_QUEUES_E1H);
7361 static void bnx2x_free_skbs(struct bnx2x *bp)
7363 bnx2x_free_tx_skbs(bp);
7364 bnx2x_free_rx_skbs(bp);
7367 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
7371 free_irq(bp->msix_table[0].vector, bp->dev);
7372 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
7373 bp->msix_table[0].vector);
7378 for_each_queue(bp, i) {
7379 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
7380 "state %x\n", i, bp->msix_table[i + offset].vector,
7381 bnx2x_fp(bp, i, state));
7383 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
7387 static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
7389 if (bp->flags & USING_MSIX_FLAG) {
7391 bnx2x_free_msix_irqs(bp);
7392 pci_disable_msix(bp->pdev);
7393 bp->flags &= ~USING_MSIX_FLAG;
7395 } else if (bp->flags & USING_MSI_FLAG) {
7397 free_irq(bp->pdev->irq, bp->dev);
7398 pci_disable_msi(bp->pdev);
7399 bp->flags &= ~USING_MSI_FLAG;
7401 } else if (!disable_only)
7402 free_irq(bp->pdev->irq, bp->dev);
7405 static int bnx2x_enable_msix(struct bnx2x *bp)
7407 int i, rc, offset = 1;
7410 bp->msix_table[0].entry = igu_vec;
7411 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
7414 igu_vec = BP_L_ID(bp) + offset;
7415 bp->msix_table[1].entry = igu_vec;
7416 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7419 for_each_queue(bp, i) {
7420 igu_vec = BP_L_ID(bp) + offset + i;
7421 bp->msix_table[i + offset].entry = igu_vec;
7422 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
7423 "(fastpath #%u)\n", i + offset, igu_vec, i);
7426 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
7427 BNX2X_NUM_QUEUES(bp) + offset);
7430 * reconfigure number of tx/rx queues according to available
7433 if (rc >= BNX2X_MIN_MSIX_VEC_CNT) {
7434 /* vectors available for FP */
7435 int fp_vec = rc - BNX2X_MSIX_VEC_FP_START;
7438 "Trying to use less MSI-X vectors: %d\n", rc);
7440 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], rc);
7444 "MSI-X is not attainable rc %d\n", rc);
7448 bp->num_queues = min(bp->num_queues, fp_vec);
7450 DP(NETIF_MSG_IFUP, "New queue configuration set: %d\n",
7453 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
7457 bp->flags |= USING_MSIX_FLAG;
7462 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
7464 int i, rc, offset = 1;
7466 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
7467 bp->dev->name, bp->dev);
7469 BNX2X_ERR("request sp irq failed\n");
7476 for_each_queue(bp, i) {
7477 struct bnx2x_fastpath *fp = &bp->fp[i];
7478 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
7481 rc = request_irq(bp->msix_table[i + offset].vector,
7482 bnx2x_msix_fp_int, 0, fp->name, fp);
7484 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
7485 bnx2x_free_msix_irqs(bp);
7489 fp->state = BNX2X_FP_STATE_IRQ;
7492 i = BNX2X_NUM_QUEUES(bp);
7493 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
7495 bp->msix_table[0].vector,
7496 0, bp->msix_table[offset].vector,
7497 i - 1, bp->msix_table[offset + i - 1].vector);
7502 static int bnx2x_enable_msi(struct bnx2x *bp)
7506 rc = pci_enable_msi(bp->pdev);
7508 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
7511 bp->flags |= USING_MSI_FLAG;
7516 static int bnx2x_req_irq(struct bnx2x *bp)
7518 unsigned long flags;
7521 if (bp->flags & USING_MSI_FLAG)
7524 flags = IRQF_SHARED;
7526 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
7527 bp->dev->name, bp->dev);
7529 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
7534 static void bnx2x_napi_enable(struct bnx2x *bp)
7538 for_each_queue(bp, i)
7539 napi_enable(&bnx2x_fp(bp, i, napi));
7542 static void bnx2x_napi_disable(struct bnx2x *bp)
7546 for_each_queue(bp, i)
7547 napi_disable(&bnx2x_fp(bp, i, napi));
7550 static void bnx2x_netif_start(struct bnx2x *bp)
7554 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7555 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7558 if (netif_running(bp->dev)) {
7559 bnx2x_napi_enable(bp);
7560 bnx2x_int_enable(bp);
7561 if (bp->state == BNX2X_STATE_OPEN)
7562 netif_tx_wake_all_queues(bp->dev);
7567 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7569 bnx2x_int_disable_sync(bp, disable_hw);
7570 bnx2x_napi_disable(bp);
7571 netif_tx_disable(bp->dev);
7575 * Init service functions
7579 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7581 * @param bp driver descriptor
7582 * @param set set or clear an entry (1 or 0)
7583 * @param mac pointer to a buffer containing a MAC
7584 * @param cl_bit_vec bit vector of clients to register a MAC for
7585 * @param cam_offset offset in a CAM to use
7586 * @param with_bcast set broadcast MAC as well
7588 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7589 u32 cl_bit_vec, u8 cam_offset,
7592 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7593 int port = BP_PORT(bp);
7596 * unicasts 0-31:port0 32-63:port1
7597 * multicast 64-127:port0 128-191:port1
7599 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7600 config->hdr.offset = cam_offset;
7601 config->hdr.client_id = 0xff;
7602 config->hdr.reserved1 = 0;
7605 config->config_table[0].cam_entry.msb_mac_addr =
7606 swab16(*(u16 *)&mac[0]);
7607 config->config_table[0].cam_entry.middle_mac_addr =
7608 swab16(*(u16 *)&mac[2]);
7609 config->config_table[0].cam_entry.lsb_mac_addr =
7610 swab16(*(u16 *)&mac[4]);
7611 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7613 config->config_table[0].target_table_entry.flags = 0;
7615 CAM_INVALIDATE(config->config_table[0]);
7616 config->config_table[0].target_table_entry.clients_bit_vector =
7617 cpu_to_le32(cl_bit_vec);
7618 config->config_table[0].target_table_entry.vlan_id = 0;
7620 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7621 (set ? "setting" : "clearing"),
7622 config->config_table[0].cam_entry.msb_mac_addr,
7623 config->config_table[0].cam_entry.middle_mac_addr,
7624 config->config_table[0].cam_entry.lsb_mac_addr);
7628 config->config_table[1].cam_entry.msb_mac_addr =
7629 cpu_to_le16(0xffff);
7630 config->config_table[1].cam_entry.middle_mac_addr =
7631 cpu_to_le16(0xffff);
7632 config->config_table[1].cam_entry.lsb_mac_addr =
7633 cpu_to_le16(0xffff);
7634 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7636 config->config_table[1].target_table_entry.flags =
7637 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7639 CAM_INVALIDATE(config->config_table[1]);
7640 config->config_table[1].target_table_entry.clients_bit_vector =
7641 cpu_to_le32(cl_bit_vec);
7642 config->config_table[1].target_table_entry.vlan_id = 0;
7645 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7646 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7647 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7651 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7653 * @param bp driver descriptor
7654 * @param set set or clear an entry (1 or 0)
7655 * @param mac pointer to a buffer containing a MAC
7656 * @param cl_bit_vec bit vector of clients to register a MAC for
7657 * @param cam_offset offset in a CAM to use
7659 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7660 u32 cl_bit_vec, u8 cam_offset)
7662 struct mac_configuration_cmd_e1h *config =
7663 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7665 config->hdr.length = 1;
7666 config->hdr.offset = cam_offset;
7667 config->hdr.client_id = 0xff;
7668 config->hdr.reserved1 = 0;
7671 config->config_table[0].msb_mac_addr =
7672 swab16(*(u16 *)&mac[0]);
7673 config->config_table[0].middle_mac_addr =
7674 swab16(*(u16 *)&mac[2]);
7675 config->config_table[0].lsb_mac_addr =
7676 swab16(*(u16 *)&mac[4]);
7677 config->config_table[0].clients_bit_vector =
7678 cpu_to_le32(cl_bit_vec);
7679 config->config_table[0].vlan_id = 0;
7680 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7682 config->config_table[0].flags = BP_PORT(bp);
7684 config->config_table[0].flags =
7685 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7687 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7688 (set ? "setting" : "clearing"),
7689 config->config_table[0].msb_mac_addr,
7690 config->config_table[0].middle_mac_addr,
7691 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7693 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7694 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7695 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7698 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7699 int *state_p, int poll)
7701 /* can take a while if any port is running */
7704 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7705 poll ? "polling" : "waiting", state, idx);
7710 bnx2x_rx_int(bp->fp, 10);
7711 /* if index is different from 0
7712 * the reply for some commands will
7713 * be on the non default queue
7716 bnx2x_rx_int(&bp->fp[idx], 10);
7719 mb(); /* state is changed by bnx2x_sp_event() */
7720 if (*state_p == state) {
7721 #ifdef BNX2X_STOP_ON_ERROR
7722 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7734 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7735 poll ? "polling" : "waiting", state, idx);
7736 #ifdef BNX2X_STOP_ON_ERROR
7743 static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7745 bp->set_mac_pending++;
7748 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7749 (1 << bp->fp->cl_id), BP_FUNC(bp));
7751 /* Wait for a completion */
7752 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7755 static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7757 bp->set_mac_pending++;
7760 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7761 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7764 /* Wait for a completion */
7765 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7770 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7771 * MAC(s). This function will wait until the ramdord completion
7774 * @param bp driver handle
7775 * @param set set or clear the CAM entry
7777 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7779 static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7781 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7783 bp->set_mac_pending++;
7786 /* Send a SET_MAC ramrod */
7788 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7789 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7792 /* CAM allocation for E1H
7793 * unicasts: by func number
7794 * multicast: 20+FUNC*20, 20 each
7796 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7797 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7799 /* Wait for a completion when setting */
7800 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7806 static int bnx2x_setup_leading(struct bnx2x *bp)
7810 /* reset IGU state */
7811 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7814 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7816 /* Wait for completion */
7817 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
7822 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7824 struct bnx2x_fastpath *fp = &bp->fp[index];
7826 /* reset IGU state */
7827 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7830 fp->state = BNX2X_FP_STATE_OPENING;
7831 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7834 /* Wait for completion */
7835 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7839 static int bnx2x_poll(struct napi_struct *napi, int budget);
7841 static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
7844 switch (bp->multi_mode) {
7845 case ETH_RSS_MODE_DISABLED:
7849 case ETH_RSS_MODE_REGULAR:
7851 bp->num_queues = min_t(u32, num_queues,
7852 BNX2X_MAX_QUEUES(bp));
7854 bp->num_queues = min_t(u32, num_online_cpus(),
7855 BNX2X_MAX_QUEUES(bp));
7865 static int bnx2x_set_num_queues(struct bnx2x *bp)
7873 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7876 /* Set number of queues according to bp->multi_mode value */
7877 bnx2x_set_num_queues_msix(bp);
7879 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
7882 /* if we can't use MSI-X we only need one fp,
7883 * so try to enable MSI-X with the requested number of fp's
7884 * and fallback to MSI or legacy INTx with one fp
7886 rc = bnx2x_enable_msix(bp);
7888 /* failed to enable MSI-X */
7892 bp->dev->real_num_tx_queues = bp->num_queues;
7897 static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7898 static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7901 /* must be called with rtnl_lock */
7902 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7907 #ifdef BNX2X_STOP_ON_ERROR
7908 if (unlikely(bp->panic))
7912 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7914 rc = bnx2x_set_num_queues(bp);
7916 if (bnx2x_alloc_mem(bp)) {
7917 bnx2x_free_irq(bp, true);
7921 for_each_queue(bp, i)
7922 bnx2x_fp(bp, i, disable_tpa) =
7923 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7925 for_each_queue(bp, i)
7926 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7929 bnx2x_napi_enable(bp);
7931 if (bp->flags & USING_MSIX_FLAG) {
7932 rc = bnx2x_req_msix_irqs(bp);
7934 bnx2x_free_irq(bp, true);
7938 /* Fall to INTx if failed to enable MSI-X due to lack of
7939 memory (in bnx2x_set_num_queues()) */
7940 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7941 bnx2x_enable_msi(bp);
7943 rc = bnx2x_req_irq(bp);
7945 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7946 bnx2x_free_irq(bp, true);
7949 if (bp->flags & USING_MSI_FLAG) {
7950 bp->dev->irq = bp->pdev->irq;
7951 netdev_info(bp->dev, "using MSI IRQ %d\n",
7956 /* Send LOAD_REQUEST command to MCP
7957 Returns the type of LOAD command:
7958 if it is the first port to be initialized
7959 common blocks should be initialized, otherwise - not
7961 if (!BP_NOMCP(bp)) {
7962 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7964 BNX2X_ERR("MCP response failure, aborting\n");
7968 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7969 rc = -EBUSY; /* other port in diagnostic mode */
7974 int port = BP_PORT(bp);
7976 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7977 load_count[0], load_count[1], load_count[2]);
7979 load_count[1 + port]++;
7980 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7981 load_count[0], load_count[1], load_count[2]);
7982 if (load_count[0] == 1)
7983 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7984 else if (load_count[1 + port] == 1)
7985 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7987 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7990 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7991 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7995 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7998 rc = bnx2x_init_hw(bp, load_code);
8000 BNX2X_ERR("HW init failed, aborting\n");
8001 bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8002 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8003 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8007 /* Setup NIC internals and enable interrupts */
8008 bnx2x_nic_init(bp, load_code);
8010 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
8011 (bp->common.shmem2_base))
8012 SHMEM2_WR(bp, dcc_support,
8013 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
8014 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
8016 /* Send LOAD_DONE command to MCP */
8017 if (!BP_NOMCP(bp)) {
8018 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
8020 BNX2X_ERR("MCP response failure, aborting\n");
8026 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
8028 rc = bnx2x_setup_leading(bp);
8030 BNX2X_ERR("Setup leading failed!\n");
8031 #ifndef BNX2X_STOP_ON_ERROR
8039 if (CHIP_IS_E1H(bp))
8040 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
8041 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
8042 bp->flags |= MF_FUNC_DIS;
8045 if (bp->state == BNX2X_STATE_OPEN) {
8047 /* Enable Timer scan */
8048 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
8050 for_each_nondefault_queue(bp, i) {
8051 rc = bnx2x_setup_multi(bp, i);
8061 bnx2x_set_eth_mac_addr_e1(bp, 1);
8063 bnx2x_set_eth_mac_addr_e1h(bp, 1);
8065 /* Set iSCSI L2 MAC */
8066 mutex_lock(&bp->cnic_mutex);
8067 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
8068 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
8069 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
8070 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
8073 mutex_unlock(&bp->cnic_mutex);
8078 bnx2x_initial_phy_init(bp, load_mode);
8080 /* Start fast path */
8081 switch (load_mode) {
8083 if (bp->state == BNX2X_STATE_OPEN) {
8084 /* Tx queue should be only reenabled */
8085 netif_tx_wake_all_queues(bp->dev);
8087 /* Initialize the receive filter. */
8088 bnx2x_set_rx_mode(bp->dev);
8092 netif_tx_start_all_queues(bp->dev);
8093 if (bp->state != BNX2X_STATE_OPEN)
8094 netif_tx_disable(bp->dev);
8095 /* Initialize the receive filter. */
8096 bnx2x_set_rx_mode(bp->dev);
8100 /* Initialize the receive filter. */
8101 bnx2x_set_rx_mode(bp->dev);
8102 bp->state = BNX2X_STATE_DIAG;
8110 bnx2x__link_status_update(bp);
8112 /* start the timer */
8113 mod_timer(&bp->timer, jiffies + bp->current_interval);
8116 bnx2x_setup_cnic_irq_info(bp);
8117 if (bp->state == BNX2X_STATE_OPEN)
8118 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
8120 bnx2x_inc_load_cnt(bp);
8126 /* Disable Timer scan */
8127 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
8130 bnx2x_int_disable_sync(bp, 1);
8131 if (!BP_NOMCP(bp)) {
8132 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
8133 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8136 /* Free SKBs, SGEs, TPA pool and driver internals */
8137 bnx2x_free_skbs(bp);
8138 for_each_queue(bp, i)
8139 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8142 bnx2x_free_irq(bp, false);
8144 bnx2x_napi_disable(bp);
8145 for_each_queue(bp, i)
8146 netif_napi_del(&bnx2x_fp(bp, i, napi));
8152 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
8154 struct bnx2x_fastpath *fp = &bp->fp[index];
8157 /* halt the connection */
8158 fp->state = BNX2X_FP_STATE_HALTING;
8159 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
8161 /* Wait for completion */
8162 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
8164 if (rc) /* timeout */
8167 /* delete cfc entry */
8168 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
8170 /* Wait for completion */
8171 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
8176 static int bnx2x_stop_leading(struct bnx2x *bp)
8178 __le16 dsb_sp_prod_idx;
8179 /* if the other port is handling traffic,
8180 this can take a lot of time */
8186 /* Send HALT ramrod */
8187 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
8188 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
8190 /* Wait for completion */
8191 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
8192 &(bp->fp[0].state), 1);
8193 if (rc) /* timeout */
8196 dsb_sp_prod_idx = *bp->dsb_sp_prod;
8198 /* Send PORT_DELETE ramrod */
8199 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
8201 /* Wait for completion to arrive on default status block
8202 we are going to reset the chip anyway
8203 so there is not much to do if this times out
8205 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
8207 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
8208 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
8209 *bp->dsb_sp_prod, dsb_sp_prod_idx);
8210 #ifdef BNX2X_STOP_ON_ERROR
8218 rmb(); /* Refresh the dsb_sp_prod */
8220 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
8221 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
8226 static void bnx2x_reset_func(struct bnx2x *bp)
8228 int port = BP_PORT(bp);
8229 int func = BP_FUNC(bp);
8233 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8234 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8237 /* Disable Timer scan */
8238 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8240 * Wait for at least 10ms and up to 2 second for the timers scan to
8243 for (i = 0; i < 200; i++) {
8245 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8250 base = FUNC_ILT_BASE(func);
8251 for (i = base; i < base + ILT_PER_FUNC; i++)
8252 bnx2x_ilt_wr(bp, i, 0);
8255 static void bnx2x_reset_port(struct bnx2x *bp)
8257 int port = BP_PORT(bp);
8260 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8262 /* Do not rcv packets to BRB */
8263 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8264 /* Do not direct rcv packets that are not for MCP to the BRB */
8265 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8266 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8269 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8272 /* Check for BRB port occupancy */
8273 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8275 DP(NETIF_MSG_IFDOWN,
8276 "BRB1 is not empty %d blocks are occupied\n", val);
8278 /* TODO: Close Doorbell port? */
8281 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
8283 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
8284 BP_FUNC(bp), reset_code);
8286 switch (reset_code) {
8287 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
8288 bnx2x_reset_port(bp);
8289 bnx2x_reset_func(bp);
8290 bnx2x_reset_common(bp);
8293 case FW_MSG_CODE_DRV_UNLOAD_PORT:
8294 bnx2x_reset_port(bp);
8295 bnx2x_reset_func(bp);
8298 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
8299 bnx2x_reset_func(bp);
8303 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
8308 static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
8310 int port = BP_PORT(bp);
8314 /* Wait until tx fastpath tasks complete */
8315 for_each_queue(bp, i) {
8316 struct bnx2x_fastpath *fp = &bp->fp[i];
8319 while (bnx2x_has_tx_work_unload(fp)) {
8323 BNX2X_ERR("timeout waiting for queue[%d]\n",
8325 #ifdef BNX2X_STOP_ON_ERROR
8336 /* Give HW time to discard old tx messages */
8339 if (CHIP_IS_E1(bp)) {
8340 struct mac_configuration_cmd *config =
8341 bnx2x_sp(bp, mcast_config);
8343 bnx2x_set_eth_mac_addr_e1(bp, 0);
8345 for (i = 0; i < config->hdr.length; i++)
8346 CAM_INVALIDATE(config->config_table[i]);
8348 config->hdr.length = i;
8349 if (CHIP_REV_IS_SLOW(bp))
8350 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8352 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
8353 config->hdr.client_id = bp->fp->cl_id;
8354 config->hdr.reserved1 = 0;
8356 bp->set_mac_pending++;
8359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8360 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8361 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
8364 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8366 bnx2x_set_eth_mac_addr_e1h(bp, 0);
8368 for (i = 0; i < MC_HASH_SIZE; i++)
8369 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
8371 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
8374 /* Clear iSCSI L2 MAC */
8375 mutex_lock(&bp->cnic_mutex);
8376 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
8377 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
8378 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
8380 mutex_unlock(&bp->cnic_mutex);
8383 if (unload_mode == UNLOAD_NORMAL)
8384 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8386 else if (bp->flags & NO_WOL_FLAG)
8387 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
8390 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8391 u8 *mac_addr = bp->dev->dev_addr;
8393 /* The mac address is written to entries 1-4 to
8394 preserve entry 0 which is used by the PMF */
8395 u8 entry = (BP_E1HVN(bp) + 1)*8;
8397 val = (mac_addr[0] << 8) | mac_addr[1];
8398 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8400 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8401 (mac_addr[4] << 8) | mac_addr[5];
8402 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8404 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8407 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8409 /* Close multi and leading connections
8410 Completions for ramrods are collected in a synchronous way */
8411 for_each_nondefault_queue(bp, i)
8412 if (bnx2x_stop_multi(bp, i))
8415 rc = bnx2x_stop_leading(bp);
8417 BNX2X_ERR("Stop leading failed!\n");
8418 #ifdef BNX2X_STOP_ON_ERROR
8427 reset_code = bnx2x_fw_command(bp, reset_code);
8429 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
8430 load_count[0], load_count[1], load_count[2]);
8432 load_count[1 + port]--;
8433 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
8434 load_count[0], load_count[1], load_count[2]);
8435 if (load_count[0] == 0)
8436 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8437 else if (load_count[1 + port] == 0)
8438 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8440 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8443 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
8444 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
8445 bnx2x__link_reset(bp);
8447 /* Reset the chip */
8448 bnx2x_reset_chip(bp, reset_code);
8450 /* Report UNLOAD_DONE to MCP */
8452 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
8456 static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
8460 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
8462 if (CHIP_IS_E1(bp)) {
8463 int port = BP_PORT(bp);
8464 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8465 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8467 val = REG_RD(bp, addr);
8469 REG_WR(bp, addr, val);
8470 } else if (CHIP_IS_E1H(bp)) {
8471 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8472 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8473 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8474 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8478 /* must be called with rtnl_lock */
8479 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
8483 if (bp->state == BNX2X_STATE_CLOSED) {
8484 /* Interface has been removed - nothing to recover */
8485 bp->recovery_state = BNX2X_RECOVERY_DONE;
8487 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8494 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
8496 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
8498 /* Set "drop all" */
8499 bp->rx_mode = BNX2X_RX_MODE_NONE;
8500 bnx2x_set_storm_rx_mode(bp);
8502 /* Disable HW interrupts, NAPI and Tx */
8503 bnx2x_netif_stop(bp, 1);
8504 netif_carrier_off(bp->dev);
8506 del_timer_sync(&bp->timer);
8507 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
8508 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
8509 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8512 bnx2x_free_irq(bp, false);
8514 /* Cleanup the chip if needed */
8515 if (unload_mode != UNLOAD_RECOVERY)
8516 bnx2x_chip_cleanup(bp, unload_mode);
8520 /* Free SKBs, SGEs, TPA pool and driver internals */
8521 bnx2x_free_skbs(bp);
8522 for_each_queue(bp, i)
8523 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
8524 for_each_queue(bp, i)
8525 netif_napi_del(&bnx2x_fp(bp, i, napi));
8528 bp->state = BNX2X_STATE_CLOSED;
8530 /* The last driver must disable a "close the gate" if there is no
8531 * parity attention or "process kill" pending.
8533 if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
8534 bnx2x_reset_is_done(bp))
8535 bnx2x_disable_close_the_gate(bp);
8537 /* Reset MCP mail box sequence if there is on going recovery */
8538 if (unload_mode == UNLOAD_RECOVERY)
8544 /* Close gates #2, #3 and #4: */
8545 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8549 /* Gates #2 and #4a are closed/opened for "not E1" only */
8550 if (!CHIP_IS_E1(bp)) {
8552 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
8553 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
8554 close ? (val | 0x1) : (val & (~(u32)1)));
8556 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
8557 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
8558 close ? (val | 0x1) : (val & (~(u32)1)));
8562 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
8563 val = REG_RD(bp, addr);
8564 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
8566 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
8567 close ? "closing" : "opening");
8571 #define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8573 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8575 /* Do some magic... */
8576 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8577 *magic_val = val & SHARED_MF_CLP_MAGIC;
8578 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8581 /* Restore the value of the `magic' bit.
8583 * @param pdev Device handle.
8584 * @param magic_val Old value of the `magic' bit.
8586 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8588 /* Restore the `magic' bit value... */
8589 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
8590 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
8591 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
8592 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8593 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8594 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8597 /* Prepares for MCP reset: takes care of CLP configurations.
8600 * @param magic_val Old value of 'magic' bit.
8602 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8605 u32 validity_offset;
8607 DP(NETIF_MSG_HW, "Starting\n");
8609 /* Set `magic' bit in order to save MF config */
8610 if (!CHIP_IS_E1(bp))
8611 bnx2x_clp_reset_prep(bp, magic_val);
8613 /* Get shmem offset */
8614 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8615 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8617 /* Clear validity map flags */
8619 REG_WR(bp, shmem + validity_offset, 0);
8622 #define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8623 #define MCP_ONE_TIMEOUT 100 /* 100 ms */
8625 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
8626 * depending on the HW type.
8630 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
8632 /* special handling for emulation and FPGA,
8633 wait 10 times longer */
8634 if (CHIP_REV_IS_SLOW(bp))
8635 msleep(MCP_ONE_TIMEOUT*10);
8637 msleep(MCP_ONE_TIMEOUT);
8640 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8642 u32 shmem, cnt, validity_offset, val;
8647 /* Get shmem offset */
8648 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8650 BNX2X_ERR("Shmem 0 return failure\n");
8655 validity_offset = offsetof(struct shmem_region, validity_map[0]);
8657 /* Wait for MCP to come up */
8658 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
8659 /* TBD: its best to check validity map of last port.
8660 * currently checks on port 0.
8662 val = REG_RD(bp, shmem + validity_offset);
8663 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
8664 shmem + validity_offset, val);
8666 /* check that shared memory is valid. */
8667 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8668 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8671 bnx2x_mcp_wait_one(bp);
8674 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
8676 /* Check that shared memory is valid. This indicates that MCP is up. */
8677 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
8678 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
8679 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
8685 /* Restore the `magic' bit value */
8686 if (!CHIP_IS_E1(bp))
8687 bnx2x_clp_reset_done(bp, magic_val);
8692 static void bnx2x_pxp_prep(struct bnx2x *bp)
8694 if (!CHIP_IS_E1(bp)) {
8695 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8696 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
8697 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
8703 * Reset the whole chip except for:
8705 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by one reset bit)
8708 * - MISC (including AEU)
8712 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
8714 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8717 MISC_REGISTERS_RESET_REG_1_RST_HC |
8718 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
8719 MISC_REGISTERS_RESET_REG_1_RST_PXP;
8722 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
8723 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
8724 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
8725 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
8726 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
8727 MISC_REGISTERS_RESET_REG_2_RST_GRC |
8728 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8729 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
8731 reset_mask1 = 0xffffffff;
8734 reset_mask2 = 0xffff;
8736 reset_mask2 = 0x1ffff;
8738 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
8739 reset_mask1 & (~not_reset_mask1));
8740 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
8741 reset_mask2 & (~not_reset_mask2));
8746 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
8747 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
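/* Note on the idiom above (editorial, inferred from usage in this file):
 * the MISC reset registers come in _SET/_CLEAR pairs, so no
 * read-modify-write is needed -- writing a 1 to a bit in the _CLEAR
 * register puts the corresponding block into reset, and writing a 1 to
 * the same bit in the _SET register takes it back out.
 */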
8751 static int bnx2x_process_kill(struct bnx2x *bp)
8755 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
8758 /* Empty the Tetris buffer, wait for 1s */
8760 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
8761 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
8762 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
8763 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
8764 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
8765 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
8766 ((port_is_idle_0 & 0x1) == 0x1) &&
8767 ((port_is_idle_1 & 0x1) == 0x1) &&
8768 (pgl_exp_rom2 == 0xffffffff))
8771 } while (cnt-- > 0);
8774 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
8776 " outstanding read requests after 1s!\n");
8777 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
8778 " port_is_idle_0=0x%08x,"
8779 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
8780 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
8787 /* Close gates #2, #3 and #4 */
8788 bnx2x_set_234_gates(bp, true);
8790 /* TBD: Indicate that "process kill" is in progress to MCP */
8792 /* Clear "unprepared" bit */
8793 REG_WR(bp, MISC_REG_UNPREPARED, 0);
8796 /* Make sure all is written to the chip before the reset */
8799 /* Wait for 1ms to empty GLUE and PCI-E core queues,
8800 * PSWHST, GRC and PSWRD Tetris buffer.
8804 /* Prepare for chip reset: */
8806 bnx2x_reset_mcp_prep(bp, &val);
8812 /* reset the chip */
8813 bnx2x_process_kill_chip_reset(bp);
8816 /* Recover after reset: */
8818 if (bnx2x_reset_mcp_comp(bp, val))
8824 /* Open the gates #2, #3 and #4 */
8825 bnx2x_set_234_gates(bp, false);
8827 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
8828 * reset state, re-enable attentions. */
8833 static int bnx2x_leader_reset(struct bnx2x *bp)
8836 /* Try to recover after the failure */
8837 if (bnx2x_process_kill(bp)) {
8838 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
8841 goto exit_leader_reset;
8844 /* Clear "reset is in progress" bit and update the driver state */
8845 bnx2x_set_reset_done(bp);
8846 bp->recovery_state = BNX2X_RECOVERY_DONE;
8850 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
8855 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);
8857 /* Assumption: runs under rtnl lock. This together with the fact
8858 * that it's called only from bnx2x_reset_task() ensure that it
8859 * will never be called when netif_running(bp->dev) is false.
8861 static void bnx2x_parity_recover(struct bnx2x *bp)
8863 DP(NETIF_MSG_HW, "Handling parity\n");
8865 switch (bp->recovery_state) {
8866 case BNX2X_RECOVERY_INIT:
8867 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
8868 /* Try to get a LEADER_LOCK HW lock */
8869 if (bnx2x_trylock_hw_lock(bp,
8870 HW_LOCK_RESOURCE_RESERVED_08))
8873 /* Stop the driver */
8874 /* If interface has been removed - break */
8875 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
8878 bp->recovery_state = BNX2X_RECOVERY_WAIT;
8879 /* Ensure "is_leader" and "recovery_state"
8880 * update values are seen on other CPUs
8885 case BNX2X_RECOVERY_WAIT:
8886 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
8887 if (bp->is_leader) {
8888 u32 load_counter = bnx2x_get_load_cnt(bp);
8890 /* Wait until all other functions get
8893 schedule_delayed_work(&bp->reset_task,
8897 /* If all other functions got down -
8898 * try to bring the chip back to
8899 * normal. In any case it's an exit
8900 * point for a leader.
8902 if (bnx2x_leader_reset(bp) ||
8903 bnx2x_nic_load(bp, LOAD_NORMAL)) {
8904 printk(KERN_ERR"%s: Recovery "
8905 "has failed. Power cycle is "
8906 "needed.\n", bp->dev->name);
8907 /* Disconnect this device */
8908 netif_device_detach(bp->dev);
8909 /* Block ifup for all functions
8910 * of this ASIC until
8911 * "process kill" or power
8914 bnx2x_set_reset_in_progress(bp);
8915 /* Shut down the power */
8916 bnx2x_set_power_state(bp,
8923 } else { /* non-leader */
8924 if (!bnx2x_reset_is_done(bp)) {
8925 /* Try to get a LEADER_LOCK HW lock as
8926 * long as a former leader may have
8927 * been unloaded by the user or may
8928 * have released leadership to another
8931 if (bnx2x_trylock_hw_lock(bp,
8932 HW_LOCK_RESOURCE_RESERVED_08)) {
8933 /* I'm a leader now! Restart a
8940 schedule_delayed_work(&bp->reset_task,
8944 } else { /* A leader has completed
8945 * the "process kill". It's an exit
8946 * point for a non-leader.
8948 bnx2x_nic_load(bp, LOAD_NORMAL);
8949 bp->recovery_state =
8950 BNX2X_RECOVERY_DONE;
8961 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
8962 * scheduled on a general queue in order to prevent a deadlock.
8964 static void bnx2x_reset_task(struct work_struct *work)
8966 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
8968 #ifdef BNX2X_STOP_ON_ERROR
8969 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
8970 " so reset not done to allow debug dump,\n"
8971 KERN_ERR " you will need to reboot when done\n");
8977 if (!netif_running(bp->dev))
8978 goto reset_task_exit;
8980 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
8981 bnx2x_parity_recover(bp);
8983 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8984 bnx2x_nic_load(bp, LOAD_NORMAL);
8991 /* end of nic load/unload */
8996 * Init service functions
8999 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
9002 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
9003 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
9004 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
9005 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
9006 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
9007 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
9008 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
9009 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
9011 BNX2X_ERR("Unsupported function index: %d\n", func);
9016 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
9018 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
9020 /* Flush all outstanding writes */
9023 /* Pretend to be function 0 */
9025 /* Flush the GRC transaction (in the chip) */
9026 new_val = REG_RD(bp, reg);
9028 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
9033 /* From now we are in the "like-E1" mode */
9034 bnx2x_int_disable(bp);
9036 /* Flush all outstanding writes */
9039 /* Restore the original function settings */
9040 REG_WR(bp, reg, orig_func);
9041 new_val = REG_RD(bp, reg);
9042 if (new_val != orig_func) {
9043 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
9044 orig_func, new_val);
9049 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
9051 if (CHIP_IS_E1H(bp))
9052 bnx2x_undi_int_disable_e1h(bp, func);
9054 bnx2x_int_disable(bp);
9057 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
9061 /* Check if there is any driver already loaded */
9062 val = REG_RD(bp, MISC_REG_UNPREPARED);
9064 /* Check if it is the UNDI driver:
9065 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
9067 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9068 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9070 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9072 int func = BP_FUNC(bp);
9076 /* clear the UNDI indication */
9077 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
9079 BNX2X_DEV_INFO("UNDI is active! reset device\n");
9081 /* try to unload UNDI on port 0 */
9084 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9085 DRV_MSG_SEQ_NUMBER_MASK);
9086 reset_code = bnx2x_fw_command(bp, reset_code);
9088 /* if UNDI is loaded on the other port */
9089 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
9091 /* send "DONE" for previous unload */
9092 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9094 /* unload UNDI on port 1 */
9097 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9098 DRV_MSG_SEQ_NUMBER_MASK);
9099 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
9101 bnx2x_fw_command(bp, reset_code);
9104 /* now it's safe to release the lock */
9105 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9107 bnx2x_undi_int_disable(bp, func);
9109 /* close input traffic and wait for it */
9110 /* Do not rcv packets to BRB */
9112 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
9113 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
9114 /* Do not direct rcv packets that are not for MCP to
9117 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
9118 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
9121 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
9122 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
9125 /* save NIG port swap info */
9126 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
9127 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
9130 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9133 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9135 /* take the NIG out of reset and restore swap values */
9137 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
9138 MISC_REGISTERS_RESET_REG_1_RST_NIG);
9139 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
9140 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
9142 /* send unload done to the MCP */
9143 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9145 /* restore our func and fw_seq */
9148 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
9149 DRV_MSG_SEQ_NUMBER_MASK);
9152 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
9156 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
9158 u32 val, val2, val3, val4, id;
9161 /* Get the chip revision id and number. */
9162 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
9163 val = REG_RD(bp, MISC_REG_CHIP_NUM);
9164 id = ((val & 0xffff) << 16);
9165 val = REG_RD(bp, MISC_REG_CHIP_REV);
9166 id |= ((val & 0xf) << 12);
9167 val = REG_RD(bp, MISC_REG_CHIP_METAL);
9168 id |= ((val & 0xff) << 4);
9169 val = REG_RD(bp, MISC_REG_BOND_ID);
9171 bp->common.chip_id = id;
9172 bp->link_params.chip_id = bp->common.chip_id;
9173 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
9175 val = (REG_RD(bp, 0x2874) & 0x55);
9176 if ((bp->common.chip_id & 0x1) ||
9177 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
9178 bp->flags |= ONE_PORT_FLAG;
9179 BNX2X_DEV_INFO("single port device\n");
9182 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
9183 bp->common.flash_size = (NVRAM_1MB_SIZE <<
9184 (val & MCPR_NVM_CFG4_FLASH_SIZE));
9185 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
9186 bp->common.flash_size, bp->common.flash_size);
9188 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9189 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
9190 bp->link_params.shmem_base = bp->common.shmem_base;
9191 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
9192 bp->common.shmem_base, bp->common.shmem2_base);
9194 if (!bp->common.shmem_base ||
9195 (bp->common.shmem_base < 0xA0000) ||
9196 (bp->common.shmem_base >= 0xC0000)) {
9197 BNX2X_DEV_INFO("MCP not active\n");
9198 bp->flags |= NO_MCP_FLAG;
9202 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9203 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9204 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9205 BNX2X_ERROR("BAD MCP validity signature\n");
9207 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
9208 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
9210 bp->link_params.hw_led_mode = ((bp->common.hw_config &
9211 SHARED_HW_CFG_LED_MODE_MASK) >>
9212 SHARED_HW_CFG_LED_MODE_SHIFT);
9214 bp->link_params.feature_config_flags = 0;
9215 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
9216 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
9217 bp->link_params.feature_config_flags |=
9218 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9220 bp->link_params.feature_config_flags &=
9221 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
9223 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
9224 bp->common.bc_ver = val;
9225 BNX2X_DEV_INFO("bc_ver %X\n", val);
9226 if (val < BNX2X_BC_VER) {
9227 /* For now only warn;
9228 * later we might need to enforce this */
9229 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
9230 "please upgrade BC\n", BNX2X_BC_VER, val);
9232 bp->link_params.feature_config_flags |=
9233 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
9234 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
9236 if (BP_E1HVN(bp) == 0) {
9237 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
9238 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
9240 /* no WOL capability for E1HVN != 0 */
9241 bp->flags |= NO_WOL_FLAG;
9243 BNX2X_DEV_INFO("%sWoL capable\n",
9244 (bp->flags & NO_WOL_FLAG) ? "not " : "");
9246 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
9247 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
9248 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
9249 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
9251 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
9252 val, val2, val3, val4);
9255 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
9258 int port = BP_PORT(bp);
9261 switch (switch_cfg) {
9263 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
9266 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9267 switch (ext_phy_type) {
9268 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
9269 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9272 bp->port.supported |= (SUPPORTED_10baseT_Half |
9273 SUPPORTED_10baseT_Full |
9274 SUPPORTED_100baseT_Half |
9275 SUPPORTED_100baseT_Full |
9276 SUPPORTED_1000baseT_Full |
9277 SUPPORTED_2500baseX_Full |
9282 SUPPORTED_Asym_Pause);
9285 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
9286 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
9289 bp->port.supported |= (SUPPORTED_10baseT_Half |
9290 SUPPORTED_10baseT_Full |
9291 SUPPORTED_100baseT_Half |
9292 SUPPORTED_100baseT_Full |
9293 SUPPORTED_1000baseT_Full |
9298 SUPPORTED_Asym_Pause);
9302 BNX2X_ERR("NVRAM config error. "
9303 "BAD SerDes ext_phy_config 0x%x\n",
9304 bp->link_params.ext_phy_config);
9308 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
9310 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9313 case SWITCH_CFG_10G:
9314 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
9317 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9318 switch (ext_phy_type) {
9319 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
9320 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
9323 bp->port.supported |= (SUPPORTED_10baseT_Half |
9324 SUPPORTED_10baseT_Full |
9325 SUPPORTED_100baseT_Half |
9326 SUPPORTED_100baseT_Full |
9327 SUPPORTED_1000baseT_Full |
9328 SUPPORTED_2500baseX_Full |
9329 SUPPORTED_10000baseT_Full |
9334 SUPPORTED_Asym_Pause);
9337 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
9338 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
9341 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9342 SUPPORTED_1000baseT_Full |
9346 SUPPORTED_Asym_Pause);
9349 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
9350 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
9353 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9354 SUPPORTED_2500baseX_Full |
9355 SUPPORTED_1000baseT_Full |
9359 SUPPORTED_Asym_Pause);
9362 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
9363 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
9366 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9369 SUPPORTED_Asym_Pause);
9372 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
9373 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
9376 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9377 SUPPORTED_1000baseT_Full |
9380 SUPPORTED_Asym_Pause);
9383 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
9384 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
9387 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9388 SUPPORTED_1000baseT_Full |
9392 SUPPORTED_Asym_Pause);
9395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
9396 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
9399 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9400 SUPPORTED_1000baseT_Full |
9404 SUPPORTED_Asym_Pause);
9407 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
9408 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
9411 bp->port.supported |= (SUPPORTED_10000baseT_Full |
9415 SUPPORTED_Asym_Pause);
9418 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
9419 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
9422 bp->port.supported |= (SUPPORTED_10baseT_Half |
9423 SUPPORTED_10baseT_Full |
9424 SUPPORTED_100baseT_Half |
9425 SUPPORTED_100baseT_Full |
9426 SUPPORTED_1000baseT_Full |
9427 SUPPORTED_10000baseT_Full |
9431 SUPPORTED_Asym_Pause);
9434 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
9435 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
9436 bp->link_params.ext_phy_config);
9440 BNX2X_ERR("NVRAM config error. "
9441 "BAD XGXS ext_phy_config 0x%x\n",
9442 bp->link_params.ext_phy_config);
9446 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
9448 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
9453 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
9454 bp->port.link_config);
9457 bp->link_params.phy_addr = bp->port.phy_addr;
9459 /* mask what we support according to speed_cap_mask */
9460 if (!(bp->link_params.speed_cap_mask &
9461 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
9462 bp->port.supported &= ~SUPPORTED_10baseT_Half;
9464 if (!(bp->link_params.speed_cap_mask &
9465 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
9466 bp->port.supported &= ~SUPPORTED_10baseT_Full;
9468 if (!(bp->link_params.speed_cap_mask &
9469 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
9470 bp->port.supported &= ~SUPPORTED_100baseT_Half;
9472 if (!(bp->link_params.speed_cap_mask &
9473 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
9474 bp->port.supported &= ~SUPPORTED_100baseT_Full;
9476 if (!(bp->link_params.speed_cap_mask &
9477 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
9478 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
9479 SUPPORTED_1000baseT_Full);
9481 if (!(bp->link_params.speed_cap_mask &
9482 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
9483 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
9485 if (!(bp->link_params.speed_cap_mask &
9486 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
9487 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
9489 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
9492 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
9494 bp->link_params.req_duplex = DUPLEX_FULL;
9496 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
9497 case PORT_FEATURE_LINK_SPEED_AUTO:
9498 if (bp->port.supported & SUPPORTED_Autoneg) {
9499 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9500 bp->port.advertising = bp->port.supported;
9503 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9505 if ((ext_phy_type ==
9506 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
9508 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
9509 /* force 10G, no AN */
9510 bp->link_params.req_line_speed = SPEED_10000;
9511 bp->port.advertising =
9512 (ADVERTISED_10000baseT_Full |
9516 BNX2X_ERR("NVRAM config error. "
9517 "Invalid link_config 0x%x"
9518 " Autoneg not supported\n",
9519 bp->port.link_config);
9524 case PORT_FEATURE_LINK_SPEED_10M_FULL:
9525 if (bp->port.supported & SUPPORTED_10baseT_Full) {
9526 bp->link_params.req_line_speed = SPEED_10;
9527 bp->port.advertising = (ADVERTISED_10baseT_Full |
9530 BNX2X_ERROR("NVRAM config error. "
9531 "Invalid link_config 0x%x"
9532 " speed_cap_mask 0x%x\n",
9533 bp->port.link_config,
9534 bp->link_params.speed_cap_mask);
9539 case PORT_FEATURE_LINK_SPEED_10M_HALF:
9540 if (bp->port.supported & SUPPORTED_10baseT_Half) {
9541 bp->link_params.req_line_speed = SPEED_10;
9542 bp->link_params.req_duplex = DUPLEX_HALF;
9543 bp->port.advertising = (ADVERTISED_10baseT_Half |
9546 BNX2X_ERROR("NVRAM config error. "
9547 "Invalid link_config 0x%x"
9548 " speed_cap_mask 0x%x\n",
9549 bp->port.link_config,
9550 bp->link_params.speed_cap_mask);
9555 case PORT_FEATURE_LINK_SPEED_100M_FULL:
9556 if (bp->port.supported & SUPPORTED_100baseT_Full) {
9557 bp->link_params.req_line_speed = SPEED_100;
9558 bp->port.advertising = (ADVERTISED_100baseT_Full |
9561 BNX2X_ERROR("NVRAM config error. "
9562 "Invalid link_config 0x%x"
9563 " speed_cap_mask 0x%x\n",
9564 bp->port.link_config,
9565 bp->link_params.speed_cap_mask);
9570 case PORT_FEATURE_LINK_SPEED_100M_HALF:
9571 if (bp->port.supported & SUPPORTED_100baseT_Half) {
9572 bp->link_params.req_line_speed = SPEED_100;
9573 bp->link_params.req_duplex = DUPLEX_HALF;
9574 bp->port.advertising = (ADVERTISED_100baseT_Half |
9577 BNX2X_ERROR("NVRAM config error. "
9578 "Invalid link_config 0x%x"
9579 " speed_cap_mask 0x%x\n",
9580 bp->port.link_config,
9581 bp->link_params.speed_cap_mask);
9586 case PORT_FEATURE_LINK_SPEED_1G:
9587 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
9588 bp->link_params.req_line_speed = SPEED_1000;
9589 bp->port.advertising = (ADVERTISED_1000baseT_Full |
9592 BNX2X_ERROR("NVRAM config error. "
9593 "Invalid link_config 0x%x"
9594 " speed_cap_mask 0x%x\n",
9595 bp->port.link_config,
9596 bp->link_params.speed_cap_mask);
9601 case PORT_FEATURE_LINK_SPEED_2_5G:
9602 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
9603 bp->link_params.req_line_speed = SPEED_2500;
9604 bp->port.advertising = (ADVERTISED_2500baseX_Full |
9607 BNX2X_ERROR("NVRAM config error. "
9608 "Invalid link_config 0x%x"
9609 " speed_cap_mask 0x%x\n",
9610 bp->port.link_config,
9611 bp->link_params.speed_cap_mask);
9616 case PORT_FEATURE_LINK_SPEED_10G_CX4:
9617 case PORT_FEATURE_LINK_SPEED_10G_KX4:
9618 case PORT_FEATURE_LINK_SPEED_10G_KR:
9619 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
9620 bp->link_params.req_line_speed = SPEED_10000;
9621 bp->port.advertising = (ADVERTISED_10000baseT_Full |
9624 BNX2X_ERROR("NVRAM config error. "
9625 "Invalid link_config 0x%x"
9626 " speed_cap_mask 0x%x\n",
9627 bp->port.link_config,
9628 bp->link_params.speed_cap_mask);
9634 BNX2X_ERROR("NVRAM config error. "
9635 "BAD link speed link_config 0x%x\n",
9636 bp->port.link_config);
9637 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
9638 bp->port.advertising = bp->port.supported;
9642 bp->link_params.req_flow_ctrl = (bp->port.link_config &
9643 PORT_FEATURE_FLOW_CONTROL_MASK);
9644 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
9645 !(bp->port.supported & SUPPORTED_Autoneg))
9646 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9648 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
9649 " advertising 0x%x\n",
9650 bp->link_params.req_line_speed,
9651 bp->link_params.req_duplex,
9652 bp->link_params.req_flow_ctrl, bp->port.advertising);
9655 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
9657 mac_hi = cpu_to_be16(mac_hi);
9658 mac_lo = cpu_to_be32(mac_lo);
9659 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
9660 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
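/* Worked example (editorial): with mac_hi = 0x0010 and mac_lo = 0x18a1b2c3,
 * cpu_to_be16()/cpu_to_be32() lay the bytes out most-significant first, so
 * mac_buf receives 00:10:18:a1:b2:c3 -- the wire order of a MAC address.
 */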
9663 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
9665 int port = BP_PORT(bp);
9671 bp->link_params.bp = bp;
9672 bp->link_params.port = port;
9674 bp->link_params.lane_config =
9675 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
9676 bp->link_params.ext_phy_config =
9678 dev_info.port_hw_config[port].external_phy_config);
9679 /* BCM8727_NOC => BCM8727 with no over-current */
9680 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
9681 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
9682 bp->link_params.ext_phy_config &=
9683 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
9684 bp->link_params.ext_phy_config |=
9685 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
9686 bp->link_params.feature_config_flags |=
9687 FEATURE_CONFIG_BCM8727_NOC;
9690 bp->link_params.speed_cap_mask =
9692 dev_info.port_hw_config[port].speed_capability_mask);
9694 bp->port.link_config =
9695 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
9697 /* Get the 4 lanes' XGXS config, rx and tx */
9698 for (i = 0; i < 2; i++) {
9700 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
9701 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
9702 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
9705 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
9706 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
9707 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
9710 /* If the device is capable of WoL, set the default state according
9713 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
9714 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
9715 (config & PORT_FEATURE_WOL_ENABLED));
9717 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
9718 " speed_cap_mask 0x%08x link_config 0x%08x\n",
9719 bp->link_params.lane_config,
9720 bp->link_params.ext_phy_config,
9721 bp->link_params.speed_cap_mask, bp->port.link_config);
9723 bp->link_params.switch_cfg |= (bp->port.link_config &
9724 PORT_FEATURE_CONNECTED_SWITCH_MASK);
9725 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
9727 bnx2x_link_settings_requested(bp);
9730 * If connected directly, work with the internal PHY, otherwise, work
9731 * with the external PHY
9733 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
9734 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
9735 bp->mdio.prtad = bp->link_params.phy_addr;
9737 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
9738 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
9740 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
9742 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
9743 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
9744 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
9745 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
9746 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9749 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
9750 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
9751 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
9755 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
9757 int func = BP_FUNC(bp);
9761 bnx2x_get_common_hwinfo(bp);
9765 if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
9767 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
9769 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
9770 FUNC_MF_CFG_E1HOV_TAG_MASK);
9771 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
9773 BNX2X_DEV_INFO("%s function mode\n",
9774 IS_E1HMF(bp) ? "multi" : "single");
9777 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
9779 FUNC_MF_CFG_E1HOV_TAG_MASK);
9780 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
9782 BNX2X_DEV_INFO("E1HOV for func %d is %d "
9784 func, bp->e1hov, bp->e1hov);
9786 BNX2X_ERROR("No valid E1HOV for func %d,"
9787 " aborting\n", func);
9792 BNX2X_ERROR("VN %d in single function mode,"
9793 " aborting\n", BP_E1HVN(bp));
9799 if (!BP_NOMCP(bp)) {
9800 bnx2x_get_port_hwinfo(bp);
9802 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
9803 DRV_MSG_SEQ_NUMBER_MASK);
9804 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9808 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
9809 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
9810 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
9811 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
9812 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
9813 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
9814 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
9815 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
9816 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
9817 bp->dev->dev_addr[5] = (u8)(val & 0xff);
9818 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
9820 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
9828 /* only supposed to happen on emulation/FPGA */
9829 BNX2X_ERROR("warning: random MAC workaround active\n");
9830 random_ether_addr(bp->dev->dev_addr);
9831 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
9837 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
9839 int cnt, i, block_end, rodi;
9840 char vpd_data[BNX2X_VPD_LEN+1];
9841 char str_id_reg[VENDOR_ID_LEN+1];
9842 char str_id_cap[VENDOR_ID_LEN+1];
9845 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
9846 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
9848 if (cnt < BNX2X_VPD_LEN)
9851 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
9852 PCI_VPD_LRDT_RO_DATA);
9857 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
9858 pci_vpd_lrdt_size(&vpd_data[i]);
9860 i += PCI_VPD_LRDT_TAG_SIZE;
9862 if (block_end > BNX2X_VPD_LEN)
9865 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9866 PCI_VPD_RO_KEYWORD_MFR_ID);
9870 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9872 if (len != VENDOR_ID_LEN)
9875 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9877 /* vendor specific info */
9878 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
9879 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
9880 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
9881 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
9883 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
9884 PCI_VPD_RO_KEYWORD_VENDOR0);
9886 len = pci_vpd_info_field_size(&vpd_data[rodi]);
9888 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
9890 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
9891 memcpy(bp->fw_ver, &vpd_data[rodi], len);
9892 bp->fw_ver[len] = ' ';
9901 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
9903 int func = BP_FUNC(bp);
9907 /* Disable interrupt handling until HW is initialized */
9908 atomic_set(&bp->intr_sem, 1);
9909 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
9911 mutex_init(&bp->port.phy_mutex);
9912 mutex_init(&bp->fw_mb_mutex);
9913 spin_lock_init(&bp->stats_lock);
9915 mutex_init(&bp->cnic_mutex);
9918 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
9919 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
9921 rc = bnx2x_get_hwinfo(bp);
9923 bnx2x_read_fwinfo(bp);
9924 /* need to reset chip if undi was active */
9926 bnx2x_undi_unload(bp);
9928 if (CHIP_REV_IS_FPGA(bp))
9929 dev_err(&bp->pdev->dev, "FPGA detected\n");
9931 if (BP_NOMCP(bp) && (func == 0))
9932 dev_err(&bp->pdev->dev, "MCP disabled, "
9933 "must load devices in order!\n");
9935 /* Set multi queue mode */
9936 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
9937 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
9938 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
9939 "requested is not MSI-X\n");
9940 multi_mode = ETH_RSS_MODE_DISABLED;
9942 bp->multi_mode = multi_mode;
9945 bp->dev->features |= NETIF_F_GRO;
9949 bp->flags &= ~TPA_ENABLE_FLAG;
9950 bp->dev->features &= ~NETIF_F_LRO;
9952 bp->flags |= TPA_ENABLE_FLAG;
9953 bp->dev->features |= NETIF_F_LRO;
9957 bp->dropless_fc = 0;
9959 bp->dropless_fc = dropless_fc;
9963 bp->tx_ring_size = MAX_TX_AVAIL;
9964 bp->rx_ring_size = MAX_RX_AVAIL;
9968 /* make sure that the numbers are in the right granularity */
9969 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9970 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9972 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
9973 bp->current_interval = (poll ? poll : timer_interval);
9975 init_timer(&bp->timer);
9976 bp->timer.expires = jiffies + bp->current_interval;
9977 bp->timer.data = (unsigned long) bp;
9978 bp->timer.function = bnx2x_timer;
9984 * ethtool service functions
9987 /* All ethtool functions called with rtnl_lock */
9989 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9991 struct bnx2x *bp = netdev_priv(dev);
9993 cmd->supported = bp->port.supported;
9994 cmd->advertising = bp->port.advertising;
9996 if ((bp->state == BNX2X_STATE_OPEN) &&
9997 !(bp->flags & MF_FUNC_DIS) &&
9998 (bp->link_vars.link_up)) {
9999 cmd->speed = bp->link_vars.line_speed;
10000 cmd->duplex = bp->link_vars.duplex;
10001 if (IS_E1HMF(bp)) {
10005 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
10006 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
10007 if (vn_max_rate < cmd->speed)
10008 cmd->speed = vn_max_rate;
10015 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
10017 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
10019 switch (ext_phy_type) {
10020 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
10021 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
10022 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
10023 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
10024 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
10025 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
10026 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
10027 cmd->port = PORT_FIBRE;
10030 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
10031 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
10032 cmd->port = PORT_TP;
10035 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
10036 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
10037 bp->link_params.ext_phy_config);
10041 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
10042 bp->link_params.ext_phy_config);
10046 cmd->port = PORT_TP;
10048 cmd->phy_address = bp->mdio.prtad;
10049 cmd->transceiver = XCVR_INTERNAL;
10051 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10052 cmd->autoneg = AUTONEG_ENABLE;
10054 cmd->autoneg = AUTONEG_DISABLE;
10059 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10060 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10061 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10062 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10063 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10064 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10065 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10070 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10072 struct bnx2x *bp = netdev_priv(dev);
10078 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
10079 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
10080 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
10081 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
10082 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
10083 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
10084 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
10086 if (cmd->autoneg == AUTONEG_ENABLE) {
10087 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10088 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
10092 /* advertise the requested speed and duplex if supported */
10093 cmd->advertising &= bp->port.supported;
10095 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
10096 bp->link_params.req_duplex = DUPLEX_FULL;
10097 bp->port.advertising |= (ADVERTISED_Autoneg |
10100 } else { /* forced speed */
10101 /* advertise the requested speed and duplex if supported */
10102 switch (cmd->speed) {
10104 if (cmd->duplex == DUPLEX_FULL) {
10105 if (!(bp->port.supported &
10106 SUPPORTED_10baseT_Full)) {
10108 "10M full not supported\n");
10112 advertising = (ADVERTISED_10baseT_Full |
10115 if (!(bp->port.supported &
10116 SUPPORTED_10baseT_Half)) {
10118 "10M half not supported\n");
10122 advertising = (ADVERTISED_10baseT_Half |
10128 if (cmd->duplex == DUPLEX_FULL) {
10129 if (!(bp->port.supported &
10130 SUPPORTED_100baseT_Full)) {
10132 "100M full not supported\n");
10136 advertising = (ADVERTISED_100baseT_Full |
10139 if (!(bp->port.supported &
10140 SUPPORTED_100baseT_Half)) {
10142 "100M half not supported\n");
10146 advertising = (ADVERTISED_100baseT_Half |
10152 if (cmd->duplex != DUPLEX_FULL) {
10153 DP(NETIF_MSG_LINK, "1G half not supported\n");
10157 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
10158 DP(NETIF_MSG_LINK, "1G full not supported\n");
10162 advertising = (ADVERTISED_1000baseT_Full |
10167 if (cmd->duplex != DUPLEX_FULL) {
10169 "2.5G half not supported\n");
10173 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
10175 "2.5G full not supported\n");
10179 advertising = (ADVERTISED_2500baseX_Full |
10184 if (cmd->duplex != DUPLEX_FULL) {
10185 DP(NETIF_MSG_LINK, "10G half not supported\n");
10189 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
10190 DP(NETIF_MSG_LINK, "10G full not supported\n");
10194 advertising = (ADVERTISED_10000baseT_Full |
10199 DP(NETIF_MSG_LINK, "Unsupported speed\n");
10203 bp->link_params.req_line_speed = cmd->speed;
10204 bp->link_params.req_duplex = cmd->duplex;
10205 bp->port.advertising = advertising;
10208 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
10209 DP_LEVEL " req_duplex %d advertising 0x%x\n",
10210 bp->link_params.req_line_speed, bp->link_params.req_duplex,
10211 bp->port.advertising);
10213 if (netif_running(dev)) {
10214 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10215 bnx2x_link_set(bp);
10221 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
10222 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
10224 static int bnx2x_get_regs_len(struct net_device *dev)
10226 struct bnx2x *bp = netdev_priv(dev);
10227 int regdump_len = 0;
10230 if (CHIP_IS_E1(bp)) {
10231 for (i = 0; i < REGS_COUNT; i++)
10232 if (IS_E1_ONLINE(reg_addrs[i].info))
10233 regdump_len += reg_addrs[i].size;
10235 for (i = 0; i < WREGS_COUNT_E1; i++)
10236 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
10237 regdump_len += wreg_addrs_e1[i].size *
10238 (1 + wreg_addrs_e1[i].read_regs_count);
10241 for (i = 0; i < REGS_COUNT; i++)
10242 if (IS_E1H_ONLINE(reg_addrs[i].info))
10243 regdump_len += reg_addrs[i].size;
10245 for (i = 0; i < WREGS_COUNT_E1H; i++)
10246 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
10247 regdump_len += wreg_addrs_e1h[i].size *
10248 (1 + wreg_addrs_e1h[i].read_regs_count);
10251 regdump_len += sizeof(struct dump_hdr);
10253 return regdump_len;
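/* Editorial note: per the formula above, plain registers contribute their
 * size in dwords, while each "wide bus" (wreg) entry accounts for size *
 * (1 + read_regs_count) dwords -- its own size dwords plus size *
 * read_regs_count dwords for the associated read registers.
 */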
10256 static void bnx2x_get_regs(struct net_device *dev,
10257 struct ethtool_regs *regs, void *_p)
10260 struct bnx2x *bp = netdev_priv(dev);
10261 struct dump_hdr dump_hdr = {0};
10264 memset(p, 0, regs->len);
10266 if (!netif_running(bp->dev))
10269 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
10270 dump_hdr.dump_sign = dump_sign_all;
10271 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
10272 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
10273 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
10274 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
10275 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
10277 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
10278 p += dump_hdr.hdr_size + 1;
10280 if (CHIP_IS_E1(bp)) {
10281 for (i = 0; i < REGS_COUNT; i++)
10282 if (IS_E1_ONLINE(reg_addrs[i].info))
10283 for (j = 0; j < reg_addrs[i].size; j++)
10285 reg_addrs[i].addr + j*4);
10288 for (i = 0; i < REGS_COUNT; i++)
10289 if (IS_E1H_ONLINE(reg_addrs[i].info))
10290 for (j = 0; j < reg_addrs[i].size; j++)
10292 reg_addrs[i].addr + j*4);
10296 #define PHY_FW_VER_LEN 10
10298 static void bnx2x_get_drvinfo(struct net_device *dev,
10299 struct ethtool_drvinfo *info)
10301 struct bnx2x *bp = netdev_priv(dev);
10302 u8 phy_fw_ver[PHY_FW_VER_LEN];
10304 strcpy(info->driver, DRV_MODULE_NAME);
10305 strcpy(info->version, DRV_MODULE_VERSION);
10307 phy_fw_ver[0] = '\0';
10308 if (bp->port.pmf) {
10309 bnx2x_acquire_phy_lock(bp);
10310 bnx2x_get_ext_phy_fw_version(&bp->link_params,
10311 (bp->state != BNX2X_STATE_CLOSED),
10312 phy_fw_ver, PHY_FW_VER_LEN);
10313 bnx2x_release_phy_lock(bp);
10316 strncpy(info->fw_version, bp->fw_ver, 32);
10317 snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
10319 (bp->common.bc_ver & 0xff0000) >> 16,
10320 (bp->common.bc_ver & 0xff00) >> 8,
10321 (bp->common.bc_ver & 0xff),
10322 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
10323 strcpy(info->bus_info, pci_name(bp->pdev));
10324 info->n_stats = BNX2X_NUM_STATS;
10325 info->testinfo_len = BNX2X_NUM_TESTS;
10326 info->eedump_len = bp->common.flash_size;
10327 info->regdump_len = bnx2x_get_regs_len(dev);
10330 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10332 struct bnx2x *bp = netdev_priv(dev);
10334 if (bp->flags & NO_WOL_FLAG) {
10335 wol->supported = 0;
10338 wol->supported = WAKE_MAGIC;
10340 wol->wolopts = WAKE_MAGIC;
10344 memset(&wol->sopass, 0, sizeof(wol->sopass));
10347 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10349 struct bnx2x *bp = netdev_priv(dev);
10351 if (wol->wolopts & ~WAKE_MAGIC)
10354 if (wol->wolopts & WAKE_MAGIC) {
10355 if (bp->flags & NO_WOL_FLAG)
10365 static u32 bnx2x_get_msglevel(struct net_device *dev)
10367 struct bnx2x *bp = netdev_priv(dev);
10369 return bp->msg_enable;
10372 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
10374 struct bnx2x *bp = netdev_priv(dev);
10376 if (capable(CAP_NET_ADMIN))
10377 bp->msg_enable = level;
10380 static int bnx2x_nway_reset(struct net_device *dev)
10382 struct bnx2x *bp = netdev_priv(dev);
10387 if (netif_running(dev)) {
10388 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10389 bnx2x_link_set(bp);
10395 static u32 bnx2x_get_link(struct net_device *dev)
10397 struct bnx2x *bp = netdev_priv(dev);
10399 if (bp->flags & MF_FUNC_DIS)
10402 return bp->link_vars.link_up;
10405 static int bnx2x_get_eeprom_len(struct net_device *dev)
10407 struct bnx2x *bp = netdev_priv(dev);
10409 return bp->common.flash_size;
10412 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
10414 int port = BP_PORT(bp);
10418 /* adjust timeout for emulation/FPGA */
10419 count = NVRAM_TIMEOUT_COUNT;
10420 if (CHIP_REV_IS_SLOW(bp))
10423 /* request access to nvram interface */
10424 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10425 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
10427 for (i = 0; i < count*10; i++) {
10428 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10429 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
10435 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
10436 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
10443 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
10445 int port = BP_PORT(bp);
10449 /* adjust timeout for emulation/FPGA */
10450 count = NVRAM_TIMEOUT_COUNT;
10451 if (CHIP_REV_IS_SLOW(bp))
10454 /* relinquish nvram interface */
10455 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10456 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
10458 for (i = 0; i < count*10; i++) {
10459 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
10460 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
10466 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
10467 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
10474 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
10478 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10480 /* enable both bits, even on read */
10481 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10482 (val | MCPR_NVM_ACCESS_ENABLE_EN |
10483 MCPR_NVM_ACCESS_ENABLE_WR_EN));
10486 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
10490 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
10492 /* disable both bits, even after read */
10493 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
10494 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
10495 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
10498 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
10504 /* build the command word */
10505 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
10507 /* need to clear DONE bit separately */
10508 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10510 /* address of the NVRAM to read from */
10511 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10512 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10514 /* issue a read command */
10515 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10517 /* adjust timeout for emulation/FPGA */
10518 count = NVRAM_TIMEOUT_COUNT;
10519 if (CHIP_REV_IS_SLOW(bp))
10522 /* wait for completion */
10525 for (i = 0; i < count; i++) {
10527 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10529 if (val & MCPR_NVM_COMMAND_DONE) {
10530 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
10531 /* we read NVRAM data in CPU order,
10532 * but ethtool sees it as an array of bytes;
10533 * converting to big-endian does the work */
10534 *ret_val = cpu_to_be32(val);
10543 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
10550 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10552 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10557 if (offset + buf_size > bp->common.flash_size) {
10558 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10559 " buf_size (0x%x) > flash_size (0x%x)\n",
10560 offset, buf_size, bp->common.flash_size);
10564 /* request access to nvram interface */
10565 rc = bnx2x_acquire_nvram_lock(bp);
10569 /* enable access to nvram interface */
10570 bnx2x_enable_nvram_access(bp);
10572 /* read the first word(s) */
10573 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10574 while ((buf_size > sizeof(u32)) && (rc == 0)) {
10575 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10576 memcpy(ret_buf, &val, 4);
10578 /* advance to the next dword */
10579 offset += sizeof(u32);
10580 ret_buf += sizeof(u32);
10581 buf_size -= sizeof(u32);
10586 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10587 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
10588 memcpy(ret_buf, &val, 4);
10591 /* disable access to nvram interface */
10592 bnx2x_disable_nvram_access(bp);
10593 bnx2x_release_nvram_lock(bp);
10598 static int bnx2x_get_eeprom(struct net_device *dev,
10599 struct ethtool_eeprom *eeprom, u8 *eebuf)
10601 struct bnx2x *bp = netdev_priv(dev);
10604 if (!netif_running(dev))
10607 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10608 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10609 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10610 eeprom->len, eeprom->len);
10612 /* parameters already validated in ethtool_get_eeprom */
10614 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
10619 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
10624 /* build the command word */
10625 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
10627 /* need to clear DONE bit separately */
10628 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
10630 /* write the data */
10631 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
10633 /* address of the NVRAM to write to */
10634 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
10635 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
10637 /* issue the write command */
10638 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
10640 /* adjust timeout for emulation/FPGA */
10641 count = NVRAM_TIMEOUT_COUNT;
10642 if (CHIP_REV_IS_SLOW(bp))
10645 /* wait for completion */
10647 for (i = 0; i < count; i++) {
10649 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
10650 if (val & MCPR_NVM_COMMAND_DONE) {
10659 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
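/* Worked example (editorial): BYTE_OFFSET(0x13) = 8 * (0x13 & 0x03) = 24,
 * i.e. byte 3 within the aligned dword, so the shifts in
 * bnx2x_nvram_write1() below always move by whole bytes.
 */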
10661 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
10669 if (offset + buf_size > bp->common.flash_size) {
10670 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10671 " buf_size (0x%x) > flash_size (0x%x)\n",
10672 offset, buf_size, bp->common.flash_size);
10676 /* request access to nvram interface */
10677 rc = bnx2x_acquire_nvram_lock(bp);
10681 /* enable access to nvram interface */
10682 bnx2x_enable_nvram_access(bp);
10684 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
10685 align_offset = (offset & ~0x03);
10686 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
10689 val &= ~(0xff << BYTE_OFFSET(offset));
10690 val |= (*data_buf << BYTE_OFFSET(offset));
10692 /* NVRAM data is returned as an array of bytes;
10693 * convert it back to CPU order */
10694 val = be32_to_cpu(val);
10696 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
10700 /* disable access to nvram interface */
10701 bnx2x_disable_nvram_access(bp);
10702 bnx2x_release_nvram_lock(bp);
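/* Summary of the single-byte path above (editorial): read the aligned
 * dword in one FIRST|LAST transaction, splice the new byte in at
 * BYTE_OFFSET(offset) while the data is still in big-endian (byte-array)
 * order, convert back to CPU order with be32_to_cpu() and write the
 * whole dword back.
 */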
10707 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
10713 u32 written_so_far;
10715 if (buf_size == 1) /* ethtool */
10716 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
10718 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
10720 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
10725 if (offset + buf_size > bp->common.flash_size) {
10726 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
10727 " buf_size (0x%x) > flash_size (0x%x)\n",
10728 offset, buf_size, bp->common.flash_size);
10732 /* request access to nvram interface */
10733 rc = bnx2x_acquire_nvram_lock(bp);
10737 /* enable access to nvram interface */
10738 bnx2x_enable_nvram_access(bp);
10740 written_so_far = 0;
10741 cmd_flags = MCPR_NVM_COMMAND_FIRST;
10742 while ((written_so_far < buf_size) && (rc == 0)) {
10743 if (written_so_far == (buf_size - sizeof(u32)))
10744 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10745 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
10746 cmd_flags |= MCPR_NVM_COMMAND_LAST;
10747 else if ((offset % NVRAM_PAGE_SIZE) == 0)
10748 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
10750 memcpy(&val, data_buf, 4);
10752 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
10754 /* advance to the next dword */
10755 offset += sizeof(u32);
10756 data_buf += sizeof(u32);
10757 written_so_far += sizeof(u32);
10761 /* disable access to nvram interface */
10762 bnx2x_disable_nvram_access(bp);
10763 bnx2x_release_nvram_lock(bp);
10768 static int bnx2x_set_eeprom(struct net_device *dev,
10769 struct ethtool_eeprom *eeprom, u8 *eebuf)
10771 struct bnx2x *bp = netdev_priv(dev);
10772 int port = BP_PORT(bp);
10775 if (!netif_running(dev))
10778 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
10779 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
10780 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
10781 eeprom->len, eeprom->len);
10783 /* parameters already validated in ethtool_set_eeprom */
10785 /* PHY eeprom can be accessed only by the PMF */
10786 if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
10790 if (eeprom->magic == 0x50485950) {
10791 /* 'PHYP' (0x50485950): prepare phy for FW upgrade */
10792 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10794 bnx2x_acquire_phy_lock(bp);
10795 rc |= bnx2x_link_reset(&bp->link_params,
10796 &bp->link_vars, 0);
10797 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10798 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
10799 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10800 MISC_REGISTERS_GPIO_HIGH, port);
10801 bnx2x_release_phy_lock(bp);
10802 bnx2x_link_report(bp);
10804 } else if (eeprom->magic == 0x50485952) {
10805 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
10806 if (bp->state == BNX2X_STATE_OPEN) {
10807 bnx2x_acquire_phy_lock(bp);
10808 rc |= bnx2x_link_reset(&bp->link_params,
10809 &bp->link_vars, 1);
10811 rc |= bnx2x_phy_init(&bp->link_params,
10813 bnx2x_release_phy_lock(bp);
10814 bnx2x_calc_fc_adv(bp);
10816 } else if (eeprom->magic == 0x53985943) {
10817 /* 'PHYC' (0x53985943): PHY FW upgrade completed */
10818 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
10819 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
10821 XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);
10823 /* DSP Remove Download Mode */
10824 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
10825 MISC_REGISTERS_GPIO_LOW, port);
10827 bnx2x_acquire_phy_lock(bp);
10829 bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);
10831 /* wait 0.5 sec to allow it to run */
10833 bnx2x_ext_phy_hw_reset(bp, port);
10835 bnx2x_release_phy_lock(bp);
10838 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
10843 static int bnx2x_get_coalesce(struct net_device *dev,
10844 struct ethtool_coalesce *coal)
10846 struct bnx2x *bp = netdev_priv(dev);
10848 memset(coal, 0, sizeof(struct ethtool_coalesce));
10850 coal->rx_coalesce_usecs = bp->rx_ticks;
10851 coal->tx_coalesce_usecs = bp->tx_ticks;
10856 static int bnx2x_set_coalesce(struct net_device *dev,
10857 struct ethtool_coalesce *coal)
10859 struct bnx2x *bp = netdev_priv(dev);
10861 bp->rx_ticks = (u16)coal->rx_coalesce_usecs;
10862 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
10863 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
10865 bp->tx_ticks = (u16)coal->tx_coalesce_usecs;
10866 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
10867 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
10869 if (netif_running(dev))
10870 bnx2x_update_coalesce(bp);
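/* These fields map to the generic ethtool coalescing knobs; e.g. from
 * userspace (illustrative):
 *
 *	ethtool -C eth0 rx-usecs 25 tx-usecs 50
 *
 * Values above BNX2X_MAX_COALESCE_TOUT are silently clamped by the code
 * above.
 */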
10875 static void bnx2x_get_ringparam(struct net_device *dev,
10876 struct ethtool_ringparam *ering)
10878 struct bnx2x *bp = netdev_priv(dev);
10880 ering->rx_max_pending = MAX_RX_AVAIL;
10881 ering->rx_mini_max_pending = 0;
10882 ering->rx_jumbo_max_pending = 0;
10884 ering->rx_pending = bp->rx_ring_size;
10885 ering->rx_mini_pending = 0;
10886 ering->rx_jumbo_pending = 0;
10888 ering->tx_max_pending = MAX_TX_AVAIL;
10889 ering->tx_pending = bp->tx_ring_size;
10892 static int bnx2x_set_ringparam(struct net_device *dev,
10893 struct ethtool_ringparam *ering)
10895 struct bnx2x *bp = netdev_priv(dev);
10898 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10899 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10903 if ((ering->rx_pending > MAX_RX_AVAIL) ||
10904 (ering->tx_pending > MAX_TX_AVAIL) ||
10905 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
10908 bp->rx_ring_size = ering->rx_pending;
10909 bp->tx_ring_size = ering->tx_pending;
10911 if (netif_running(dev)) {
10912 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10913 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10919 static void bnx2x_get_pauseparam(struct net_device *dev,
10920 struct ethtool_pauseparam *epause)
10922 struct bnx2x *bp = netdev_priv(dev);
10924 epause->autoneg = (bp->link_params.req_flow_ctrl ==
10925 BNX2X_FLOW_CTRL_AUTO) &&
10926 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
10928 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
10929 BNX2X_FLOW_CTRL_RX);
10930 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
10931 BNX2X_FLOW_CTRL_TX);
10933 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10934 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10935 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10938 static int bnx2x_set_pauseparam(struct net_device *dev,
10939 struct ethtool_pauseparam *epause)
10941 struct bnx2x *bp = netdev_priv(dev);
10946 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
10947 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
10948 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
10950 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10952 if (epause->rx_pause)
10953 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
10955 if (epause->tx_pause)
10956 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
10958 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
10959 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
10961 if (epause->autoneg) {
10962 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
10963 DP(NETIF_MSG_LINK, "autoneg not supported\n");
10967 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
10968 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
10972 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
10974 if (netif_running(dev)) {
10975 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
10976 bnx2x_link_set(bp);
10982 static int bnx2x_set_flags(struct net_device *dev, u32 data)
10984 struct bnx2x *bp = netdev_priv(dev);
10988 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
10989 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
10993 /* TPA requires Rx CSUM offloading */
10994 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
10995 if (!disable_tpa) {
10996 if (!(dev->features & NETIF_F_LRO)) {
10997 dev->features |= NETIF_F_LRO;
10998 bp->flags |= TPA_ENABLE_FLAG;
11003 } else if (dev->features & NETIF_F_LRO) {
11004 dev->features &= ~NETIF_F_LRO;
11005 bp->flags &= ~TPA_ENABLE_FLAG;
11009 if (data & ETH_FLAG_RXHASH)
11010 dev->features |= NETIF_F_RXHASH;
11012 dev->features &= ~NETIF_F_RXHASH;
11014 if (changed && netif_running(dev)) {
11015 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11016 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}
static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}
static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};
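/* Note: the order of the test names above is assumed to match the buf[]
 * slots filled in by bnx2x_self_test() below (buf[0] = register_test,
 * ..., buf[5] = link_test); ethtool pairs the two arrays by index.
 */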
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, (wr_val & mask));
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify value is as expected */
			if ((val & mask) != (wr_val & mask)) {
				DP(NETIF_MSG_PROBE,
				   "offset 0x%x: val 0x%x != 0x%x mask 0x%x\n",
				   offset, val, wr_val, mask);
				goto test_reg_exit;
			}
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_PROBE,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}
static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
#define CRC32_RESIDUAL			0xdebb20e3
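/* A property of CRC-32: when a block is stored together with the
 * (complemented) CRC computed over it, re-running the CRC over
 * block-plus-checksum always yields the fixed residual 0xdebb20e3.
 * bnx2x_test_nvram() below relies on this to validate each NVRAM
 * region with a single ether_crc_le() pass and one compare.
 */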
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x708,  0x70 }, /* manuf_key_info */
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_link_test(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}
static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
					8, "[%d]: tx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
					8, "[%d]: tx_bcast_packets" }
};
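/* Each "[%d]" above is a printf slot: bnx2x_get_strings() runs the
 * string through sprintf() with the queue index, so the one table
 * serves every Rx/Tx queue.
 */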
static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))
static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}
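/* Flattening the stats into the u64 buffer below follows one rule per
 * table row: size == 0 rows are skipped (the slot stays 0), size == 4
 * rows are a single 32-bit counter widened to u64, and size == 8 rows
 * are two consecutive 32-bit words combined with HILO_U64(high, low).
 */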
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
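/* bnx2x_phys_id() implements the ethtool LED-blink request: each loop
 * iteration below is half a second, alternating the port LED between
 * forced-on (operational mode at 1G) and off, so "data" is roughly the
 * blink time in seconds; on exit the LED is restored to track the real
 * link state.
 */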
static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */
/****************************************************************************
* General service functions
****************************************************************************/
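/* Power management here is driven directly through the PCI PM_CSR
 * register: D0 is entered by clearing the power-state field (writing
 * the PME status bit as well, which is write-one-to-clear), and D3hot
 * by setting the state bits, optionally with PME enabled when WoL is
 * on. A short sleep is needed when leaving D3hot before touching the
 * chip again.
 */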
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		/* If there are other clients above don't
		   shut down the power */
		if (atomic_read(&bp->pdev->enable_cnt) != 1)
			return 0;
		/* Don't shut down the power for emulation and FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			return 0;

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus we
			 * need to ensure that status block indices have been
			 * actually read (bnx2x_update_fpsb_idx) prior to this
			 * check (bnx2x_has_rx_work) so that we won't write the
			 * "newer" value of the status block to IGU (if there
			 * was a DMA right after bnx2x_has_rx_work and if there
			 * is no rmb, the memory reading (bnx2x_update_fpsb_idx)
			 * may be postponed to right before bnx2x_ack_sb). In
			 * this case there will never be another interrupt
			 * until there is another update of the status block,
			 * while there is still unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}
/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
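/* bnx2x_csum_fix() adjusts a partial checksum computed by the stack so
 * that it covers exactly the span the hardware expects. "fix" is the
 * signed byte distance between the two starting points: a positive fix
 * means the software sum includes fix extra bytes before the transport
 * header, so their partial sum is subtracted out; a negative fix means
 * bytes are missing and are added in. The result is folded to 16 bits,
 * complemented, and byte-swapped for the parsing BD.
 */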
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}
#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented)
   no need to check fragmentation if page size > 8K (there will be no
   violation to FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
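/* In other words: with wnd_size = MAX_FETCH_BD - 3 descriptors
 * available per fetch, the firmware constraint checked above is that
 * every window of wnd_size consecutive BDs must carry at least one
 * full MSS of data; any LSO skb violating that (and any non-LSO skb
 * with too many fragments) is linearized by the caller first.
 */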
/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;
	struct ethhdr *eth;
	u8 mac_type = UNICAST_ADDRESS;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	eth = (struct ethhdr *)skb->data;

	/* set flag according to packet type (UNICAST_ADDRESS is default) */
	if (unlikely(is_multicast_ether_addr(eth->h_dest))) {
		if (is_broadcast_ether_addr(eth->h_dest))
			mac_type = BROADCAST_ADDRESS;
		else
			mac_type = MULTICAST_ADDRESS;
	}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then we have a parsing info BD (used for TSO or xsum),
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/
	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (mac_type <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW will
	 * assume packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, it may mean a recovery is in
			 * progress. We don't check the attention state here
			 * because it may have already been cleared by a
			 * "common" reset, but we shall proceed with "process
			 * kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
			       " completed yet. Try again later. If you still"
			       " see this message after a few retries then a"
			       " power cycle is required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
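/* Rx-mode resolution below goes from most to least permissive:
 * IFF_PROMISC wins outright, IFF_ALLMULTI (or an E1 multicast list
 * that overflows the CAM) selects accept-all-multicast, and otherwise
 * the multicast list is programmed exactly - into CAM entries via a
 * ramrod on E1, or into the 256-bit crc32c hash registers on E1H
 * (the top byte of the crc32c over the MAC picks one of the 256 bits:
 * bit >> 5 selects the 32-bit register, bit & 0x1f the bit within it).
 */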
/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}
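/* MDIO access is exposed through the mdio45 framework: reads and
 * writes arrive with a clause-45 (prtad, devad) pair, and a request
 * carrying MDIO_DEVAD_NONE indicates a clause-22 style access, which
 * the hardware handles by substituting DEFAULT_PHY_DEV_ADDR as the
 * device address (see the devad fixup in both handlers below).
 */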
/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}
/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}
#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}
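/* The firmware image requested below is a single blob: a header of
 * {offset, len} section descriptors followed by the section payloads
 * (init values, init ops, per-STORM microcode), all fields big-endian.
 * bnx2x_check_firmware() bounds-checks every descriptor against the
 * blob size before anything is parsed, and verifies the embedded
 * version against the constants this driver was built with.
 */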
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
			BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
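/* The firmware file stores all multi-byte fields big-endian; the
 * helpers below convert whole sections to host byte order.
 */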
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}
/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}
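/* Allocate bp->arr and fill it from the firmware section described by
 * fw_hdr->arr, converting to host order with func; on allocation
 * failure jump to lbl.
 */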
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		dev_err(dev, "Unsupported chip revision\n");
		return -EINVAL;
	}

	dev_info(dev, "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		dev_err(dev, "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		dev_err(dev, "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
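/* Probe one NIC: allocate the net_device, set up PCI resources, load
 * and parse the firmware file and register with the network stack.
 */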
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		dev_err(&pdev->dev, "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
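/* Tear down everything bnx2x_init_one() set up, in reverse order */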
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);
	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
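/* Standard PCI power-management hooks: suspend unloads the NIC (if it
 * is up) and enters the requested power state; resume reverses it.
 */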
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();
	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();
	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();
	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();
	return rc;
}
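/* Minimal unload path used after a PCI error: stop the interface and
 * free host-side resources without relying on the (possibly dead)
 * device.
 */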
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
					bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
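/* Re-read the shared-memory (shmem) info after a slot reset so the
 * driver can talk to the MCP again.
 */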
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
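/* Called by CNIC to submit 16-byte kernel work queue entries (KWQEs);
 * whatever the slowpath queue cannot take immediately is buffered in
 * the cnic_kwq ring and posted later by bnx2x_cnic_sp_post().
 */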
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.mac_config_addr.hi,
		   spe->data.mac_config_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
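/* Two paths for delivering control events to CNIC: bnx2x_cnic_ctl_send()
 * takes cnic_mutex and may sleep, while the _bh variant relies on RCU
 * so it is safe from softirq context.
 */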
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
/*
 * for commands that have no data
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 1);
}
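/* Control hook exposed to CNIC for context-table writes, completion
 * accounting and starting/stopping its L2 rings.
 */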
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask |= (1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		bp->rx_mode_cl_mask &= ~(1 << cli);
		bnx2x_set_storm_rx_mode(bp);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
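/* Publish the CNIC interrupt resources: the dedicated MSI-X vector
 * when enabled, plus the CNIC and default status blocks.
 */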
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	cp->irq_arr[0].status_blk = bp->cnic_sb;
	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;

	cp->num_irq = 2;
}
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
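/* Entry point for the CNIC module: describe this device's resources
 * and callbacks in its cnic_eth_dev structure.
 */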
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */