/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
53 #include "bnx2x_reg.h"
54 #include "bnx2x_fw_defs.h"
55 #include "bnx2x_hsi.h"
56 #include "bnx2x_link.h"
58 #include "bnx2x_init.h"
#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/
/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
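/* A note on the two helpers above: they tunnel register accesses through
 * the PCI config-space GRC window (PCICFG_GRC_ADDRESS selects the target
 * register, PCICFG_GRC_DATA carries the data), so they work before any BAR
 * mapping or DMAE machinery is usable. The final write of
 * PCICFG_VENDOR_ID_OFFSET appears to simply park the window on a harmless
 * offset. A minimal usage sketch (the address value is illustrative only):
 *
 *	bnx2x_reg_wr_ind(bp, some_grc_addr, 0);
 *	val = bnx2x_reg_rd_ind(bp, some_grc_addr);
 */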
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
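/* Both DMAE routines above share the same shape: build a dmae_command with
 * source/destination/completion addresses, post it with bnx2x_post_dmae(),
 * then poll the write-back completion word (wb_comp) until the hardware
 * stores DMAE_COMP_VAL there. When bp->dmae_ready is not yet set (early
 * init), they fall back to the slower indirect config-space accessors.
 */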
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
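/* Sketch of how the wide-bus helpers above are typically used (the
 * REG_WR_DMAE/REG_RD_DMAE macros appear to wrap bnx2x_write_dmae()/
 * bnx2x_read_dmae() from above); "some_wb_reg" is a placeholder name, not
 * a real register:
 *
 *	bnx2x_wb_wr(bp, some_wb_reg, U64_HI(val64), U64_LO(val64));
 *	val64 = bnx2x_wb_rd(bp, some_wb_reg);
 */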
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
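/* The three branches above correspond to the three interrupt modes the
 * driver can run in: MSI-X (per-queue vectors, single-ISR disabled), MSI,
 * and legacy INTx. Note that the INTx path writes the HC config twice -
 * once with the MSI/MSI-X enable bit still set and once without - which
 * seems to be an ordering requirement of the host coalescing block when
 * switching over to line interrupts.
 */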
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->skb = NULL;
	tx_buf->first_bd = 0;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
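/* Worked example for the arithmetic above, assuming a 4096-entry ring with
 * NUM_TX_RINGS == 16 "next-page" BDs (numbers are illustrative, not the
 * driver's actual ring geometry): with prod == 100 and cons == 50,
 * used = 50 + 16 = 66, so bnx2x_tx_avail() reports 4096 - 66 = 4030 free
 * descriptors. Counting the next-page entries as "used" keeps the estimate
 * conservative.
 */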
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	int i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
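/* The SGE mask is an array of u64 elements, one bit per SGE ring entry
 * (RX_SGE_MASK_ELEM_SZ bits each). A cleared bit marks an entry whose page
 * the FW has consumed but the driver has not yet replenished.
 * bnx2x_update_sge_prod() only advances rx_sge_prod across mask elements
 * that are fully zero; the bits that map to each page's "next" entries are
 * pre-cleared above so an element can reach zero even though those entries
 * never carry real pages.
 */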
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
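/* Typical pairing of the two lock helpers above (error handling elided;
 * HW_LOCK_RESOURCE_GPIO is one of the real resource IDs used later in this
 * file):
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch the shared resource ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */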
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
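/* Example invocation (the mode constants are those handled in the switch
 * above; the particular GPIO/mode combination is illustrative only):
 * drive GPIO 1 low on the current port:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, BP_PORT(bp));
 */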
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
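/* Rough numbers for the formulas above at 10G (line_speed = 10000 Mbps):
 * r_param = 10000/8 = 1250 bytes per usec, and with the stated
 * "T_FAIR_COEF / line_speed" rule t_fair works out to 1000 usec (10000
 * usec at 1G), matching the comment in the code. The exact macro values
 * (QM_ARB_BYTES, T_FAIR_COEF, RS_PERIODIC_TIMEOUT_USEC, FAIR_MEM) live in
 * the headers, so these figures are only a sanity check of the units.
 */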
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
2158 /* This function is called upon link interrupt */
2159 static void bnx2x_link_attn(struct bnx2x *bp)
2161 /* Make sure that we are synced with the current statistics */
2162 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2164 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2166 if (bp->link_vars.link_up) {
2168 /* dropless flow control */
2169 if (CHIP_IS_E1H(bp)) {
2170 int port = BP_PORT(bp);
2171 u32 pause_enabled = 0;
2173 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2176 REG_WR(bp, BAR_USTRORM_INTMEM +
2177 USTORM_PAUSE_ENABLED_OFFSET(port),
2181 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2182 struct host_port_stats *pstats;
2184 pstats = bnx2x_sp(bp, port_stats);
2185 /* reset old bmac stats */
2186 memset(&(pstats->mac_stx[0]), 0,
2187 sizeof(struct mac_stx));
2189 if ((bp->state == BNX2X_STATE_OPEN) ||
2190 (bp->state == BNX2X_STATE_DISABLED))
2191 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2194 /* indicate link status */
2195 bnx2x_link_report(bp);
2198 int port = BP_PORT(bp);
2202 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2203 if (vn == BP_E1HVN(bp))
2206 func = ((vn << 1) | port);
			/* Set the attention towards other drivers
			   on the same port */
2210 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;
2217 /* Init rate shaping and fairness contexts */
2218 bnx2x_init_port_minmax(bp);
2220 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2221 bnx2x_init_vn_minmax(bp, 2*vn + port);
2223 /* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
2226 REG_WR(bp, BAR_XSTRORM_INTMEM +
2227 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;
2238 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2240 if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2245 /* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2257 /* enable nig attention */
2258 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2259 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2260 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/*****************************************************************************
 * General service functions
 *****************************************************************************/

/* the slow path queue is odd since completions arrive on the fastpath ring */
2274 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);
2279 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2280 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2281 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2282 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2283 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2285 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);
2292 if (!bp->spq_left) {
2293 BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
2300 bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
2306 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2308 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
2314 bp->spq_prod_bd = bp->spq;
2315 bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
	spin_unlock_bh(&bp->spq_lock);

	return 0;
}
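/* Typical usage, e.g. on the port-setup path: a caller posts a ramrod with
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
 *
 * and then waits for the completion, which arrives on the fastpath ring.
 */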
2330 /* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2341 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;
2368 barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}

	return rc;
}
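/* The returned mask tells bnx2x_sp_task() what moved: bit 0 flags the
 * attention index, bits 1-4 the c/u/x/t default status block indices.
 * A (hypothetical) return of 0x3 would mean an attention and a cstorm
 * update are both pending.
 */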
/*
 * slow path service functions
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
2398 int port = BP_PORT(bp);
2399 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2400 COMMAND_REG_ATTN_BITS_SET);
2401 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2402 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2403 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
2408 BNX2X_ERR("IGU ERROR\n");
2410 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2411 aeu_mask = REG_RD(bp, aeu_addr);
2413 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2414 aeu_mask, asserted);
2415 aeu_mask &= ~(asserted & 0xff);
2416 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2418 REG_WR(bp, aeu_addr, aeu_mask);
2419 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2421 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2422 bp->attn_state |= asserted;
2423 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2425 if (asserted & ATTN_HARD_WIRED_MASK) {
2426 if (asserted & ATTN_NIG_FOR_FUNC) {
2428 bnx2x_acquire_phy_lock(bp);
2430 /* save nig interrupt mask */
2431 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2432 REG_WR(bp, nig_int_mask_addr, 0);
2434 bnx2x_link_attn(bp);
2436 /* handle unicore attn? */
2438 if (asserted & ATTN_SW_TIMER_4_FUNC)
2439 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2441 if (asserted & GPIO_2_FUNC)
2442 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2444 if (asserted & GPIO_3_FUNC)
2445 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2447 if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
2452 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
2456 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
2460 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
2465 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
2469 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
2473 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);
2484 /* now set back the mask */
2485 if (asserted & ATTN_NIG_FOR_FUNC) {
2486 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;
2497 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2498 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2500 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2502 val = REG_RD(bp, reg_offset);
2503 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2504 REG_WR(bp, reg_offset, val);
2506 BNX2X_ERR("SPIO5 hw attention\n");
2508 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2509 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2510 /* Fan failure attention */
2512 /* The PHY reset is controlled by GPIO 1 */
2513 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2514 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2515 /* Low power mode is controlled by GPIO 2 */
2516 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2517 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2518 /* mark the failure */
2519 bp->link_params.ext_phy_config &=
2520 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2521 bp->link_params.ext_phy_config |=
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
2525 external_phy_config,
2526 bp->link_params.ext_phy_config);
2527 /* log the failure */
2528 printk(KERN_ERR PFX "Fan Failure on Network"
2529 " Controller %s has caused the driver to"
2530 " shutdown the card to prevent permanent"
2531 " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {
2542 val = REG_RD(bp, reg_offset);
2543 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2544 REG_WR(bp, reg_offset, val);
2546 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2559 BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {
		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2571 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2573 val = REG_RD(bp, reg_offset);
2574 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2575 REG_WR(bp, reg_offset, val);
2577 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;
2587 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2589 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2590 BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2598 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2599 BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {
		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2611 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2613 val = REG_RD(bp, reg_offset);
2614 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2615 REG_WR(bp, reg_offset, val);
2617 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;
2627 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2629 if (attn & BNX2X_PMF_LINK_ASSERT) {
2630 int func = BP_FUNC(bp);
2632 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2633 bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
						DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);
2638 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2640 BNX2X_ERR("MC assert!\n");
2641 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2642 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2643 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {
2649 BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2658 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2659 if (attn & BNX2X_GRC_TIMEOUT) {
2660 val = CHIP_IS_E1H(bp) ?
2661 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
2665 val = CHIP_IS_E1H(bp) ?
2666 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
2675 struct attn_route attn;
2676 struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
2684 try to handle this event */
2685 bnx2x_acquire_alr(bp);
2687 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2688 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2689 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2690 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2691 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2692 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2694 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2695 if (deasserted & (1 << index)) {
2696 group_mask = bp->attn_group[index];
2698 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2699 index, group_mask.sig[0], group_mask.sig[1],
2700 group_mask.sig[2], group_mask.sig[3]);
2702 bnx2x_attn_int_deasserted3(bp,
2703 attn.sig[3] & group_mask.sig[3]);
2704 bnx2x_attn_int_deasserted1(bp,
2705 attn.sig[1] & group_mask.sig[1]);
2706 bnx2x_attn_int_deasserted2(bp,
2707 attn.sig[2] & group_mask.sig[2]);
2708 bnx2x_attn_int_deasserted0(bp,
2709 attn.sig[0] & group_mask.sig[0]);
2711 if ((attn.sig[0] & group_mask.sig[0] &
2712 HW_PRTY_ASSERT_SET_0) ||
2713 (attn.sig[1] & group_mask.sig[1] &
2714 HW_PRTY_ASSERT_SET_1) ||
2715 (attn.sig[2] & group_mask.sig[2] &
2716 HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);
2723 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);
2730 if (~bp->attn_state & deasserted)
2731 BNX2X_ERR("IGU ERROR\n");
2733 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2734 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2736 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2737 aeu_mask = REG_RD(bp, reg_addr);
2739 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2740 aeu_mask, deasserted);
2741 aeu_mask |= (deasserted & 0xff);
2742 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2744 REG_WR(bp, reg_addr, aeu_mask);
2745 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2747 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2748 bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
2754 /* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
				    attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
				   attn_bits_ack);
2759 u32 attn_state = bp->attn_state;
2761 /* look for changed bits */
2762 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2763 u32 deasserted = ~attn_bits & attn_ack & attn_state;
	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2767 attn_bits, attn_ack, asserted, deasserted);
2769 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2770 BNX2X_ERR("BAD attention state\n");
	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
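/* In bnx2x_attn_int() above, e.g. (hypothetical values) attn_bits 0x5,
 * attn_ack 0x1, attn_state 0x1 gives asserted = 0x5 & ~0x1 & ~0x1 = 0x4
 * (a newly raised line) and deasserted = ~0x5 & 0x1 & 0x1 = 0 (nothing
 * was dropped).
 */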
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
2787 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
2793 /* if (status == 0) */
2794 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
2817 struct net_device *dev = dev_instance;
2818 struct bnx2x *bp = netdev_priv(dev);
2820 /* Return here if interrupt is disabled */
2821 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2828 #ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/
2846 /* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
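/* A quick illustration of the carry (hypothetical values):
 *
 *	u32 s_hi = 0, s_lo = 0xffffffff;
 *	ADD_64(s_hi, 1, s_lo, 1);
 *
 * s_lo wraps to 0, which is < a_lo, so s_hi becomes 1 + 1 = 2.
 */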
2853 /* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
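/* Borrow example (hypothetical values): minuend 0x1:0x00000000 minus
 * subtrahend 0x0:0x00000001. m_lo < s_lo and d_hi = 1 > 0, so we loan:
 * d_hi-- gives 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, which
 * is exactly 0x1_0000_0000 - 1.
 */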
#define UPDATE_STAT64(s, t) \
	do { \
2883 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2884 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2885 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2886 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2887 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
2893 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2894 diff.lo, new->s##_lo, old->s##_lo); \
2895 ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)
#define UPDATE_EXTEND_STAT(s) \
	do { \
2908 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
2915 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2916 old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
2922 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2923 old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
2929 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2930 old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
2940 /* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)
#define SUB_EXTEND_USTAT(s, t) \
	do { \
2948 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
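/* e.g. bnx2x_hilo(&estats->total_bytes_received_hi) folds the adjacent
 * {hi, lo} pair into a single value on 64-bit kernels and, by design,
 * returns only the low 32 bits on 32-bit ones.
 */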
/*
 * Init service functions
 */
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;
2978 ramrod_data.drv_counter = bp->stats_counter++;
2979 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
2980 for_each_queue(bp, i)
2981 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
2983 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2984 ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;
2999 bp->stats_pending = 0;
3000 bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
3008 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3010 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3011 bp->port.old_nig_stats.brb_discard =
3012 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3013 bp->port.old_nig_stats.brb_truncate =
3014 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3015 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3016 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3017 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3018 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3020 /* function stats */
3021 for_each_queue(bp, i) {
3022 struct bnx2x_fastpath *fp = &bp->fp[i];
3024 memset(&fp->old_tclient, 0,
3025 sizeof(struct tstorm_per_client_stats));
3026 memset(&fp->old_uclient, 0,
3027 sizeof(struct ustorm_per_client_stats));
3028 memset(&fp->old_xclient, 0,
3029 sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3034 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3036 bp->stats_state = STATS_STATE_DISABLED;
3037 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
3044 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3046 *stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
3052 int loader_idx = PMF_DMAE_C(bp);
3054 memset(dmae, 0, sizeof(struct dmae_command));
		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3067 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3068 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3069 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3070 sizeof(struct dmae_command) *
3071 (loader_idx + 1)) >> 2;
3072 dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
3114 int loader_idx = PMF_DMAE_C(bp);
3115 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3136 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3137 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3138 dmae->src_addr_lo = bp->port.port_stx >> 2;
3139 dmae->src_addr_hi = 0;
3140 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3141 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3142 dmae->len = DMAE_LEN32_RD_MAX;
3143 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3148 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3149 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3150 dmae->src_addr_hi = 0;
3151 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3152 DMAE_LEN32_RD_MAX * 4);
3153 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3154 DMAE_LEN32_RD_MAX * 4);
3155 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3156 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3157 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
3168 int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));
3195 if (bp->port.port_stx) {
3197 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3198 dmae->opcode = opcode;
3199 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3200 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3201 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3202 dmae->dst_addr_hi = 0;
3203 dmae->len = sizeof(struct host_port_stats) >> 2;
3204 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3212 dmae->opcode = opcode;
3213 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3214 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3215 dmae->dst_addr_lo = bp->func_stx >> 2;
3216 dmae->dst_addr_hi = 0;
3217 dmae->len = sizeof(struct host_func_stats) >> 2;
3218 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));
3235 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3237 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3238 NIG_REG_INGRESS_BMAC0_MEM);
3240 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3241 BIGMAC_REGISTER_TX_STAT_GTBYT */
3242 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3243 dmae->opcode = opcode;
3244 dmae->src_addr_lo = (mac_addr +
3245 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3246 dmae->src_addr_hi = 0;
3247 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3248 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3249 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3250 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3251 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
3256 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3257 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3258 dmae->opcode = opcode;
3259 dmae->src_addr_lo = (mac_addr +
3260 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3261 dmae->src_addr_hi = 0;
3262 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3263 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3264 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3265 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3266 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3267 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3268 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3274 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3276 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3277 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3278 dmae->opcode = opcode;
3279 dmae->src_addr_lo = (mac_addr +
3280 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3281 dmae->src_addr_hi = 0;
3282 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3283 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3284 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3285 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
3290 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3291 dmae->opcode = opcode;
3292 dmae->src_addr_lo = (mac_addr +
3293 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3294 dmae->src_addr_hi = 0;
3295 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3296 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3297 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3305 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3306 dmae->opcode = opcode;
3307 dmae->src_addr_lo = (mac_addr +
3308 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3309 dmae->src_addr_hi = 0;
3310 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3311 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3312 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3313 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3314 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3315 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3324 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3327 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3328 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3329 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3334 dmae->opcode = opcode;
3335 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3336 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3337 dmae->src_addr_hi = 0;
3338 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3339 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3340 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3341 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3342 dmae->len = (2*sizeof(u32)) >> 2;
3343 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
3358 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3359 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3360 dmae->src_addr_hi = 0;
3361 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3362 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3363 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3364 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3365 dmae->len = (2*sizeof(u32)) >> 2;
3366 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3367 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
3385 memset(dmae, 0, sizeof(struct dmae_command));
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3397 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3398 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3399 dmae->dst_addr_lo = bp->func_stx >> 2;
3400 dmae->dst_addr_hi = 0;
3401 dmae->len = sizeof(struct host_func_stats) >> 2;
3402 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3403 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);
3414 else if (bp->func_stx)
3415 bnx2x_func_stats_init(bp);
3417 bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
3424 bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
3436 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3437 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3438 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3439 struct regpair diff;
3441 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3442 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3443 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3444 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3445 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3446 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3447 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3448 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3449 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3450 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3451 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3452 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3453 UPDATE_STAT64(tx_stat_gt127,
3454 tx_stat_etherstatspkts65octetsto127octets);
3455 UPDATE_STAT64(tx_stat_gt255,
3456 tx_stat_etherstatspkts128octetsto255octets);
3457 UPDATE_STAT64(tx_stat_gt511,
3458 tx_stat_etherstatspkts256octetsto511octets);
3459 UPDATE_STAT64(tx_stat_gt1023,
3460 tx_stat_etherstatspkts512octetsto1023octets);
3461 UPDATE_STAT64(tx_stat_gt1518,
3462 tx_stat_etherstatspkts1024octetsto1522octets);
3463 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3464 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3465 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3466 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3467 UPDATE_STAT64(tx_stat_gterr,
3468 tx_stat_dot3statsinternalmactransmiterrors);
3469 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3471 estats->pause_frames_received_hi =
3472 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3473 estats->pause_frames_received_lo =
3474 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3476 estats->pause_frames_sent_hi =
3477 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3478 estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
3484 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3485 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3486 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3488 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3489 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3490 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3491 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3492 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3493 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3494 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3495 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3496 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3497 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3498 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3499 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3500 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3501 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3502 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3503 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3504 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3507 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3508 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3509 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3510 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3511 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3515 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3516 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3518 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3520 estats->pause_frames_received_hi =
3521 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3522 estats->pause_frames_received_lo =
3523 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3524 ADD_64(estats->pause_frames_received_hi,
3525 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3526 estats->pause_frames_received_lo,
3527 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3529 estats->pause_frames_sent_hi =
3530 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3531 estats->pause_frames_sent_lo =
3532 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3533 ADD_64(estats->pause_frames_sent_hi,
3534 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3535 estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
3541 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3542 struct nig_stats *old = &(bp->port.old_nig_stats);
3543 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3544 struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3549 bnx2x_bmac_stats_update(bp);
3551 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3552 bnx2x_emac_stats_update(bp);
3554 else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3560 new->brb_discard - old->brb_discard);
3561 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3562 new->brb_truncate - old->brb_truncate);
3564 UPDATE_STAT64_NIG(egress_mac_pkt0,
3565 etherstatspkts1024octetsto1522octets);
3566 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3568 memcpy(old, new, sizeof(struct nig_stats));
3570 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3571 sizeof(struct mac_stx));
3572 estats->brb_drop_hi = pstats->brb_drop_hi;
3573 estats->brb_drop_lo = pstats->brb_drop_lo;
3575 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3577 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3578 if (nig_timer_max != estats->nig_timer_max) {
3579 estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
3588 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3589 struct tstorm_per_port_stats *tport =
3590 &stats->tstorm_common.port_statistics;
3591 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
3596 sizeof(struct host_func_stats) - 2*sizeof(u32));
3597 estats->error_bytes_received_hi = 0;
3598 estats->error_bytes_received_lo = 0;
3599 estats->etherstatsoverrsizepkts_hi = 0;
3600 estats->etherstatsoverrsizepkts_lo = 0;
3601 estats->no_buff_discard_hi = 0;
3602 estats->no_buff_discard_lo = 0;
3604 for_each_queue(bp, i) {
3605 struct bnx2x_fastpath *fp = &bp->fp[i];
3606 int cl_id = fp->cl_id;
3607 struct tstorm_per_client_stats *tclient =
3608 &stats->tstorm_common.client_statistics[cl_id];
3609 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3610 struct ustorm_per_client_stats *uclient =
3611 &stats->ustorm_common.client_statistics[cl_id];
3612 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3613 struct xstorm_per_client_stats *xclient =
3614 &stats->xstorm_common.client_statistics[cl_id];
3615 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
3620 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3621 bp->stats_counter) {
3622 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3623 " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3628 bp->stats_counter) {
3629 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3630 " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3635 bp->stats_counter) {
3636 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3637 " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -1;
		}

		qstats->total_bytes_received_hi =
3643 qstats->valid_bytes_received_hi =
3644 le32_to_cpu(tclient->total_rcv_bytes.hi);
3645 qstats->total_bytes_received_lo =
3646 qstats->valid_bytes_received_lo =
3647 le32_to_cpu(tclient->total_rcv_bytes.lo);
3649 qstats->error_bytes_received_hi =
3650 le32_to_cpu(tclient->rcv_error_bytes.hi);
3651 qstats->error_bytes_received_lo =
3652 le32_to_cpu(tclient->rcv_error_bytes.lo);
3654 ADD_64(qstats->total_bytes_received_hi,
3655 qstats->error_bytes_received_hi,
3656 qstats->total_bytes_received_lo,
3657 qstats->error_bytes_received_lo);
3659 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3660 total_unicast_packets_received);
3661 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3662 total_multicast_packets_received);
3663 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3664 total_broadcast_packets_received);
3665 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3666 etherstatsoverrsizepkts);
3667 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3669 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3670 total_unicast_packets_received);
3671 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3672 total_multicast_packets_received);
3673 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3674 total_broadcast_packets_received);
3675 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3676 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3677 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3679 qstats->total_bytes_transmitted_hi =
3680 le32_to_cpu(xclient->total_sent_bytes.hi);
3681 qstats->total_bytes_transmitted_lo =
3682 le32_to_cpu(xclient->total_sent_bytes.lo);
3684 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3685 total_unicast_packets_transmitted);
3686 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3687 total_multicast_packets_transmitted);
3688 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3689 total_broadcast_packets_transmitted);
3691 old_tclient->checksum_discard = tclient->checksum_discard;
3692 old_tclient->ttl0_discard = tclient->ttl0_discard;
3694 ADD_64(fstats->total_bytes_received_hi,
3695 qstats->total_bytes_received_hi,
3696 fstats->total_bytes_received_lo,
3697 qstats->total_bytes_received_lo);
3698 ADD_64(fstats->total_bytes_transmitted_hi,
3699 qstats->total_bytes_transmitted_hi,
3700 fstats->total_bytes_transmitted_lo,
3701 qstats->total_bytes_transmitted_lo);
3702 ADD_64(fstats->total_unicast_packets_received_hi,
3703 qstats->total_unicast_packets_received_hi,
3704 fstats->total_unicast_packets_received_lo,
3705 qstats->total_unicast_packets_received_lo);
3706 ADD_64(fstats->total_multicast_packets_received_hi,
3707 qstats->total_multicast_packets_received_hi,
3708 fstats->total_multicast_packets_received_lo,
3709 qstats->total_multicast_packets_received_lo);
3710 ADD_64(fstats->total_broadcast_packets_received_hi,
3711 qstats->total_broadcast_packets_received_hi,
3712 fstats->total_broadcast_packets_received_lo,
3713 qstats->total_broadcast_packets_received_lo);
3714 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3715 qstats->total_unicast_packets_transmitted_hi,
3716 fstats->total_unicast_packets_transmitted_lo,
3717 qstats->total_unicast_packets_transmitted_lo);
3718 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3719 qstats->total_multicast_packets_transmitted_hi,
3720 fstats->total_multicast_packets_transmitted_lo,
3721 qstats->total_multicast_packets_transmitted_lo);
3722 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3723 qstats->total_broadcast_packets_transmitted_hi,
3724 fstats->total_broadcast_packets_transmitted_lo,
3725 qstats->total_broadcast_packets_transmitted_lo);
3726 ADD_64(fstats->valid_bytes_received_hi,
3727 qstats->valid_bytes_received_hi,
3728 fstats->valid_bytes_received_lo,
3729 qstats->valid_bytes_received_lo);
3731 ADD_64(estats->error_bytes_received_hi,
3732 qstats->error_bytes_received_hi,
3733 estats->error_bytes_received_lo,
3734 qstats->error_bytes_received_lo);
3735 ADD_64(estats->etherstatsoverrsizepkts_hi,
3736 qstats->etherstatsoverrsizepkts_hi,
3737 estats->etherstatsoverrsizepkts_lo,
3738 qstats->etherstatsoverrsizepkts_lo);
3739 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
3744 estats->rx_stat_ifhcinbadoctets_hi,
3745 fstats->total_bytes_received_lo,
3746 estats->rx_stat_ifhcinbadoctets_lo);
3748 memcpy(estats, &(fstats->total_bytes_received_hi),
3749 sizeof(struct host_func_stats) - 2*sizeof(u32));
3751 ADD_64(estats->etherstatsoverrsizepkts_hi,
3752 estats->rx_stat_dot3statsframestoolong_hi,
3753 estats->etherstatsoverrsizepkts_lo,
3754 estats->rx_stat_dot3statsframestoolong_lo);
3755 ADD_64(estats->error_bytes_received_hi,
3756 estats->rx_stat_ifhcinbadoctets_hi,
3757 estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
3762 le32_to_cpu(tport->mac_filter_discard);
3763 estats->xxoverflow_discard =
3764 le32_to_cpu(tport->xxoverflow_discard);
3765 estats->brb_truncate_discard =
3766 le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;
	bp->stats_pending = 0;

	return 0;
}

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
3779 struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
3784 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3785 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3786 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3788 nstats->tx_packets =
3789 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3790 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3791 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3793 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3795 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3797 nstats->rx_dropped = estats->mac_discard;
3798 for_each_queue(bp, i)
3799 nstats->rx_dropped +=
3800 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3807 nstats->collisions =
3808 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3810 nstats->rx_length_errors =
3811 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3812 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3813 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3814 bnx2x_hilo(&estats->brb_truncate_hi);
3815 nstats->rx_crc_errors =
3816 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3817 nstats->rx_frame_errors =
3818 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3819 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3820 nstats->rx_missed_errors = estats->xxoverflow_discard;
3822 nstats->rx_errors = nstats->rx_length_errors +
3823 nstats->rx_over_errors +
3824 nstats->rx_crc_errors +
3825 nstats->rx_frame_errors +
3826 nstats->rx_fifo_errors +
3827 nstats->rx_missed_errors;
3829 nstats->tx_aborted_errors =
3830 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3831 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3832 nstats->tx_carrier_errors =
3833 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3834 nstats->tx_fifo_errors = 0;
3835 nstats->tx_heartbeat_errors = 0;
3836 nstats->tx_window_errors = 0;
3838 nstats->tx_errors = nstats->tx_aborted_errors +
3839 nstats->tx_carrier_errors +
		bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
3849 estats->rx_err_discard_pkt = 0;
3850 estats->rx_skb_alloc_failed = 0;
3851 estats->hw_csum_err = 0;
3852 for_each_queue(bp, i) {
3853 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3855 estats->driver_xoff += qstats->driver_xoff;
3856 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3857 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
3864 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);
3872 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
3879 bnx2x_drv_stats_update(bp);
3881 if (bp->msglevel & NETIF_MSG_TIMER) {
3882 struct tstorm_per_client_stats *old_tclient =
3883 &bp->fp->old_tclient;
3884 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3885 struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
3893 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
3896 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3897 bp->fp->rx_comp_cons),
3898 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3899 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3900 "brb truncate %u\n",
3901 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3902 qstats->driver_xoff,
3903 estats->brb_drop_lo, estats->brb_truncate_lo);
3904 printk(KERN_DEBUG "tstats: checksum_discard %u "
3905 "packets_too_big_discard %lu no_buff_discard %lu "
3906 "mac_discard %u mac_filter_discard %u "
3907 "xxovrflow_discard %u brb_truncate_discard %u "
3908 "ttl0_discard %u\n",
3909 old_tclient->checksum_discard,
3910 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3911 bnx2x_hilo(&qstats->no_buff_discard_hi),
3912 estats->mac_discard, estats->mac_filter_discard,
3913 estats->xxoverflow_discard, estats->brb_truncate_discard,
3914 old_tclient->ttl0_discard);
3916 for_each_queue(bp, i) {
3917 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3918 bnx2x_fp(bp, i, tx_pkt),
3919 bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
3933 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3935 bp->executer_idx = 0;
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3948 if (bp->port.port_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3955 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3956 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3957 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3958 dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
3966 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3967 dmae->comp_addr_hi =
3968 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3978 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3979 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3980 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3981 dmae->dst_addr_lo = bp->func_stx >> 2;
3982 dmae->dst_addr_hi = 0;
3983 dmae->len = sizeof(struct host_func_stats) >> 2;
3984 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3985 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);
	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);
	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);
4009 bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}
4018 static const struct {
4019 void (*action)(struct bnx2x *bp);
4020 enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4025 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4026 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
4031 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4032 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
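/* Example walk through the table: with stats disabled, a LINK_UP event
 * runs bnx2x_stats_start() and moves the state to STATS_STATE_ENABLED;
 * a subsequent UPDATE event then runs bnx2x_stats_update() and keeps the
 * state ENABLED.
 */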
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
4039 enum bnx2x_stats_state state = bp->stats_state;
4041 bnx2x_stats_stm[state][event].action(bp);
4042 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4044 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4045 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
4051 struct bnx2x *bp = (struct bnx2x *) data;
	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;
4063 bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
4073 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4074 /* TBD - add SYSTEM_TIME */
4075 drv_pulse = bp->fw_drv_pulse_wr_seq;
4076 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4078 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4079 MCP_PULSE_SEQ_MASK);
4080 /* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
4084 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4085 /* someone lost a heartbeat... */
4086 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}
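	/* e.g. (hypothetical) drv_pulse 0x11 vs. mcp_pulse 0x10 is fine (the
	 * MCP just has not echoed the latest beat yet), whereas drv_pulse 0x11
	 * vs. 0x13 trips the check above, since the delta is neither 0 nor 1.
	 */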
4091 if ((bp->state == BNX2X_STATE_OPEN) ||
4092 (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */
/* nic load/unload */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);
4111 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4112 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4113 sizeof(struct ustorm_status_block)/4);
4114 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4115 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;
4132 REG_WR(bp, BAR_USTRORM_INTMEM +
4133 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4134 REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4138 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4140 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4141 REG_WR16(bp, BAR_USTRORM_INTMEM +
4142 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4145 section = ((u64)mapping) + offsetof(struct host_status_block,
4147 sb->c_status_block.status_block_id = sb_id;
4149 REG_WR(bp, BAR_CSTRORM_INTMEM +
4150 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4151 REG_WR(bp, BAR_CSTRORM_INTMEM +
4152 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4154 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4155 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4157 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4158 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4159 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4161 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4164 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4166 int func = BP_FUNC(bp);
4168 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4169 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4170 sizeof(struct ustorm_def_status_block)/4);
4171 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4172 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4173 sizeof(struct cstorm_def_status_block)/4);
4174 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4175 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4176 sizeof(struct xstorm_def_status_block)/4);
4177 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4178 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4179 sizeof(struct tstorm_def_status_block)/4);
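/* The default status block carries the slowpath indices: the attention
 * status block plus one section per storm (U/C/T/X). The attention enable
 * signatures are latched per dynamic group from the AEU enable registers
 * so the slowpath interrupt handler can tell which group fired.
 */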
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
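/* Interrupt coalescing: per queue, the storm timeout is programmed from
 * rx_ticks/tx_ticks (scaled to the host coalescing timer units) and the
 * corresponding HC index is enabled only when a non-zero tick value was
 * requested; a zero value leaves the index disabled, i.e. no coalescing
 * on that ring.
 */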
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
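/* Rx ring setup: each queue gets three rings that share producer/consumer
 * accounting - the BD ring (buffer descriptors), the CQE completion ring
 * and, when TPA is enabled, an SGE ring for aggregated packets. The
 * "next page" elements chain the last descriptors of every page to the
 * first descriptor of the following page, which is why the init loops
 * below start at index 1 and write only the page-boundary entries.
 */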
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;

	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);

		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					   BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}
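/* Per-connection context setup: the USTORM section describes the Rx side
 * (BD/SGE page bases, buffer sizes, TPA flags), the XSTORM section the Tx
 * side (BD page base and doorbell data address), and the CDU reserved
 * fields encode the HW connection id so the context manager can validate
 * accesses.
 */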
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}
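/* RSS indirection: when multi-queue is enabled, the TSTORM indirection
 * table spreads flows round-robin over the Rx queues by client id; with
 * ETH_RSS_MODE_DISABLED the table is left untouched and all traffic lands
 * on the leading client.
 */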
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}
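/* Rx mode is expressed to the TSTORM as per-function accept/drop masks for
 * unicast, multicast and broadcast; the mask bit is the function's logical
 * id, so each PCI function controls only its own filtering.
 */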
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case the fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set
     to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}
	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_rx_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link
		   Until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables =
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (bp->vn_weight_sum)
			bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		else
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");
	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}
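/* The three init_internal_* helpers above are cascaded by load_code: a
 * COMMON load (first driver on the chip) also runs the PORT and FUNCTION
 * stages, a PORT load (first driver on this port) also runs FUNCTION, and
 * a FUNCTION load runs only the per-function stage. The missing breaks in
 * the switch below are therefore intentional.
 */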
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d  cl_id %d  sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " un-compression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}
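/* Firmware images are stored gzip-compressed. bnx2x_gunzip() below
 * validates the gzip magic, skips the 10-byte header (plus the optional
 * NUL-terminated file name when the FNAME flag is set) and inflates the
 * raw deflate stream (-MAX_WBITS) into the pre-allocated gunzip buffer,
 * reporting the output length in 32-bit words.
 */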
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
#ifndef BCM_ISCSI
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static int bnx2x_init_common(struct bnx2x *bp)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));

	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
	msleep(30);
	REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);

	bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
#ifdef BCM_ISCSI
	REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
	REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
#endif

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
	bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
	bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
	bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_ISCSI
	bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
#endif

	bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
	bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
	if (CHIP_IS_E1H(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));

	bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
	bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
	bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
	bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1H/2);
		bnx2x_init_fill(bp,
				USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
				0, STORM_INTMEM_SIZE_E1H/2);
	} else { /* E1 */
		bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
		bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
				STORM_INTMEM_SIZE_E1);
	}

	bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
	bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
	bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
	bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
	bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
	bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
		REG_WR(bp, i, 0xc0cac01a);
		/* TODO: replace with something meaningful */
	}
	bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		printk(KERN_ALERT PFX "please adjust the size of"
		       " cdu_context(%ld)\n", (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
	if (CHIP_IS_E1(bp)) {
		/* !!! fix PXP client credit until excel update */
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
		REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
	}

	bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
	bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);

	/* PXPCS COMMON comes here */
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	/* EMAC0 COMMON comes here */
	/* EMAC1 COMMON comes here */
	/* DBU COMMON comes here */
	/* DBG COMMON comes here */

	bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* Fan failure is indicated by SPIO 5 */
		bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
			       MISC_REGISTERS_SPIO_INPUT_HI_Z);

		/* set to active low mode */
		val = REG_RD(bp, MISC_REG_SPIO_INT);
		val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
		REG_WR(bp, MISC_REG_SPIO_INT, val);

		/* enable interrupt to signal the IGU */
		val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
		val |= (1 << MISC_REGISTERS_SPIO_5);
		REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
		break;

	default:
		break;
	}

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot initialize link\n");

	return 0;
}
static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Port PXP comes here */
	/* Port PXP2 comes here */
#ifdef BCM_ISCSI
	/* Port0  1
	 * Port1  385 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  2
	 * Port1  386 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));

	/* Port0  3
	 * Port1  387 */
	i++;
	wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
	wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
	REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
	REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
#endif
	/* Port CMs come here */
	bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
			     (port ? XCM_PORT1_END : XCM_PORT0_END));

	/* Port QM comes here */
#ifdef BCM_ISCSI
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);

	bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
			     func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
#endif
	/* Port DQ comes here */

	bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
			     (port ? BRB1_PORT1_END : BRB1_PORT0_END));
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);

	/* Port PRS comes here */
	/* Port TSDM comes here */
	/* Port CSDM comes here */
	/* Port USDM comes here */
	/* Port XSDM comes here */
	bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
			     port ? TSEM_PORT1_END : TSEM_PORT0_END);
	bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
			     port ? USEM_PORT1_END : USEM_PORT0_END);
	bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
			     port ? CSEM_PORT1_END : CSEM_PORT0_END);
	bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
			     port ? XSEM_PORT1_END : XSEM_PORT0_END);
	/* Port UPB comes here */
	/* Port XPB comes here */

	bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
			     port ? PBF_PORT1_END : PBF_PORT0_END);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_ISCSI
	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);

	wb_write[0] = U64_LO(bp->t2_mapping);
	wb_write[1] = U64_HI(bp->t2_mapping);
	REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
	wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
	wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
	REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
	/* Port SRCH comes here */
#endif
	/* Port CDU comes here */
	/* Port CFC comes here */

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
			     port ? HC_PORT1_END : HC_PORT0_END);

	bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
				    MISC_AEU_PORT0_START,
			     port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	/* Port PXPCS comes here */
	/* Port EMAC0 comes here */
	/* Port EMAC1 comes here */
	/* Port DBU comes here */
	/* Port DBG comes here */
	bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
			     port ? NIG_PORT1_END : NIG_PORT0_END);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		/* support pause requests from USDM, TSDM and BRB */
		REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	/* Port MCP comes here */
	/* Port DMAE comes here */

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		/* add SPIO 5 to group 0 */
		val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}
#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has a 1=valid bit added
   at the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)

#define CNIC_ILT_LINES		0
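/* Worked example: for a page at physical address 0x12345678000,
 * ONCHIP_ADDR1() yields the low 32 bits of (addr >> 12), i.e. 0x12345678,
 * and ONCHIP_ADDR2() yields 0x00100000 - just the valid flag, since the
 * shifted address fits in 44 bits. The 1 << 20 in the upper word is bit 52
 * of the combined 64-bit entry, i.e. the "53rd bit" mentioned above.
 */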
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	u32 reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));

	if (CHIP_IS_E1H(bp)) {
		for (i = 0; i < 9; i++)
			bnx2x_init_block(bp,
					 cm_start[func][i], cm_end[func][i]);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
/* send the MCP a request, block until there is a reply */
static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block) +
			       sizeof(struct eth_tx_db_data));
	}

	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
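/* The alloc/free macro pairs keep error handling uniform: any failed
 * allocation jumps to alloc_mem_err, which calls bnx2x_free_mem(); since
 * the free macros check for NULL before releasing, a partially completed
 * allocation sequence unwinds safely.
 */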
	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block) +
				sizeof(struct eth_tx_db_data));
	}

	/* Rx */
	for_each_rx_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {

		bnx2x_fp(bp, i, hw_tx_prods) =
				(void *)(bnx2x_fp(bp, i, status_blk) + 1);

		bnx2x_fp(bp, i, tx_prods_mapping) =
				bnx2x_fp(bp, i, status_blk_mapping) +
				sizeof(struct host_status_block);

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(struct eth_tx_bd) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* Initialize T1 */
	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6224 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6228 for_each_tx_queue(bp, i) {
6229 struct bnx2x_fastpath *fp = &bp->fp[i];
6231 u16 bd_cons = fp->tx_bd_cons;
6232 u16 sw_prod = fp->tx_pkt_prod;
6233 u16 sw_cons = fp->tx_pkt_cons;
6235 while (sw_cons != sw_prod) {
6236 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6242 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6246 for_each_rx_queue(bp, j) {
6247 struct bnx2x_fastpath *fp = &bp->fp[j];
6249 for (i = 0; i < NUM_RX_BD; i++) {
6250 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6251 struct sk_buff *skb = rx_buf->skb;
6256 pci_unmap_single(bp->pdev,
6257 pci_unmap_addr(rx_buf, mapping),
6259 PCI_DMA_FROMDEVICE);
6264 if (!fp->disable_tpa)
6265 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6266 ETH_MAX_AGGREGATION_QUEUES_E1 :
6267 ETH_MAX_AGGREGATION_QUEUES_E1H);
6271 static void bnx2x_free_skbs(struct bnx2x *bp)
6273 bnx2x_free_tx_skbs(bp);
6274 bnx2x_free_rx_skbs(bp);
6277 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6281 free_irq(bp->msix_table[0].vector, bp->dev);
6282 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6283 bp->msix_table[0].vector);
6285 for_each_queue(bp, i) {
6286 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6287 "state %x\n", i, bp->msix_table[i + offset].vector,
6288 bnx2x_fp(bp, i, state));
6290 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6294 static void bnx2x_free_irq(struct bnx2x *bp)
6296 if (bp->flags & USING_MSIX_FLAG) {
6297 bnx2x_free_msix_irqs(bp);
6298 pci_disable_msix(bp->pdev);
6299 bp->flags &= ~USING_MSIX_FLAG;
6301 } else if (bp->flags & USING_MSI_FLAG) {
6302 free_irq(bp->pdev->irq, bp->dev);
6303 pci_disable_msi(bp->pdev);
6304 bp->flags &= ~USING_MSI_FLAG;
6307 free_irq(bp->pdev->irq, bp->dev);
6310 static int bnx2x_enable_msix(struct bnx2x *bp)
6312 int i, rc, offset = 1;
6315 bp->msix_table[0].entry = igu_vec;
6316 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6318 for_each_queue(bp, i) {
6319 igu_vec = BP_L_ID(bp) + offset + i;
6320 bp->msix_table[i + offset].entry = igu_vec;
6321 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6322 "(fastpath #%u)\n", i + offset, igu_vec, i);
6325 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6326 BNX2X_NUM_QUEUES(bp) + offset);
6328 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6332 bp->flags |= USING_MSIX_FLAG;
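/*
 * In this kernel generation pci_enable_msix() returns 0 on success, a
 * negative errno on failure, and a positive count when fewer vectors
 * are available than requested; the code above treats any non-zero
 * return as "MSI-X not attainable". A sketch of the classic retry loop
 * (hypothetical helper, not used by this driver):
 */
static int example_enable_msix(struct pci_dev *pdev,
			       struct msix_entry *table, int nvec)
{
	int rc;

	while (nvec > 0) {
		rc = pci_enable_msix(pdev, table, nvec);
		if (rc == 0)
			return nvec;	/* got all requested vectors */
		if (rc < 0)
			return rc;	/* hard failure */
		nvec = rc;		/* retry with what is available */
	}
	return -ENOSPC;
}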
6337 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6339 int i, rc, offset = 1;
6341 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6342 bp->dev->name, bp->dev);
6344 BNX2X_ERR("request sp irq failed\n");
6348 for_each_queue(bp, i) {
6349 struct bnx2x_fastpath *fp = &bp->fp[i];
6351 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6352 rc = request_irq(bp->msix_table[i + offset].vector,
6353 bnx2x_msix_fp_int, 0, fp->name, fp);
6355 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6356 bnx2x_free_msix_irqs(bp);
6360 fp->state = BNX2X_FP_STATE_IRQ;
6363 i = BNX2X_NUM_QUEUES(bp);
6365 printk(KERN_INFO PFX
6366 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6367 bp->dev->name, bp->msix_table[0].vector,
6368 bp->msix_table[offset].vector,
6369 bp->msix_table[offset + i - 1].vector);
6371 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6372 bp->dev->name, bp->msix_table[0].vector,
6373 bp->msix_table[offset + i - 1].vector);
6378 static int bnx2x_enable_msi(struct bnx2x *bp)
6382 rc = pci_enable_msi(bp->pdev);
6384 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6387 bp->flags |= USING_MSI_FLAG;
6392 static int bnx2x_req_irq(struct bnx2x *bp)
6394 unsigned long flags;
6397 if (bp->flags & USING_MSI_FLAG)
6400 flags = IRQF_SHARED;
6402 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6403 bp->dev->name, bp->dev);
6405 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6410 static void bnx2x_napi_enable(struct bnx2x *bp)
6414 for_each_rx_queue(bp, i)
6415 napi_enable(&bnx2x_fp(bp, i, napi));
6418 static void bnx2x_napi_disable(struct bnx2x *bp)
6422 for_each_rx_queue(bp, i)
6423 napi_disable(&bnx2x_fp(bp, i, napi));
6426 static void bnx2x_netif_start(struct bnx2x *bp)
6428 if (atomic_dec_and_test(&bp->intr_sem)) {
6429 if (netif_running(bp->dev)) {
6430 bnx2x_napi_enable(bp);
6431 bnx2x_int_enable(bp);
6432 if (bp->state == BNX2X_STATE_OPEN)
6433 netif_tx_wake_all_queues(bp->dev);
6438 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6440 bnx2x_int_disable_sync(bp, disable_hw);
6441 bnx2x_napi_disable(bp);
6442 if (netif_running(bp->dev)) {
6443 netif_tx_disable(bp->dev);
6444 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6449 * Init service functions
6452 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6454 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6455 int port = BP_PORT(bp);
6458 * unicasts 0-31:port0 32-63:port1
6459 * multicast 64-127:port0 128-191:port1
6461 config->hdr.length = 2;
6462 config->hdr.offset = port ? 32 : 0;
6463 config->hdr.client_id = BP_CL_ID(bp);
6464 config->hdr.reserved1 = 0;
6467 config->config_table[0].cam_entry.msb_mac_addr =
6468 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6469 config->config_table[0].cam_entry.middle_mac_addr =
6470 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6471 config->config_table[0].cam_entry.lsb_mac_addr =
6472 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6473 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6475 config->config_table[0].target_table_entry.flags = 0;
6477 CAM_INVALIDATE(config->config_table[0]);
6478 config->config_table[0].target_table_entry.client_id = 0;
6479 config->config_table[0].target_table_entry.vlan_id = 0;
6481 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6482 (set ? "setting" : "clearing"),
6483 config->config_table[0].cam_entry.msb_mac_addr,
6484 config->config_table[0].cam_entry.middle_mac_addr,
6485 config->config_table[0].cam_entry.lsb_mac_addr);
6488 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6489 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6490 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6491 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6493 config->config_table[1].target_table_entry.flags =
6494 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6496 CAM_INVALIDATE(config->config_table[1]);
6497 config->config_table[1].target_table_entry.client_id = 0;
6498 config->config_table[1].target_table_entry.vlan_id = 0;
6500 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6501 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6502 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
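/*
 * The swab16() calls above store the MAC in the CAM as three 16-bit
 * words in transmission (big-endian) byte order; on a little-endian
 * host the packing is equivalent to this hypothetical helper:
 */
static void example_mac_to_cam(const u8 *mac, u16 *msb, u16 *mid, u16 *lsb)
{
	*msb = (mac[0] << 8) | mac[1];
	*mid = (mac[2] << 8) | mac[3];
	*lsb = (mac[4] << 8) | mac[5];
}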
6505 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6507 struct mac_configuration_cmd_e1h *config =
6508 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6510 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6511 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6515 /* CAM allocation for E1H
6516 * unicasts: by func number
6517 * multicast: 20+FUNC*20, 20 each
6519 config->hdr.length = 1;
6520 config->hdr.offset = BP_FUNC(bp);
6521 config->hdr.client_id = BP_CL_ID(bp);
6522 config->hdr.reserved1 = 0;
6525 config->config_table[0].msb_mac_addr =
6526 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6527 config->config_table[0].middle_mac_addr =
6528 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6529 config->config_table[0].lsb_mac_addr =
6530 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6531 config->config_table[0].client_id = BP_L_ID(bp);
6532 config->config_table[0].vlan_id = 0;
6533 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6535 config->config_table[0].flags = BP_PORT(bp);
6537 config->config_table[0].flags =
6538 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6540 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6541 (set ? "setting" : "clearing"),
6542 config->config_table[0].msb_mac_addr,
6543 config->config_table[0].middle_mac_addr,
6544 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6546 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6547 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6548 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6551 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6552 int *state_p, int poll)
6554 /* can take a while if any port is running */
6557 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6558 poll ? "polling" : "waiting", state, idx);
6563 bnx2x_rx_int(bp->fp, 10);
6564 /* if index is different from 0
6565 * the reply for some commands will
6566 * be on the non-default queue
6567 */
6568 if (idx)
6569 bnx2x_rx_int(&bp->fp[idx], 10);
6572 mb(); /* state is changed by bnx2x_sp_event() */
6573 if (*state_p == state)
6580 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6581 poll ? "polling" : "waiting", state, idx);
6582 #ifdef BNX2X_STOP_ON_ERROR
6589 static int bnx2x_setup_leading(struct bnx2x *bp)
6593 /* reset IGU state */
6594 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6597 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6599 /* Wait for completion */
6600 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6605 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6607 struct bnx2x_fastpath *fp = &bp->fp[index];
6609 /* reset IGU state */
6610 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6613 fp->state = BNX2X_FP_STATE_OPENING;
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6617 /* Wait for completion */
6618 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6622 static int bnx2x_poll(struct napi_struct *napi, int budget);
6624 static void bnx2x_set_int_mode(struct bnx2x *bp)
6632 bp->num_rx_queues = num_queues;
6633 bp->num_tx_queues = num_queues;
6635 "set number of queues to %d\n", num_queues);
6640 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6641 num_queues = min_t(u32, num_online_cpus(),
6642 BNX2X_MAX_QUEUES(bp));
6645 bp->num_rx_queues = num_queues;
6646 bp->num_tx_queues = num_queues;
6647 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6648 " number of tx queues to %d\n",
6649 bp->num_rx_queues, bp->num_tx_queues);
6650 /* if we can't use MSI-X we only need one fp,
6651 * so try to enable MSI-X with the requested number of fps
6652 * and fall back to MSI or legacy INTx with one fp
6653 */
6654 if (bnx2x_enable_msix(bp)) {
6655 /* failed to enable MSI-X */
6657 bp->num_rx_queues = num_queues;
6658 bp->num_tx_queues = num_queues;
6660 BNX2X_ERR("Multi requested but failed to "
6661 "enable MSI-X set number of "
6662 "queues to %d\n", num_queues);
6666 bp->dev->real_num_tx_queues = bp->num_tx_queues;
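/*
 * The queue-count decision above boils down to: with regular RSS use up
 * to one fastpath per online CPU (capped by the hardware maximum) and
 * require MSI-X; otherwise, or when MSI-X cannot be enabled, run a
 * single fastpath. As a pure function (hypothetical name):
 */
static int example_pick_num_queues(struct bnx2x *bp)
{
	if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
		return min_t(u32, num_online_cpus(), BNX2X_MAX_QUEUES(bp));
	return 1;
}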
6669 static void bnx2x_set_rx_mode(struct net_device *dev);
6671 /* must be called with rtnl_lock */
6672 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6676 #ifdef BNX2X_STOP_ON_ERROR
6677 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6678 if (unlikely(bp->panic))
6682 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6684 bnx2x_set_int_mode(bp);
6686 if (bnx2x_alloc_mem(bp))
6689 for_each_rx_queue(bp, i)
6690 bnx2x_fp(bp, i, disable_tpa) =
6691 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6693 for_each_rx_queue(bp, i)
6694 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6697 #ifdef BNX2X_STOP_ON_ERROR
6698 for_each_rx_queue(bp, i) {
6699 struct bnx2x_fastpath *fp = &bp->fp[i];
6701 fp->poll_no_work = 0;
6703 fp->poll_max_calls = 0;
6704 fp->poll_complete = 0;
6708 bnx2x_napi_enable(bp);
6710 if (bp->flags & USING_MSIX_FLAG) {
6711 rc = bnx2x_req_msix_irqs(bp);
6713 pci_disable_msix(bp->pdev);
6717 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6718 bnx2x_enable_msi(bp);
6720 rc = bnx2x_req_irq(bp);
6722 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6723 if (bp->flags & USING_MSI_FLAG)
6724 pci_disable_msi(bp->pdev);
6727 if (bp->flags & USING_MSI_FLAG) {
6728 bp->dev->irq = bp->pdev->irq;
6729 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6730 bp->dev->name, bp->pdev->irq);
6734 /* Send LOAD_REQUEST command to MCP.
6735 Returns the type of LOAD command:
6736 if it is the first port to be initialized,
6737 common blocks should be initialized; otherwise, not.
6738 */
6739 if (!BP_NOMCP(bp)) {
6740 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6742 BNX2X_ERR("MCP response failure, aborting\n");
6746 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6747 rc = -EBUSY; /* other port in diagnostic mode */
6752 int port = BP_PORT(bp);
6754 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6755 load_count[0], load_count[1], load_count[2]);
6757 load_count[1 + port]++;
6758 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6759 load_count[0], load_count[1], load_count[2]);
6760 if (load_count[0] == 1)
6761 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6762 else if (load_count[1 + port] == 1)
6763 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6765 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
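/*
 * Without an MCP the driver mimics the firmware's load arbitration in
 * software: load_count[0] counts every loaded function and
 * load_count[1 + port] the functions per port, so the first function
 * overall does COMMON init, the first on a port does PORT init, and
 * everyone else only FUNCTION init. The same decision as a hypothetical
 * helper:
 */
static u32 example_nomcp_load_code(int port)
{
	load_count[0]++;
	load_count[1 + port]++;

	if (load_count[0] == 1)
		return FW_MSG_CODE_DRV_LOAD_COMMON;
	if (load_count[1 + port] == 1)
		return FW_MSG_CODE_DRV_LOAD_PORT;
	return FW_MSG_CODE_DRV_LOAD_FUNCTION;
}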
6768 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6769 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6773 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6776 rc = bnx2x_init_hw(bp, load_code);
6778 BNX2X_ERR("HW init failed, aborting\n");
6782 /* Setup NIC internals and enable interrupts */
6783 bnx2x_nic_init(bp, load_code);
6785 /* Send LOAD_DONE command to MCP */
6786 if (!BP_NOMCP(bp)) {
6787 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6789 BNX2X_ERR("MCP response failure, aborting\n");
6795 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6797 rc = bnx2x_setup_leading(bp);
6799 BNX2X_ERR("Setup leading failed!\n");
6803 if (CHIP_IS_E1H(bp))
6804 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6805 BNX2X_ERR("!!! mf_cfg function disabled\n");
6806 bp->state = BNX2X_STATE_DISABLED;
6809 if (bp->state == BNX2X_STATE_OPEN)
6810 for_each_nondefault_queue(bp, i) {
6811 rc = bnx2x_setup_multi(bp, i);
6817 bnx2x_set_mac_addr_e1(bp, 1);
6819 bnx2x_set_mac_addr_e1h(bp, 1);
6822 bnx2x_initial_phy_init(bp);
6824 /* Start fast path */
6825 switch (load_mode) {
6827 /* Tx queue should be only reenabled */
6828 netif_tx_wake_all_queues(bp->dev);
6829 /* Initialize the receive filter. */
6830 bnx2x_set_rx_mode(bp->dev);
6834 netif_tx_start_all_queues(bp->dev);
6835 /* Initialize the receive filter. */
6836 bnx2x_set_rx_mode(bp->dev);
6840 /* Initialize the receive filter. */
6841 bnx2x_set_rx_mode(bp->dev);
6842 bp->state = BNX2X_STATE_DIAG;
6850 bnx2x__link_status_update(bp);
6852 /* start the timer */
6853 mod_timer(&bp->timer, jiffies + bp->current_interval);
6859 bnx2x_int_disable_sync(bp, 1);
6860 if (!BP_NOMCP(bp)) {
6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6862 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6865 /* Free SKBs, SGEs, TPA pool and driver internals */
6866 bnx2x_free_skbs(bp);
6867 for_each_rx_queue(bp, i)
6868 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6873 bnx2x_napi_disable(bp);
6874 for_each_rx_queue(bp, i)
6875 netif_napi_del(&bnx2x_fp(bp, i, napi));
6878 /* TBD: we really need to reset the chip
6879 if we want to recover from this */
6883 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6885 struct bnx2x_fastpath *fp = &bp->fp[index];
6888 /* halt the connection */
6889 fp->state = BNX2X_FP_STATE_HALTING;
6890 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6892 /* Wait for completion */
6893 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6895 if (rc) /* timeout */
6898 /* delete cfc entry */
6899 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6901 /* Wait for completion */
6902 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6907 static int bnx2x_stop_leading(struct bnx2x *bp)
6909 u16 dsb_sp_prod_idx;
6910 /* if the other port is handling traffic,
6911 this can take a lot of time */
6917 /* Send HALT ramrod */
6918 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6919 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6921 /* Wait for completion */
6922 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6923 &(bp->fp[0].state), 1);
6924 if (rc) /* timeout */
6927 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6929 /* Send PORT_DELETE ramrod */
6930 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6932 /* Wait for completion to arrive on the default status block;
6933 we are going to reset the chip anyway,
6934 so there is not much to do if this times out
6935 */
6936 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6938 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6939 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6940 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6941 #ifdef BNX2X_STOP_ON_ERROR
6950 rmb(); /* Refresh the dsb_sp_prod */
6952 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6953 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6958 static void bnx2x_reset_func(struct bnx2x *bp)
6960 int port = BP_PORT(bp);
6961 int func = BP_FUNC(bp);
6965 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6966 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6969 base = FUNC_ILT_BASE(func);
6970 for (i = base; i < base + ILT_PER_FUNC; i++)
6971 bnx2x_ilt_wr(bp, i, 0);
6974 static void bnx2x_reset_port(struct bnx2x *bp)
6976 int port = BP_PORT(bp);
6979 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6981 /* Do not rcv packets to BRB */
6982 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6983 /* Do not direct rcv packets that are not for MCP to the BRB */
6984 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6985 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6988 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6991 /* Check for BRB port occupancy */
6992 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6994 DP(NETIF_MSG_IFDOWN,
6995 "BRB1 is not empty %d blocks are occupied\n", val);
6997 /* TODO: Close Doorbell port? */
7000 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7002 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7003 BP_FUNC(bp), reset_code);
7005 switch (reset_code) {
7006 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7007 bnx2x_reset_port(bp);
7008 bnx2x_reset_func(bp);
7009 bnx2x_reset_common(bp);
7012 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7013 bnx2x_reset_port(bp);
7014 bnx2x_reset_func(bp);
7017 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7018 bnx2x_reset_func(bp);
7022 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7027 /* must be called with rtnl_lock */
7028 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7030 int port = BP_PORT(bp);
7034 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7036 bp->rx_mode = BNX2X_RX_MODE_NONE;
7037 bnx2x_set_storm_rx_mode(bp);
7039 bnx2x_netif_stop(bp, 1);
7041 del_timer_sync(&bp->timer);
7042 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7043 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7044 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7049 /* Wait until tx fastpath tasks complete */
7050 for_each_tx_queue(bp, i) {
7051 struct bnx2x_fastpath *fp = &bp->fp[i];
7055 while (bnx2x_has_tx_work_unload(fp)) {
7057 bnx2x_tx_int(fp, 1000);
7059 BNX2X_ERR("timeout waiting for queue[%d]\n",
7061 #ifdef BNX2X_STOP_ON_ERROR
7073 /* Give HW time to discard old tx messages */
7076 if (CHIP_IS_E1(bp)) {
7077 struct mac_configuration_cmd *config =
7078 bnx2x_sp(bp, mcast_config);
7080 bnx2x_set_mac_addr_e1(bp, 0);
7082 for (i = 0; i < config->hdr.length; i++)
7083 CAM_INVALIDATE(config->config_table[i]);
7085 config->hdr.length = i;
7086 if (CHIP_REV_IS_SLOW(bp))
7087 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7089 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7090 config->hdr.client_id = BP_CL_ID(bp);
7091 config->hdr.reserved1 = 0;
7093 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7094 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7095 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7098 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7100 bnx2x_set_mac_addr_e1h(bp, 0);
7102 for (i = 0; i < MC_HASH_SIZE; i++)
7103 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7106 if (unload_mode == UNLOAD_NORMAL)
7107 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7109 else if (bp->flags & NO_WOL_FLAG) {
7110 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7111 if (CHIP_IS_E1H(bp))
7112 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7114 } else if (bp->wol) {
7115 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7116 u8 *mac_addr = bp->dev->dev_addr;
7118 /* The MAC address is written to entries 1-4 to
7119 preserve entry 0, which is used by the PMF */
7120 u8 entry = (BP_E1HVN(bp) + 1)*8;
7122 val = (mac_addr[0] << 8) | mac_addr[1];
7123 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7125 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7126 (mac_addr[4] << 8) | mac_addr[5];
7127 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7129 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7132 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
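/*
 * The two EMAC_REG_EMAC_MAC_MATCH writes above split the 6-byte MAC
 * across an 8-byte match entry: the first dword carries the two most
 * significant bytes, the second the remaining four. As a hypothetical
 * helper:
 */
static void example_mac_match_regs(const u8 *mac, u32 *hi, u32 *lo)
{
	*hi = (mac[0] << 8) | mac[1];
	*lo = (mac[2] << 24) | (mac[3] << 16) | (mac[4] << 8) | mac[5];
}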
7134 /* Close multi and leading connections;
7135 completions for ramrods are collected synchronously */
7136 for_each_nondefault_queue(bp, i)
7137 if (bnx2x_stop_multi(bp, i))
7140 rc = bnx2x_stop_leading(bp);
7142 BNX2X_ERR("Stop leading failed!\n");
7143 #ifdef BNX2X_STOP_ON_ERROR
7152 reset_code = bnx2x_fw_command(bp, reset_code);
7154 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7155 load_count[0], load_count[1], load_count[2]);
7157 load_count[1 + port]--;
7158 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7159 load_count[0], load_count[1], load_count[2]);
7160 if (load_count[0] == 0)
7161 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7162 else if (load_count[1 + port] == 0)
7163 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7165 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7168 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7169 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7170 bnx2x__link_reset(bp);
7172 /* Reset the chip */
7173 bnx2x_reset_chip(bp, reset_code);
7175 /* Report UNLOAD_DONE to MCP */
7177 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7180 /* Free SKBs, SGEs, TPA pool and driver internals */
7181 bnx2x_free_skbs(bp);
7182 for_each_rx_queue(bp, i)
7183 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7184 for_each_rx_queue(bp, i)
7185 netif_napi_del(&bnx2x_fp(bp, i, napi));
7188 bp->state = BNX2X_STATE_CLOSED;
7190 netif_carrier_off(bp->dev);
7195 static void bnx2x_reset_task(struct work_struct *work)
7197 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7199 #ifdef BNX2X_STOP_ON_ERROR
7200 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7201 " so reset not done to allow debug dump,\n"
7202 KERN_ERR " you will need to reboot when done\n");
7208 if (!netif_running(bp->dev))
7209 goto reset_task_exit;
7211 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7212 bnx2x_nic_load(bp, LOAD_NORMAL);
7218 /* end of nic load/unload */
7223 * Init service functions
7226 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7229 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7230 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7231 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7232 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7233 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7234 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7235 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7236 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7238 BNX2X_ERR("Unsupported function index: %d\n", func);
7243 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7245 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7247 /* Flush all outstanding writes */
7250 /* Pretend to be function 0 */
7252 /* Flush the GRC transaction (in the chip) */
7253 new_val = REG_RD(bp, reg);
7255 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7260 /* From now we are in the "like-E1" mode */
7261 bnx2x_int_disable(bp);
7263 /* Flush all outstanding writes */
7266 /* Restore the original function settings */
7267 REG_WR(bp, reg, orig_func);
7268 new_val = REG_RD(bp, reg);
7269 if (new_val != orig_func) {
7270 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7271 orig_func, new_val);
7276 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7278 if (CHIP_IS_E1H(bp))
7279 bnx2x_undi_int_disable_e1h(bp, func);
7281 bnx2x_int_disable(bp);
7284 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7288 /* Check if there is any driver already loaded */
7289 val = REG_RD(bp, MISC_REG_UNPREPARED);
7291 /* Check if it is the UNDI driver
7292 * UNDI driver initializes CID offset for the normal doorbell to 0x7
7293 */
7294 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7295 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7297 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7299 int func = BP_FUNC(bp);
7303 /* clear the UNDI indication */
7304 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7306 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7308 /* try unload UNDI on port 0 */
7311 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7312 DRV_MSG_SEQ_NUMBER_MASK);
7313 reset_code = bnx2x_fw_command(bp, reset_code);
7315 /* if UNDI is loaded on the other port */
7316 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7318 /* send "DONE" for previous unload */
7319 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7321 /* unload UNDI on port 1 */
7324 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7325 DRV_MSG_SEQ_NUMBER_MASK);
7326 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7328 bnx2x_fw_command(bp, reset_code);
7331 /* now it's safe to release the lock */
7332 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7334 bnx2x_undi_int_disable(bp, func);
7336 /* close input traffic and wait for it */
7337 /* Do not rcv packets to BRB */
7339 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7340 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7341 /* Do not direct rcv packets that are not for MCP to
7344 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7345 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7348 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7349 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7352 /* save NIG port swap info */
7353 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7354 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7357 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7360 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7362 /* take the NIG out of reset and restore swap values */
7364 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7365 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7366 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7367 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7369 /* send unload done to the MCP */
7370 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7372 /* restore our func and fw_seq */
7375 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7376 DRV_MSG_SEQ_NUMBER_MASK);
7379 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7383 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7385 u32 val, val2, val3, val4, id;
7388 /* Get the chip revision id and number. */
7389 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7390 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7391 id = ((val & 0xffff) << 16);
7392 val = REG_RD(bp, MISC_REG_CHIP_REV);
7393 id |= ((val & 0xf) << 12);
7394 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7395 id |= ((val & 0xff) << 4);
7396 val = REG_RD(bp, MISC_REG_BOND_ID);
7398 bp->common.chip_id = id;
7399 bp->link_params.chip_id = bp->common.chip_id;
7400 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7402 val = (REG_RD(bp, 0x2874) & 0x55);
7403 if ((bp->common.chip_id & 0x1) ||
7404 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7405 bp->flags |= ONE_PORT_FLAG;
7406 BNX2X_DEV_INFO("single port device\n");
7409 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7410 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7411 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7412 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7413 bp->common.flash_size, bp->common.flash_size);
7415 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7416 bp->link_params.shmem_base = bp->common.shmem_base;
7417 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7419 if (!bp->common.shmem_base ||
7420 (bp->common.shmem_base < 0xA0000) ||
7421 (bp->common.shmem_base >= 0xC0000)) {
7422 BNX2X_DEV_INFO("MCP not active\n");
7423 bp->flags |= NO_MCP_FLAG;
7427 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7428 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7429 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7430 BNX2X_ERR("BAD MCP validity signature\n");
7432 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7433 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7435 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7436 SHARED_HW_CFG_LED_MODE_MASK) >>
7437 SHARED_HW_CFG_LED_MODE_SHIFT);
7439 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7440 bp->common.bc_ver = val;
7441 BNX2X_DEV_INFO("bc_ver %X\n", val);
7442 if (val < BNX2X_BC_VER) {
7443 /* for now only warn;
7444 * later we might need to enforce this */
7445 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7446 " please upgrade BC\n", BNX2X_BC_VER, val);
7449 if (BP_E1HVN(bp) == 0) {
7450 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7451 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7453 /* no WOL capability for E1HVN != 0 */
7454 bp->flags |= NO_WOL_FLAG;
7456 BNX2X_DEV_INFO("%sWoL capable\n",
7457 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7459 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7460 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7461 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7462 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7464 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7465 val, val2, val3, val4);
7468 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7471 int port = BP_PORT(bp);
7474 switch (switch_cfg) {
7476 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7479 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7480 switch (ext_phy_type) {
7481 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7482 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7485 bp->port.supported |= (SUPPORTED_10baseT_Half |
7486 SUPPORTED_10baseT_Full |
7487 SUPPORTED_100baseT_Half |
7488 SUPPORTED_100baseT_Full |
7489 SUPPORTED_1000baseT_Full |
7490 SUPPORTED_2500baseX_Full |
7495 SUPPORTED_Asym_Pause);
7498 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7499 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7502 bp->port.supported |= (SUPPORTED_10baseT_Half |
7503 SUPPORTED_10baseT_Full |
7504 SUPPORTED_100baseT_Half |
7505 SUPPORTED_100baseT_Full |
7506 SUPPORTED_1000baseT_Full |
7511 SUPPORTED_Asym_Pause);
7515 BNX2X_ERR("NVRAM config error. "
7516 "BAD SerDes ext_phy_config 0x%x\n",
7517 bp->link_params.ext_phy_config);
7521 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7523 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7526 case SWITCH_CFG_10G:
7527 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7530 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7531 switch (ext_phy_type) {
7532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7533 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7536 bp->port.supported |= (SUPPORTED_10baseT_Half |
7537 SUPPORTED_10baseT_Full |
7538 SUPPORTED_100baseT_Half |
7539 SUPPORTED_100baseT_Full |
7540 SUPPORTED_1000baseT_Full |
7541 SUPPORTED_2500baseX_Full |
7542 SUPPORTED_10000baseT_Full |
7547 SUPPORTED_Asym_Pause);
7550 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7551 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7554 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7557 SUPPORTED_Asym_Pause);
7560 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7561 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7564 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7565 SUPPORTED_1000baseT_Full |
7568 SUPPORTED_Asym_Pause);
7571 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7572 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7575 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7576 SUPPORTED_1000baseT_Full |
7580 SUPPORTED_Asym_Pause);
7583 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7584 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7587 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7588 SUPPORTED_2500baseX_Full |
7589 SUPPORTED_1000baseT_Full |
7593 SUPPORTED_Asym_Pause);
7596 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7597 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7600 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7604 SUPPORTED_Asym_Pause);
7607 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7608 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7609 bp->link_params.ext_phy_config);
7613 BNX2X_ERR("NVRAM config error. "
7614 "BAD XGXS ext_phy_config 0x%x\n",
7615 bp->link_params.ext_phy_config);
7619 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7621 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7626 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7627 bp->port.link_config);
7630 bp->link_params.phy_addr = bp->port.phy_addr;
7632 /* mask what we support according to speed_cap_mask */
7633 if (!(bp->link_params.speed_cap_mask &
7634 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7635 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7637 if (!(bp->link_params.speed_cap_mask &
7638 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7639 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7641 if (!(bp->link_params.speed_cap_mask &
7642 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7643 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7645 if (!(bp->link_params.speed_cap_mask &
7646 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7647 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7649 if (!(bp->link_params.speed_cap_mask &
7650 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7651 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7652 SUPPORTED_1000baseT_Full);
7654 if (!(bp->link_params.speed_cap_mask &
7655 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7656 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7658 if (!(bp->link_params.speed_cap_mask &
7659 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7660 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7662 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7665 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7667 bp->link_params.req_duplex = DUPLEX_FULL;
7669 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7670 case PORT_FEATURE_LINK_SPEED_AUTO:
7671 if (bp->port.supported & SUPPORTED_Autoneg) {
7672 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7673 bp->port.advertising = bp->port.supported;
7676 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7678 if ((ext_phy_type ==
7679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7681 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7682 /* force 10G, no AN */
7683 bp->link_params.req_line_speed = SPEED_10000;
7684 bp->port.advertising =
7685 (ADVERTISED_10000baseT_Full |
7689 BNX2X_ERR("NVRAM config error. "
7690 "Invalid link_config 0x%x"
7691 " Autoneg not supported\n",
7692 bp->port.link_config);
7697 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7698 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7699 bp->link_params.req_line_speed = SPEED_10;
7700 bp->port.advertising = (ADVERTISED_10baseT_Full |
7703 BNX2X_ERR("NVRAM config error. "
7704 "Invalid link_config 0x%x"
7705 " speed_cap_mask 0x%x\n",
7706 bp->port.link_config,
7707 bp->link_params.speed_cap_mask);
7712 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7713 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7714 bp->link_params.req_line_speed = SPEED_10;
7715 bp->link_params.req_duplex = DUPLEX_HALF;
7716 bp->port.advertising = (ADVERTISED_10baseT_Half |
7719 BNX2X_ERR("NVRAM config error. "
7720 "Invalid link_config 0x%x"
7721 " speed_cap_mask 0x%x\n",
7722 bp->port.link_config,
7723 bp->link_params.speed_cap_mask);
7728 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7729 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7730 bp->link_params.req_line_speed = SPEED_100;
7731 bp->port.advertising = (ADVERTISED_100baseT_Full |
7734 BNX2X_ERR("NVRAM config error. "
7735 "Invalid link_config 0x%x"
7736 " speed_cap_mask 0x%x\n",
7737 bp->port.link_config,
7738 bp->link_params.speed_cap_mask);
7743 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7744 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7745 bp->link_params.req_line_speed = SPEED_100;
7746 bp->link_params.req_duplex = DUPLEX_HALF;
7747 bp->port.advertising = (ADVERTISED_100baseT_Half |
7750 BNX2X_ERR("NVRAM config error. "
7751 "Invalid link_config 0x%x"
7752 " speed_cap_mask 0x%x\n",
7753 bp->port.link_config,
7754 bp->link_params.speed_cap_mask);
7759 case PORT_FEATURE_LINK_SPEED_1G:
7760 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7761 bp->link_params.req_line_speed = SPEED_1000;
7762 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7765 BNX2X_ERR("NVRAM config error. "
7766 "Invalid link_config 0x%x"
7767 " speed_cap_mask 0x%x\n",
7768 bp->port.link_config,
7769 bp->link_params.speed_cap_mask);
7774 case PORT_FEATURE_LINK_SPEED_2_5G:
7775 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7776 bp->link_params.req_line_speed = SPEED_2500;
7777 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7780 BNX2X_ERR("NVRAM config error. "
7781 "Invalid link_config 0x%x"
7782 " speed_cap_mask 0x%x\n",
7783 bp->port.link_config,
7784 bp->link_params.speed_cap_mask);
7789 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7790 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7791 case PORT_FEATURE_LINK_SPEED_10G_KR:
7792 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7793 bp->link_params.req_line_speed = SPEED_10000;
7794 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7797 BNX2X_ERR("NVRAM config error. "
7798 "Invalid link_config 0x%x"
7799 " speed_cap_mask 0x%x\n",
7800 bp->port.link_config,
7801 bp->link_params.speed_cap_mask);
7807 BNX2X_ERR("NVRAM config error. "
7808 "BAD link speed link_config 0x%x\n",
7809 bp->port.link_config);
7810 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7811 bp->port.advertising = bp->port.supported;
7815 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7816 PORT_FEATURE_FLOW_CONTROL_MASK);
7817 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
7818 !(bp->port.supported & SUPPORTED_Autoneg))
7819 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
7821 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7822 " advertising 0x%x\n",
7823 bp->link_params.req_line_speed,
7824 bp->link_params.req_duplex,
7825 bp->link_params.req_flow_ctrl, bp->port.advertising);
7828 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7830 int port = BP_PORT(bp);
7833 bp->link_params.bp = bp;
7834 bp->link_params.port = port;
7836 bp->link_params.serdes_config =
7837 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7838 bp->link_params.lane_config =
7839 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7840 bp->link_params.ext_phy_config =
7842 dev_info.port_hw_config[port].external_phy_config);
7843 bp->link_params.speed_cap_mask =
7845 dev_info.port_hw_config[port].speed_capability_mask);
7847 bp->port.link_config =
7848 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7850 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7851 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7852 " link_config 0x%08x\n",
7853 bp->link_params.serdes_config,
7854 bp->link_params.lane_config,
7855 bp->link_params.ext_phy_config,
7856 bp->link_params.speed_cap_mask, bp->port.link_config);
7858 bp->link_params.switch_cfg = (bp->port.link_config &
7859 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7860 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7862 bnx2x_link_settings_requested(bp);
7864 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7865 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7866 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7867 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7868 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7869 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7870 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7871 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7872 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7873 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
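/*
 * The unpacking above converts the shmem mac_upper/mac_lower pair
 * (16 + 32 significant bits) into a conventional 6-byte MAC. As a
 * hypothetical helper:
 */
static void example_shmem_to_mac(u32 upper, u32 lower, u8 *mac)
{
	mac[0] = (u8)(upper >> 8);
	mac[1] = (u8)upper;
	mac[2] = (u8)(lower >> 24);
	mac[3] = (u8)(lower >> 16);
	mac[4] = (u8)(lower >> 8);
	mac[5] = (u8)lower;
}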
7876 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7878 int func = BP_FUNC(bp);
7882 bnx2x_get_common_hwinfo(bp);
7886 if (CHIP_IS_E1H(bp)) {
7888 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7890 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7891 FUNC_MF_CFG_E1HOV_TAG_MASK);
7892 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7896 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7898 func, bp->e1hov, bp->e1hov);
7900 BNX2X_DEV_INFO("Single function mode\n");
7902 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7903 " aborting\n", func);
7909 if (!BP_NOMCP(bp)) {
7910 bnx2x_get_port_hwinfo(bp);
7912 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7913 DRV_MSG_SEQ_NUMBER_MASK);
7914 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7918 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7919 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7920 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7921 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7922 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7923 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7924 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7925 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7926 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7927 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7928 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7930 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7938 /* only supposed to happen on emulation/FPGA */
7939 BNX2X_ERR("warning random MAC workaround active\n");
7940 random_ether_addr(bp->dev->dev_addr);
7941 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7947 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7949 int func = BP_FUNC(bp);
7952 /* Disable interrupt handling until HW is initialized */
7953 atomic_set(&bp->intr_sem, 1);
7955 mutex_init(&bp->port.phy_mutex);
7957 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7958 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7960 rc = bnx2x_get_hwinfo(bp);
7962 /* need to reset chip if undi was active */
7964 bnx2x_undi_unload(bp);
7966 if (CHIP_REV_IS_FPGA(bp))
7967 printk(KERN_ERR PFX "FPGA detected\n");
7969 if (BP_NOMCP(bp) && (func == 0))
7971 "MCP disabled, must load devices in order!\n");
7973 /* Set multi queue mode */
7974 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7975 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
7977 "Multi disabled since int_mode requested is not MSI-X\n");
7978 multi_mode = ETH_RSS_MODE_DISABLED;
7980 bp->multi_mode = multi_mode;
7985 bp->flags &= ~TPA_ENABLE_FLAG;
7986 bp->dev->features &= ~NETIF_F_LRO;
7988 bp->flags |= TPA_ENABLE_FLAG;
7989 bp->dev->features |= NETIF_F_LRO;
7993 bp->tx_ring_size = MAX_TX_AVAIL;
7994 bp->rx_ring_size = MAX_RX_AVAIL;
8002 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8003 bp->current_interval = (poll ? poll : bp->timer_interval);
8005 init_timer(&bp->timer);
8006 bp->timer.expires = jiffies + bp->current_interval;
8007 bp->timer.data = (unsigned long) bp;
8008 bp->timer.function = bnx2x_timer;
8014 * ethtool service functions
8017 /* All ethtool functions called with rtnl_lock */
8019 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8021 struct bnx2x *bp = netdev_priv(dev);
8023 cmd->supported = bp->port.supported;
8024 cmd->advertising = bp->port.advertising;
8026 if (netif_carrier_ok(dev)) {
8027 cmd->speed = bp->link_vars.line_speed;
8028 cmd->duplex = bp->link_vars.duplex;
8030 cmd->speed = bp->link_params.req_line_speed;
8031 cmd->duplex = bp->link_params.req_duplex;
8036 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8037 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8038 if (vn_max_rate < cmd->speed)
8039 cmd->speed = vn_max_rate;
8042 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8044 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8046 switch (ext_phy_type) {
8047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8052 cmd->port = PORT_FIBRE;
8055 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8056 cmd->port = PORT_TP;
8059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8060 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8061 bp->link_params.ext_phy_config);
8065 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8066 bp->link_params.ext_phy_config);
8070 cmd->port = PORT_TP;
8072 cmd->phy_address = bp->port.phy_addr;
8073 cmd->transceiver = XCVR_INTERNAL;
8075 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8076 cmd->autoneg = AUTONEG_ENABLE;
8078 cmd->autoneg = AUTONEG_DISABLE;
8083 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8084 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8085 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8086 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8087 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8088 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8089 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8094 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8096 struct bnx2x *bp = netdev_priv(dev);
8102 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8103 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8104 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8105 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8106 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8107 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8108 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8110 if (cmd->autoneg == AUTONEG_ENABLE) {
8111 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8112 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8116 /* advertise the requested speed and duplex if supported */
8117 cmd->advertising &= bp->port.supported;
8119 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8120 bp->link_params.req_duplex = DUPLEX_FULL;
8121 bp->port.advertising |= (ADVERTISED_Autoneg |
8124 } else { /* forced speed */
8125 /* advertise the requested speed and duplex if supported */
8126 switch (cmd->speed) {
8128 if (cmd->duplex == DUPLEX_FULL) {
8129 if (!(bp->port.supported &
8130 SUPPORTED_10baseT_Full)) {
8132 "10M full not supported\n");
8136 advertising = (ADVERTISED_10baseT_Full |
8139 if (!(bp->port.supported &
8140 SUPPORTED_10baseT_Half)) {
8142 "10M half not supported\n");
8146 advertising = (ADVERTISED_10baseT_Half |
8152 if (cmd->duplex == DUPLEX_FULL) {
8153 if (!(bp->port.supported &
8154 SUPPORTED_100baseT_Full)) {
8156 "100M full not supported\n");
8160 advertising = (ADVERTISED_100baseT_Full |
8163 if (!(bp->port.supported &
8164 SUPPORTED_100baseT_Half)) {
8166 "100M half not supported\n");
8170 advertising = (ADVERTISED_100baseT_Half |
8176 if (cmd->duplex != DUPLEX_FULL) {
8177 DP(NETIF_MSG_LINK, "1G half not supported\n");
8181 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8182 DP(NETIF_MSG_LINK, "1G full not supported\n");
8186 advertising = (ADVERTISED_1000baseT_Full |
8191 if (cmd->duplex != DUPLEX_FULL) {
8193 "2.5G half not supported\n");
8197 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8199 "2.5G full not supported\n");
8203 advertising = (ADVERTISED_2500baseX_Full |
8208 if (cmd->duplex != DUPLEX_FULL) {
8209 DP(NETIF_MSG_LINK, "10G half not supported\n");
8213 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8214 DP(NETIF_MSG_LINK, "10G full not supported\n");
8218 advertising = (ADVERTISED_10000baseT_Full |
8223 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8227 bp->link_params.req_line_speed = cmd->speed;
8228 bp->link_params.req_duplex = cmd->duplex;
8229 bp->port.advertising = advertising;
8232 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8233 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8234 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8235 bp->port.advertising);
8237 if (netif_running(dev)) {
8238 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8245 #define PHY_FW_VER_LEN 10
8247 static void bnx2x_get_drvinfo(struct net_device *dev,
8248 struct ethtool_drvinfo *info)
8250 struct bnx2x *bp = netdev_priv(dev);
8251 u8 phy_fw_ver[PHY_FW_VER_LEN];
8253 strcpy(info->driver, DRV_MODULE_NAME);
8254 strcpy(info->version, DRV_MODULE_VERSION);
8256 phy_fw_ver[0] = '\0';
8258 bnx2x_acquire_phy_lock(bp);
8259 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8260 (bp->state != BNX2X_STATE_CLOSED),
8261 phy_fw_ver, PHY_FW_VER_LEN);
8262 bnx2x_release_phy_lock(bp);
8265 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8266 (bp->common.bc_ver & 0xff0000) >> 16,
8267 (bp->common.bc_ver & 0xff00) >> 8,
8268 (bp->common.bc_ver & 0xff),
8269 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8270 strcpy(info->bus_info, pci_name(bp->pdev));
8271 info->n_stats = BNX2X_NUM_STATS;
8272 info->testinfo_len = BNX2X_NUM_TESTS;
8273 info->eedump_len = bp->common.flash_size;
8274 info->regdump_len = 0;
8277 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8279 struct bnx2x *bp = netdev_priv(dev);
8281 if (bp->flags & NO_WOL_FLAG) {
8285 wol->supported = WAKE_MAGIC;
8287 wol->wolopts = WAKE_MAGIC;
8291 memset(&wol->sopass, 0, sizeof(wol->sopass));
8294 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8296 struct bnx2x *bp = netdev_priv(dev);
8298 if (wol->wolopts & ~WAKE_MAGIC)
8301 if (wol->wolopts & WAKE_MAGIC) {
8302 if (bp->flags & NO_WOL_FLAG)
8312 static u32 bnx2x_get_msglevel(struct net_device *dev)
8314 struct bnx2x *bp = netdev_priv(dev);
8316 return bp->msglevel;
8319 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8321 struct bnx2x *bp = netdev_priv(dev);
8323 if (capable(CAP_NET_ADMIN))
8324 bp->msglevel = level;
8327 static int bnx2x_nway_reset(struct net_device *dev)
8329 struct bnx2x *bp = netdev_priv(dev);
8334 if (netif_running(dev)) {
8335 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8342 static int bnx2x_get_eeprom_len(struct net_device *dev)
8344 struct bnx2x *bp = netdev_priv(dev);
8346 return bp->common.flash_size;
8349 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8351 int port = BP_PORT(bp);
8355 /* adjust timeout for emulation/FPGA */
8356 count = NVRAM_TIMEOUT_COUNT;
8357 if (CHIP_REV_IS_SLOW(bp))
8360 /* request access to nvram interface */
8361 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8362 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8364 for (i = 0; i < count*10; i++) {
8365 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8366 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8372 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8373 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8380 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8382 int port = BP_PORT(bp);
8386 /* adjust timeout for emulation/FPGA */
8387 count = NVRAM_TIMEOUT_COUNT;
8388 if (CHIP_REV_IS_SLOW(bp))
8391 /* relinquish nvram interface */
8392 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8393 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8395 for (i = 0; i < count*10; i++) {
8396 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8397 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8403 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8404 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8411 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8415 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8417 /* enable both bits, even on read */
8418 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8419 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8420 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8423 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8427 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8429 /* disable both bits, even after read */
8430 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8431 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8432 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8435 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8441 /* build the command word */
8442 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8444 /* need to clear DONE bit separately */
8445 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8447 /* address of the NVRAM to read from */
8448 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8449 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8451 /* issue a read command */
8452 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8454 /* adjust timeout for emulation/FPGA */
8455 count = NVRAM_TIMEOUT_COUNT;
8456 if (CHIP_REV_IS_SLOW(bp))
8459 /* wait for completion */
8462 for (i = 0; i < count; i++) {
8464 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8466 if (val & MCPR_NVM_COMMAND_DONE) {
8467 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8468 /* we read nvram data in cpu order,
8469 * but ethtool sees it as an array of bytes;
8470 * converting to big-endian will do the work */
8471 val = cpu_to_be32(val);
8481 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8488 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8490 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8495 if (offset + buf_size > bp->common.flash_size) {
8496 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8497 " buf_size (0x%x) > flash_size (0x%x)\n",
8498 offset, buf_size, bp->common.flash_size);
8502 /* request access to nvram interface */
8503 rc = bnx2x_acquire_nvram_lock(bp);
8507 /* enable access to nvram interface */
8508 bnx2x_enable_nvram_access(bp);
8510 /* read the first word(s) */
8511 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8512 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8513 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8514 memcpy(ret_buf, &val, 4);
8516 /* advance to the next dword */
8517 offset += sizeof(u32);
8518 ret_buf += sizeof(u32);
8519 buf_size -= sizeof(u32);
8524 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8525 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8526 memcpy(ret_buf, &val, 4);
8529 /* disable access to nvram interface */
8530 bnx2x_disable_nvram_access(bp);
8531 bnx2x_release_nvram_lock(bp);
8536 static int bnx2x_get_eeprom(struct net_device *dev,
8537 struct ethtool_eeprom *eeprom, u8 *eebuf)
8539 struct bnx2x *bp = netdev_priv(dev);
8542 if (!netif_running(dev))
8545 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8546 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8547 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8548 eeprom->len, eeprom->len);
8550 /* parameters already validated in ethtool_get_eeprom */
8552 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8557 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8562 /* build the command word */
8563 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8565 /* need to clear DONE bit separately */
8566 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8568 /* write the data */
8569 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8571 /* address of the NVRAM to write to */
8572 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8573 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8575 /* issue the write command */
8576 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8578 /* adjust timeout for emulation/FPGA */
8579 count = NVRAM_TIMEOUT_COUNT;
8580 if (CHIP_REV_IS_SLOW(bp))
8583 /* wait for completion */
8585 for (i = 0; i < count; i++) {
8587 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8588 if (val & MCPR_NVM_COMMAND_DONE) {
8597 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8599 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8607 if (offset + buf_size > bp->common.flash_size) {
8608 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8609 " buf_size (0x%x) > flash_size (0x%x)\n",
8610 offset, buf_size, bp->common.flash_size);
8614 /* request access to nvram interface */
8615 rc = bnx2x_acquire_nvram_lock(bp);
8619 /* enable access to nvram interface */
8620 bnx2x_enable_nvram_access(bp);
8622 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8623 align_offset = (offset & ~0x03);
8624 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8627 val &= ~(0xff << BYTE_OFFSET(offset));
8628 val |= (*data_buf << BYTE_OFFSET(offset));
8630 /* nvram data is returned as an array of bytes;
8631 * convert it back to cpu order */
8632 val = be32_to_cpu(val);
8634 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8638 /* disable access to nvram interface */
8639 bnx2x_disable_nvram_access(bp);
8640 bnx2x_release_nvram_lock(bp);
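/*
 * bnx2x_nvram_write1() emulates a one-byte write with a
 * read-modify-write of the enclosing aligned dword: BYTE_OFFSET()
 * turns the byte position inside the dword into a bit shift, and the
 * patching applied to the big-endian image returned by
 * bnx2x_nvram_read_dword() reduces to this hypothetical helper:
 */
static u32 example_patch_byte(u32 dword, u32 offset, u8 byte)
{
	u32 shift = BYTE_OFFSET(offset);	/* 8 * (offset & 3) */

	dword &= ~(0xff << shift);
	dword |= (u32)byte << shift;
	return dword;
}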
8645 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8653 if (buf_size == 1) /* ethtool */
8654 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8656 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8658 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8663 if (offset + buf_size > bp->common.flash_size) {
8664 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8665 " buf_size (0x%x) > flash_size (0x%x)\n",
8666 offset, buf_size, bp->common.flash_size);
8670 /* request access to nvram interface */
8671 rc = bnx2x_acquire_nvram_lock(bp);
8675 /* enable access to nvram interface */
8676 bnx2x_enable_nvram_access(bp);
8679 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8680 while ((written_so_far < buf_size) && (rc == 0)) {
8681 if (written_so_far == (buf_size - sizeof(u32)))
8682 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8683 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8684 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8685 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8686 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8688 memcpy(&val, data_buf, 4);
8690 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8692 /* advance to the next dword */
8693 offset += sizeof(u32);
8694 data_buf += sizeof(u32);
8695 written_so_far += sizeof(u32);
8699 /* disable access to nvram interface */
8700 bnx2x_disable_nvram_access(bp);
8701 bnx2x_release_nvram_lock(bp);
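/*
 * The cmd_flags juggling in the loop above marks NVRAM page
 * boundaries: FIRST opens a burst on the first dword and at the start
 * of every NVRAM_PAGE_SIZE page, LAST closes it at a page end or at
 * the final dword of the buffer. The per-dword decision as a
 * hypothetical helper:
 */
static u32 example_nvram_cmd_flags(u32 offset, u32 remaining, int first)
{
	u32 flags = 0;

	if (first || ((offset % NVRAM_PAGE_SIZE) == 0))
		flags |= MCPR_NVM_COMMAND_FIRST;
	if ((remaining == sizeof(u32)) ||
	    (((offset + 4) % NVRAM_PAGE_SIZE) == 0))
		flags |= MCPR_NVM_COMMAND_LAST;
	return flags;
}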
8706 static int bnx2x_set_eeprom(struct net_device *dev,
8707 struct ethtool_eeprom *eeprom, u8 *eebuf)
8709 struct bnx2x *bp = netdev_priv(dev);
8712 if (!netif_running(dev))
8715 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8716 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8717 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8718 eeprom->len, eeprom->len);
8720 /* parameters already validated in ethtool_set_eeprom */
8722 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
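/* 0x00504859 is the ASCII bytes 'P' 'H' 'Y' (0x50 0x48 0x59) */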
8723 if (eeprom->magic == 0x00504859)
8726 bnx2x_acquire_phy_lock(bp);
8727 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8728 bp->link_params.ext_phy_config,
8729 (bp->state != BNX2X_STATE_CLOSED),
8730 eebuf, eeprom->len);
8731 if ((bp->state == BNX2X_STATE_OPEN) ||
8732 (bp->state == BNX2X_STATE_DISABLED)) {
8733 rc |= bnx2x_link_reset(&bp->link_params,
8735 rc |= bnx2x_phy_init(&bp->link_params,
8738 bnx2x_release_phy_lock(bp);
8740 } else /* Only the PMF can access the PHY */
8743 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8748 static int bnx2x_get_coalesce(struct net_device *dev,
8749 struct ethtool_coalesce *coal)
8751 struct bnx2x *bp = netdev_priv(dev);
8753 memset(coal, 0, sizeof(struct ethtool_coalesce));
8755 coal->rx_coalesce_usecs = bp->rx_ticks;
8756 coal->tx_coalesce_usecs = bp->tx_ticks;
8761 static int bnx2x_set_coalesce(struct net_device *dev,
8762 struct ethtool_coalesce *coal)
8764 struct bnx2x *bp = netdev_priv(dev);
8766 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8767 if (bp->rx_ticks > 3000)
8768 bp->rx_ticks = 3000;
8770 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8771 if (bp->tx_ticks > 0x3000)
8772 bp->tx_ticks = 0x3000;
8774 if (netif_running(dev))
8775 bnx2x_update_coalesce(bp);
8780 static void bnx2x_get_ringparam(struct net_device *dev,
8781 struct ethtool_ringparam *ering)
8783 struct bnx2x *bp = netdev_priv(dev);
8785 ering->rx_max_pending = MAX_RX_AVAIL;
8786 ering->rx_mini_max_pending = 0;
8787 ering->rx_jumbo_max_pending = 0;
8789 ering->rx_pending = bp->rx_ring_size;
8790 ering->rx_mini_pending = 0;
8791 ering->rx_jumbo_pending = 0;
8793 ering->tx_max_pending = MAX_TX_AVAIL;
8794 ering->tx_pending = bp->tx_ring_size;
8797 static int bnx2x_set_ringparam(struct net_device *dev,
8798 struct ethtool_ringparam *ering)
8800 struct bnx2x *bp = netdev_priv(dev);
8803 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8804 (ering->tx_pending > MAX_TX_AVAIL) ||
8805 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8808 bp->rx_ring_size = ering->rx_pending;
8809 bp->tx_ring_size = ering->tx_pending;
8811 if (netif_running(dev)) {
8812 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8813 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8819 static void bnx2x_get_pauseparam(struct net_device *dev,
8820 struct ethtool_pauseparam *epause)
8822 struct bnx2x *bp = netdev_priv(dev);
8824 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8825 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8827 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8828 BNX2X_FLOW_CTRL_RX);
8829 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8830 BNX2X_FLOW_CTRL_TX);
8832 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8833 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8834 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8837 static int bnx2x_set_pauseparam(struct net_device *dev,
8838 struct ethtool_pauseparam *epause)
8840 struct bnx2x *bp = netdev_priv(dev);
8845 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8846 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8847 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8849 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8851 if (epause->rx_pause)
8852 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8854 if (epause->tx_pause)
8855 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8857 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8858 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
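/* req_flow_ctrl now holds exactly what the user asked for: RX and/or
 * TX pause, or NONE if both were off.  The autoneg branch below
 * restores AUTO only when the line speed is auto-negotiated as well,
 * since flow control is then resolved by autoneg. */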
8860 if (epause->autoneg) {
8861 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8862 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8866 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8867 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8871 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8873 if (netif_running(dev)) {
8874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8881 static int bnx2x_set_flags(struct net_device *dev, u32 data)
8883 struct bnx2x *bp = netdev_priv(dev);
8887 /* TPA requires Rx CSUM offloading */
8888 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8889 if (!(dev->features & NETIF_F_LRO)) {
8890 dev->features |= NETIF_F_LRO;
8891 bp->flags |= TPA_ENABLE_FLAG;
8895 } else if (dev->features & NETIF_F_LRO) {
8896 dev->features &= ~NETIF_F_LRO;
8897 bp->flags &= ~TPA_ENABLE_FLAG;
8901 if (changed && netif_running(dev)) {
8902 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8903 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8909 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8911 struct bnx2x *bp = netdev_priv(dev);
8916 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8918 struct bnx2x *bp = netdev_priv(dev);
8923 /* Disable TPA when Rx CSUM is disabled; otherwise all
8924 TPA'ed packets would be discarded due to a wrong TCP CSUM */
8926 u32 flags = ethtool_op_get_flags(dev);
8928 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8934 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8937 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8938 dev->features |= NETIF_F_TSO6;
8940 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8941 dev->features &= ~NETIF_F_TSO6;
8947 static const struct {
8948 char string[ETH_GSTRING_LEN];
8949 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8950 { "register_test (offline)" },
8951 { "memory_test (offline)" },
8952 { "loopback_test (offline)" },
8953 { "nvram_test (online)" },
8954 { "interrupt_test (online)" },
8955 { "link_test (online)" },
8956 { "idle check (online)" }
8959 static int bnx2x_self_test_count(struct net_device *dev)
8961 return BNX2X_NUM_TESTS;
8964 static int bnx2x_test_registers(struct bnx2x *bp)
8966 int idx, i, rc = -ENODEV;
8968 int port = BP_PORT(bp);
8969 static const struct {
8974 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8975 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8976 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8977 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8978 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8979 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8980 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8981 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8982 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8983 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8984 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8985 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8986 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8987 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8988 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8989 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8990 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8991 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8992 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8993 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8994 /* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8995 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8996 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8997 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8998 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8999 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9000 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9001 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9002 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9003 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9004 /* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9005 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9006 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9007 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9008 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9009 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9010 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9011 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9013 { 0xffffffff, 0, 0x00000000 }
9016 if (!netif_running(bp->dev))
9019 /* Repeat the test twice:
9020 First by writing 0x00000000, second by writing 0xffffffff */
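/* Each table entry gives a register offset, its per-port stride and a
 * mask of implemented bits; the test writes the pattern, reads it
 * back, restores the saved value and compares only the masked bits. */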
9021 for (idx = 0; idx < 2; idx++) {
9028 wr_val = 0xffffffff;
9032 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9033 u32 offset, mask, save_val, val;
9035 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9036 mask = reg_tbl[i].mask;
9038 save_val = REG_RD(bp, offset);
9040 REG_WR(bp, offset, wr_val);
9041 val = REG_RD(bp, offset);
9043 /* Restore the original register's value */
9044 REG_WR(bp, offset, save_val);
9046 /* verify the value read back is as expected */
9047 if ((val & mask) != (wr_val & mask))
9058 static int bnx2x_test_memory(struct bnx2x *bp)
9060 int i, j, rc = -ENODEV;
9062 static const struct {
9066 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9067 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9068 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9069 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9070 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9071 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9072 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9076 static const struct {
9082 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9083 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9084 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9085 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9086 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9087 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9089 { NULL, 0xffffffff, 0, 0 }
9092 if (!netif_running(bp->dev))
9095 /* Go through all the memories */
9096 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9097 for (j = 0; j < mem_tbl[i].size; j++)
9098 REG_RD(bp, mem_tbl[i].offset + j*4);
9100 /* Check the parity status */
9101 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9102 val = REG_RD(bp, prty_tbl[i].offset);
9103 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9104 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9106 "%s is 0x%x\n", prty_tbl[i].name, val);
9117 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9122 while (bnx2x_link_test(bp) && cnt--)
9126 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9128 unsigned int pkt_size, num_pkts, i;
9129 struct sk_buff *skb;
9130 unsigned char *packet;
9131 struct bnx2x_fastpath *fp = &bp->fp[0];
9132 u16 tx_start_idx, tx_idx;
9133 u16 rx_start_idx, rx_idx;
9135 struct sw_tx_bd *tx_buf;
9136 struct eth_tx_bd *tx_bd;
9138 union eth_rx_cqe *cqe;
9140 struct sw_rx_bd *rx_buf;
9144 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9145 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9146 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9148 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
9150 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
9151 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9152 /* wait until link state is restored */
9154 while (cnt-- && bnx2x_test_link(&bp->link_params,
9161 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9164 goto test_loopback_exit;
9166 packet = skb_put(skb, pkt_size);
9167 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9168 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9169 for (i = ETH_HLEN; i < pkt_size; i++)
9170 packet[i] = (unsigned char) (i & 0xff);
9173 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9174 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9176 pkt_prod = fp->tx_pkt_prod++;
9177 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9178 tx_buf->first_bd = fp->tx_bd_prod;
9181 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9182 mapping = pci_map_single(bp->pdev, skb->data,
9183 skb_headlen(skb), PCI_DMA_TODEVICE);
9184 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9185 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9186 tx_bd->nbd = cpu_to_le16(1);
9187 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9188 tx_bd->vlan = cpu_to_le16(pkt_prod);
9189 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9190 ETH_TX_BD_FLAGS_END_BD);
9191 tx_bd->general_data = ((UNICAST_ADDRESS <<
9192 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9196 fp->hw_tx_prods->bds_prod =
9197 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9198 mb(); /* FW restriction: must not reorder writing nbd and packets */
9199 fp->hw_tx_prods->packets_prod =
9200 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9201 DOORBELL(bp, FP_IDX(fp), 0);
9207 bp->dev->trans_start = jiffies;
9211 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9212 if (tx_idx != tx_start_idx + num_pkts)
9213 goto test_loopback_exit;
9215 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9216 if (rx_idx != rx_start_idx + num_pkts)
9217 goto test_loopback_exit;
9219 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9220 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
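/* note: ETH_RX_ERROR_FALGS (sic) follows the spelling used by the
 * firmware (HSI) header */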
9221 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9222 goto test_loopback_rx_exit;
9224 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9225 if (len != pkt_size)
9226 goto test_loopback_rx_exit;
9228 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9230 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9231 for (i = ETH_HLEN; i < pkt_size; i++)
9232 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9233 goto test_loopback_rx_exit;
9237 test_loopback_rx_exit:
9239 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9240 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9241 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9242 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9244 /* Update producers */
9245 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9249 bp->link_params.loopback_mode = LOOPBACK_NONE;
9254 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9258 if (!netif_running(bp->dev))
9259 return BNX2X_LOOPBACK_FAILED;
9261 bnx2x_netif_stop(bp, 1);
9262 bnx2x_acquire_phy_lock(bp);
9264 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9265 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9266 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9269 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9270 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9271 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9274 bnx2x_release_phy_lock(bp);
9275 bnx2x_netif_start(bp);
9280 #define CRC32_RESIDUAL 0xdebb20e3
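/* CRC-32 has the property that the CRC computed over a block with its
 * own little-endian CRC appended is a constant residue (0xdebb20e3),
 * so each nvram region below can be validated without knowing where
 * the checksum field sits inside the region. */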
9282 static int bnx2x_test_nvram(struct bnx2x *bp)
9284 static const struct {
9288 { 0, 0x14 }, /* bootstrap */
9289 { 0x14, 0xec }, /* dir */
9290 { 0x100, 0x350 }, /* manuf_info */
9291 { 0x450, 0xf0 }, /* feature_info */
9292 { 0x640, 0x64 }, /* upgrade_key_info */
9294 { 0x708, 0x70 }, /* manuf_key_info */
9299 u8 *data = (u8 *)buf;
9303 rc = bnx2x_nvram_read(bp, 0, data, 4);
9305 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9306 goto test_nvram_exit;
9309 magic = be32_to_cpu(buf[0]);
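/* the first nvram dword must hold the 0x669955aa signature */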
9310 if (magic != 0x669955aa) {
9311 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9313 goto test_nvram_exit;
9316 for (i = 0; nvram_tbl[i].size; i++) {
9318 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9322 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9323 goto test_nvram_exit;
9326 csum = ether_crc_le(nvram_tbl[i].size, data);
9327 if (csum != CRC32_RESIDUAL) {
9329 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9331 goto test_nvram_exit;
9339 static int bnx2x_test_intr(struct bnx2x *bp)
9341 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9344 if (!netif_running(bp->dev))
9347 config->hdr.length = 0;
9349 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9351 config->hdr.offset = BP_FUNC(bp);
9352 config->hdr.client_id = BP_CL_ID(bp);
9353 config->hdr.reserved1 = 0;
9355 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9356 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9357 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9359 bp->set_mac_pending++;
9360 for (i = 0; i < 10; i++) {
9361 if (!bp->set_mac_pending)
9363 msleep_interruptible(10);
9372 static void bnx2x_self_test(struct net_device *dev,
9373 struct ethtool_test *etest, u64 *buf)
9375 struct bnx2x *bp = netdev_priv(dev);
9377 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9379 if (!netif_running(dev))
9382 /* offline tests are not supported in MF mode */
9384 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9386 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9389 link_up = bp->link_vars.link_up;
9390 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9391 bnx2x_nic_load(bp, LOAD_DIAG);
9392 /* wait until link state is restored */
9393 bnx2x_wait_for_link(bp, link_up);
9395 if (bnx2x_test_registers(bp) != 0) {
9397 etest->flags |= ETH_TEST_FL_FAILED;
9399 if (bnx2x_test_memory(bp) != 0) {
9401 etest->flags |= ETH_TEST_FL_FAILED;
9403 buf[2] = bnx2x_test_loopback(bp, link_up);
9405 etest->flags |= ETH_TEST_FL_FAILED;
9407 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9408 bnx2x_nic_load(bp, LOAD_NORMAL);
9409 /* wait until link state is restored */
9410 bnx2x_wait_for_link(bp, link_up);
9412 if (bnx2x_test_nvram(bp) != 0) {
9414 etest->flags |= ETH_TEST_FL_FAILED;
9416 if (bnx2x_test_intr(bp) != 0) {
9418 etest->flags |= ETH_TEST_FL_FAILED;
9421 if (bnx2x_link_test(bp) != 0) {
9423 etest->flags |= ETH_TEST_FL_FAILED;
9426 #ifdef BNX2X_EXTRA_DEBUG
9427 bnx2x_panic_dump(bp);
9431 static const struct {
9434 u8 string[ETH_GSTRING_LEN];
9435 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9436 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9437 { Q_STATS_OFFSET32(error_bytes_received_hi),
9438 8, "[%d]: rx_error_bytes" },
9439 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9440 8, "[%d]: rx_ucast_packets" },
9441 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9442 8, "[%d]: rx_mcast_packets" },
9443 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9444 8, "[%d]: rx_bcast_packets" },
9445 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9446 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9447 4, "[%d]: rx_phy_ip_err_discards"},
9448 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9449 4, "[%d]: rx_skb_alloc_discard" },
9450 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9452 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9453 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9454 8, "[%d]: tx_packets" }
9457 static const struct {
9461 #define STATS_FLAGS_PORT 1
9462 #define STATS_FLAGS_FUNC 2
9463 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9464 u8 string[ETH_GSTRING_LEN];
9465 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9466 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9467 8, STATS_FLAGS_BOTH, "rx_bytes" },
9468 { STATS_OFFSET32(error_bytes_received_hi),
9469 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9470 { STATS_OFFSET32(total_unicast_packets_received_hi),
9471 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9472 { STATS_OFFSET32(total_multicast_packets_received_hi),
9473 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9474 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9475 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9476 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9477 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9478 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9479 8, STATS_FLAGS_PORT, "rx_align_errors" },
9480 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9481 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9482 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9483 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9484 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9485 8, STATS_FLAGS_PORT, "rx_fragments" },
9486 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9487 8, STATS_FLAGS_PORT, "rx_jabbers" },
9488 { STATS_OFFSET32(no_buff_discard_hi),
9489 8, STATS_FLAGS_BOTH, "rx_discards" },
9490 { STATS_OFFSET32(mac_filter_discard),
9491 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9492 { STATS_OFFSET32(xxoverflow_discard),
9493 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9494 { STATS_OFFSET32(brb_drop_hi),
9495 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9496 { STATS_OFFSET32(brb_truncate_hi),
9497 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9498 { STATS_OFFSET32(pause_frames_received_hi),
9499 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9500 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9501 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9502 { STATS_OFFSET32(nig_timer_max),
9503 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9504 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9505 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9506 { STATS_OFFSET32(rx_skb_alloc_failed),
9507 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9508 { STATS_OFFSET32(hw_csum_err),
9509 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9511 { STATS_OFFSET32(total_bytes_transmitted_hi),
9512 8, STATS_FLAGS_BOTH, "tx_bytes" },
9513 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9514 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9515 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9516 8, STATS_FLAGS_BOTH, "tx_packets" },
9517 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9518 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9519 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9520 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9521 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9522 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9523 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9524 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9525 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9526 8, STATS_FLAGS_PORT, "tx_deferred" },
9527 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9528 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9529 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9530 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9531 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9532 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9533 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9534 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9535 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9536 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9537 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9538 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9539 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9540 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9541 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9542 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9543 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9544 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9545 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9546 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9547 { STATS_OFFSET32(pause_frames_sent_hi),
9548 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9551 #define IS_PORT_STAT(i) \
9552 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9553 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9554 #define IS_E1HMF_MODE_STAT(bp) \
9555 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
9557 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9559 struct bnx2x *bp = netdev_priv(dev);
9562 switch (stringset) {
9566 for_each_queue(bp, i) {
9567 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9568 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9569 bnx2x_q_stats_arr[j].string, i);
9570 k += BNX2X_NUM_Q_STATS;
9572 if (IS_E1HMF_MODE_STAT(bp))
9574 for (j = 0; j < BNX2X_NUM_STATS; j++)
9575 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9576 bnx2x_stats_arr[j].string);
9578 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9579 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9581 strcpy(buf + j*ETH_GSTRING_LEN,
9582 bnx2x_stats_arr[i].string);
9589 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9594 static int bnx2x_get_stats_count(struct net_device *dev)
9596 struct bnx2x *bp = netdev_priv(dev);
9600 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9601 if (!IS_E1HMF_MODE_STAT(bp))
9602 num_stats += BNX2X_NUM_STATS;
9604 if (IS_E1HMF_MODE_STAT(bp)) {
9606 for (i = 0; i < BNX2X_NUM_STATS; i++)
9607 if (IS_FUNC_STAT(i))
9610 num_stats = BNX2X_NUM_STATS;
9616 static void bnx2x_get_ethtool_stats(struct net_device *dev,
9617 struct ethtool_stats *stats, u64 *buf)
9619 struct bnx2x *bp = netdev_priv(dev);
9620 u32 *hw_stats, *offset;
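/* Each stats entry is one or two 32-bit words: size 4 is a plain u32,
 * size 8 is a 64-bit counter kept as a hi/lo pair folded with
 * HILO_U64, and size 0 entries are skipped. */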
9625 for_each_queue(bp, i) {
9626 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9627 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9628 if (bnx2x_q_stats_arr[j].size == 0) {
9629 /* skip this counter */
9633 offset = (hw_stats +
9634 bnx2x_q_stats_arr[j].offset);
9635 if (bnx2x_q_stats_arr[j].size == 4) {
9636 /* 4-byte counter */
9637 buf[k + j] = (u64) *offset;
9640 /* 8-byte counter */
9641 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9643 k += BNX2X_NUM_Q_STATS;
9645 if (IS_E1HMF_MODE_STAT(bp))
9647 hw_stats = (u32 *)&bp->eth_stats;
9648 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9649 if (bnx2x_stats_arr[j].size == 0) {
9650 /* skip this counter */
9654 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9655 if (bnx2x_stats_arr[j].size == 4) {
9656 /* 4-byte counter */
9657 buf[k + j] = (u64) *offset;
9660 /* 8-byte counter */
9661 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9664 hw_stats = (u32 *)&bp->eth_stats;
9665 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9666 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9668 if (bnx2x_stats_arr[i].size == 0) {
9669 /* skip this counter */
9674 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9675 if (bnx2x_stats_arr[i].size == 4) {
9676 /* 4-byte counter */
9677 buf[j] = (u64) *offset;
9681 /* 8-byte counter */
9682 buf[j] = HILO_U64(*offset, *(offset + 1));
9688 static int bnx2x_phys_id(struct net_device *dev, u32 data)
9690 struct bnx2x *bp = netdev_priv(dev);
9691 int port = BP_PORT(bp);
9694 if (!netif_running(dev))
9703 for (i = 0; i < (data * 2); i++) {
9705 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9706 bp->link_params.hw_led_mode,
9707 bp->link_params.chip_id);
9709 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9710 bp->link_params.hw_led_mode,
9711 bp->link_params.chip_id);
9713 msleep_interruptible(500);
9714 if (signal_pending(current))
9718 if (bp->link_vars.link_up)
9719 bnx2x_set_led(bp, port, LED_MODE_OPER,
9720 bp->link_vars.line_speed,
9721 bp->link_params.hw_led_mode,
9722 bp->link_params.chip_id);
9727 static struct ethtool_ops bnx2x_ethtool_ops = {
9728 .get_settings = bnx2x_get_settings,
9729 .set_settings = bnx2x_set_settings,
9730 .get_drvinfo = bnx2x_get_drvinfo,
9731 .get_wol = bnx2x_get_wol,
9732 .set_wol = bnx2x_set_wol,
9733 .get_msglevel = bnx2x_get_msglevel,
9734 .set_msglevel = bnx2x_set_msglevel,
9735 .nway_reset = bnx2x_nway_reset,
9736 .get_link = ethtool_op_get_link,
9737 .get_eeprom_len = bnx2x_get_eeprom_len,
9738 .get_eeprom = bnx2x_get_eeprom,
9739 .set_eeprom = bnx2x_set_eeprom,
9740 .get_coalesce = bnx2x_get_coalesce,
9741 .set_coalesce = bnx2x_set_coalesce,
9742 .get_ringparam = bnx2x_get_ringparam,
9743 .set_ringparam = bnx2x_set_ringparam,
9744 .get_pauseparam = bnx2x_get_pauseparam,
9745 .set_pauseparam = bnx2x_set_pauseparam,
9746 .get_rx_csum = bnx2x_get_rx_csum,
9747 .set_rx_csum = bnx2x_set_rx_csum,
9748 .get_tx_csum = ethtool_op_get_tx_csum,
9749 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9750 .set_flags = bnx2x_set_flags,
9751 .get_flags = ethtool_op_get_flags,
9752 .get_sg = ethtool_op_get_sg,
9753 .set_sg = ethtool_op_set_sg,
9754 .get_tso = ethtool_op_get_tso,
9755 .set_tso = bnx2x_set_tso,
9756 .self_test_count = bnx2x_self_test_count,
9757 .self_test = bnx2x_self_test,
9758 .get_strings = bnx2x_get_strings,
9759 .phys_id = bnx2x_phys_id,
9760 .get_stats_count = bnx2x_get_stats_count,
9761 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9764 /* end of ethtool_ops */
9766 /****************************************************************************
9767 * General service functions
9768 ****************************************************************************/
9770 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9774 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9778 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9779 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9780 PCI_PM_CTRL_PME_STATUS));
9782 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9783 /* delay required during transition out of D3hot */
9788 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9792 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9794 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9797 /* No more memory access after this point until
9798 * device is brought back to D0.
9808 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9812 /* Tell compiler that status block fields can change */
9814 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
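/* The last entry of each RCQ page is a "next page" pointer rather
 * than a completion, so a consumer index sitting on that boundary is
 * stepped over before comparing against rx_comp_cons. */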
9815 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9817 return (fp->rx_comp_cons != rx_cons_sb);
9821 * net_device service functions
9824 static int bnx2x_poll(struct napi_struct *napi, int budget)
9826 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9828 struct bnx2x *bp = fp->bp;
9831 #ifdef BNX2X_STOP_ON_ERROR
9832 if (unlikely(bp->panic))
9836 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9837 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9838 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9840 bnx2x_update_fpsb_idx(fp);
9842 if (bnx2x_has_tx_work(fp))
9843 bnx2x_tx_int(fp, budget);
9845 if (bnx2x_has_rx_work(fp))
9846 work_done = bnx2x_rx_int(fp, budget);
9847 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9849 /* must not complete if we consumed the full budget */
9850 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9852 #ifdef BNX2X_STOP_ON_ERROR
9855 napi_complete(napi);
9857 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9858 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9859 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9860 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9866 /* We split the first BD into a header BD and a data BD
9867 * to ease the pain of our fellow microcode engineers;
9868 * we use one DMA mapping for both BDs.
9869 * So far this has only been observed to happen
9870 * in Other Operating Systems(TM) */
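/* A sketch of the split, with hlen header bytes at the start of the
 * linear data:
 *
 *   before:  [ h_tx_bd: addr,        nbytes = hlen + payload ]
 *   after:   [ h_tx_bd: addr,        nbytes = hlen           ]
 *            [ d_tx_bd: addr + hlen, nbytes = payload        ]
 *
 * Both BDs point into the same DMA mapping; the data BD is flagged
 * below as carrying no mapping of its own. */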
9872 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9873 struct bnx2x_fastpath *fp,
9874 struct eth_tx_bd **tx_bd, u16 hlen,
9875 u16 bd_prod, int nbd)
9877 struct eth_tx_bd *h_tx_bd = *tx_bd;
9878 struct eth_tx_bd *d_tx_bd;
9880 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9882 /* first fix first BD */
9883 h_tx_bd->nbd = cpu_to_le16(nbd);
9884 h_tx_bd->nbytes = cpu_to_le16(hlen);
9886 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9887 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9888 h_tx_bd->addr_lo, h_tx_bd->nbd);
9890 /* now get a new data BD
9891 * (after the pbd) and fill it */
9892 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9893 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9895 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9896 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9898 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9899 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9900 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9902 /* this marks the BD as one that has no individual mapping;
9903 * the FW ignores this flag in a BD not marked as a start BD
9905 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9906 DP(NETIF_MSG_TX_QUEUED,
9907 "TSO split data size is %d (%x:%x)\n",
9908 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9910 /* update tx_bd for marking the last BD flag */
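/* bnx2x_csum_fix() adjusts a partial checksum when the device's
 * checksum start offset differs from the stack's by 'fix' bytes: a
 * positive fix subtracts the checksum of the bytes in front of the
 * transport header, a negative fix adds the missing ones back.  The
 * result is byte-swapped for the parsing BD. */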
9916 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9919 csum = (u16) ~csum_fold(csum_sub(csum,
9920 csum_partial(t_header - fix, fix, 0)));
9923 csum = (u16) ~csum_fold(csum_add(csum,
9924 csum_partial(t_header, -fix, 0)));
9926 return swab16(csum);
9929 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9933 if (skb->ip_summed != CHECKSUM_PARTIAL)
9937 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9939 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9940 rc |= XMIT_CSUM_TCP;
9944 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9945 rc |= XMIT_CSUM_TCP;
9949 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9952 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9958 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9959 /* check if packet requires linearization (packet is too fragmented) */
9960 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9965 int first_bd_sz = 0;
9967 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9968 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9970 if (xmit_type & XMIT_GSO) {
9971 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9972 /* Check if LSO packet needs to be copied:
9973 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
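/* Worked example, assuming this driver's MAX_FETCH_BD of 13: wnd_size
 * is 10, so every window of 10 consecutive frags must carry at least
 * lso_mss bytes; otherwise a single MSS could span more BDs than the
 * FW can fetch for one packet and the skb must be linearized. */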
9974 int wnd_size = MAX_FETCH_BD - 3;
9975 /* Number of windows to check */
9976 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9981 /* Headers length */
9982 hlen = (int)(skb_transport_header(skb) - skb->data) +
9985 /* Amount of data (w/o headers) on the linear part of the SKB */
9986 first_bd_sz = skb_headlen(skb) - hlen;
9988 wnd_sum = first_bd_sz;
9990 /* Calculate the first sum - it's special */
9991 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9993 skb_shinfo(skb)->frags[frag_idx].size;
9995 /* If there was data in the linear part of the skb - check it */
9996 if (first_bd_sz > 0) {
9997 if (unlikely(wnd_sum < lso_mss)) {
10002 wnd_sum -= first_bd_sz;
10005 /* Others are easier: run through the frag list and
10006 check all windows */
10007 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10009 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10011 if (unlikely(wnd_sum < lso_mss)) {
10016 skb_shinfo(skb)->frags[wnd_idx].size;
10020 /* a non-LSO packet this fragmented must always be linearized */
10027 if (unlikely(to_copy))
10028 DP(NETIF_MSG_TX_QUEUED,
10029 "Linearization IS REQUIRED for %s packet. "
10030 "num_frags %d hlen %d first_bd_sz %d\n",
10031 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10032 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10038 /* called with netif_tx_lock
10039 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10040 * netif_wake_queue()
10042 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10044 struct bnx2x *bp = netdev_priv(dev);
10045 struct bnx2x_fastpath *fp;
10046 struct netdev_queue *txq;
10047 struct sw_tx_bd *tx_buf;
10048 struct eth_tx_bd *tx_bd;
10049 struct eth_tx_parse_bd *pbd = NULL;
10050 u16 pkt_prod, bd_prod;
10052 dma_addr_t mapping;
10053 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10054 int vlan_off = (bp->e1hov ? 4 : 0);
10058 #ifdef BNX2X_STOP_ON_ERROR
10059 if (unlikely(bp->panic))
10060 return NETDEV_TX_BUSY;
10063 fp_index = skb_get_queue_mapping(skb);
10064 txq = netdev_get_tx_queue(dev, fp_index);
10066 fp = &bp->fp[fp_index];
10068 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10069 fp->eth_q_stats.driver_xoff++;
10070 netif_tx_stop_queue(txq);
10071 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10072 return NETDEV_TX_BUSY;
10075 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10076 " gso type %x xmit_type %x\n",
10077 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10078 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10080 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10081 /* First, check if we need to linearize the skb
10082 (due to FW restrictions) */
10083 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10084 /* Statistics of linearization */
10086 if (skb_linearize(skb) != 0) {
10087 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10088 "silently dropping this SKB\n");
10089 dev_kfree_skb_any(skb);
10090 return NETDEV_TX_OK;
10096 Please read carefully. First we use one BD which we mark as start,
10097 then for TSO or xsum we have a parsing info BD,
10098 and only then we have the rest of the TSO BDs.
10099 (don't forget to mark the last one as last,
10100 and to unmap only AFTER you write to the BD ...)
10101 And above all, all pbd sizes are in words - NOT DWORDS!
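/* Resulting BD chain for a typical TSO packet:
 *
 *   start BD (headers) -> parse BD -> data BD ... data BD (last)
 *
 * The parse BD carries the header offsets and pseudo checksums the FW
 * needs; packets sent with no offloads go out without one. */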
10104 pkt_prod = fp->tx_pkt_prod++;
10105 bd_prod = TX_BD(fp->tx_bd_prod);
10107 /* get a tx_buf and first BD */
10108 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10109 tx_bd = &fp->tx_desc_ring[bd_prod];
10111 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10112 tx_bd->general_data = (UNICAST_ADDRESS <<
10113 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10115 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10117 /* remember the first BD of the packet */
10118 tx_buf->first_bd = fp->tx_bd_prod;
10121 DP(NETIF_MSG_TX_QUEUED,
10122 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10123 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10126 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10127 (bp->flags & HW_VLAN_TX_FLAG)) {
10128 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10129 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10133 tx_bd->vlan = cpu_to_le16(pkt_prod);
10136 /* turn on parsing and get a BD */
10137 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10138 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10140 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10143 if (xmit_type & XMIT_CSUM) {
10144 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10146 /* for now NS flag is not used in Linux */
10147 pbd->global_data = (hlen |
10148 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
10149 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10151 pbd->ip_hlen = (skb_transport_header(skb) -
10152 skb_network_header(skb)) / 2;
10154 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10156 pbd->total_hlen = cpu_to_le16(hlen);
10157 hlen = hlen*2 - vlan_off;
10159 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10161 if (xmit_type & XMIT_CSUM_V4)
10162 tx_bd->bd_flags.as_bitfield |=
10163 ETH_TX_BD_FLAGS_IP_CSUM;
10165 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10167 if (xmit_type & XMIT_CSUM_TCP) {
10168 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10171 s8 fix = SKB_CS_OFF(skb); /* signed! */
10173 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10174 pbd->cs_offset = fix / 2;
10176 DP(NETIF_MSG_TX_QUEUED,
10177 "hlen %d offset %d fix %d csum before fix %x\n",
10178 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10181 /* HW bug: fixup the CSUM */
10182 pbd->tcp_pseudo_csum =
10183 bnx2x_csum_fix(skb_transport_header(skb),
10186 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10187 pbd->tcp_pseudo_csum);
10191 mapping = pci_map_single(bp->pdev, skb->data,
10192 skb_headlen(skb), PCI_DMA_TODEVICE);
10194 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10195 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10196 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
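/* nbd counts the head BD, one BD per frag and, when a parsing BD was
 * allocated above, one more for it */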
10197 tx_bd->nbd = cpu_to_le16(nbd);
10198 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10200 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10201 " nbytes %d flags %x vlan %x\n",
10202 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10203 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10204 le16_to_cpu(tx_bd->vlan));
10206 if (xmit_type & XMIT_GSO) {
10208 DP(NETIF_MSG_TX_QUEUED,
10209 "TSO packet len %d hlen %d total len %d tso size %d\n",
10210 skb->len, hlen, skb_headlen(skb),
10211 skb_shinfo(skb)->gso_size);
10213 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10215 if (unlikely(skb_headlen(skb) > hlen))
10216 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10219 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10220 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10221 pbd->tcp_flags = pbd_tcp_flags(skb);
10223 if (xmit_type & XMIT_GSO_V4) {
10224 pbd->ip_id = swab16(ip_hdr(skb)->id);
10225 pbd->tcp_pseudo_csum =
10226 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10227 ip_hdr(skb)->daddr,
10228 0, IPPROTO_TCP, 0));
10231 pbd->tcp_pseudo_csum =
10232 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10233 &ipv6_hdr(skb)->daddr,
10234 0, IPPROTO_TCP, 0));
10236 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10239 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10240 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10242 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10243 tx_bd = &fp->tx_desc_ring[bd_prod];
10245 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10246 frag->size, PCI_DMA_TODEVICE);
10248 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10249 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10250 tx_bd->nbytes = cpu_to_le16(frag->size);
10251 tx_bd->vlan = cpu_to_le16(pkt_prod);
10252 tx_bd->bd_flags.as_bitfield = 0;
10254 DP(NETIF_MSG_TX_QUEUED,
10255 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10256 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10257 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10260 /* now at last mark the BD as the last BD */
10261 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10263 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10264 tx_bd, tx_bd->bd_flags.as_bitfield);
10266 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10268 /* now send a tx doorbell, counting the next BD
10269 * if the packet contains or ends with it
10271 if (TX_BD_POFF(bd_prod) < nbd)
10275 DP(NETIF_MSG_TX_QUEUED,
10276 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10277 " tcp_flags %x xsum %x seq %u hlen %u\n",
10278 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10279 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10280 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10282 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10285 * Make sure that the BD data is updated before updating the producer
10286 * since FW might read the BD right after the producer is updated.
10287 * This is only applicable for weak-ordered memory model archs such
10288 * as IA-64. The following barrier is also mandatory since the FW
10289 * assumes packets always have BDs.
10293 fp->hw_tx_prods->bds_prod =
10294 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
10295 mb(); /* FW restriction: must not reorder writing nbd and packets */
10296 fp->hw_tx_prods->packets_prod =
10297 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
10298 DOORBELL(bp, FP_IDX(fp), 0);
10302 fp->tx_bd_prod += nbd;
10303 dev->trans_start = jiffies;
10305 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10306 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10307 if we put Tx into XOFF state. */
10309 netif_tx_stop_queue(txq);
10310 fp->eth_q_stats.driver_xoff++;
10311 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10312 netif_tx_wake_queue(txq);
10316 return NETDEV_TX_OK;
10319 /* called with rtnl_lock */
10320 static int bnx2x_open(struct net_device *dev)
10322 struct bnx2x *bp = netdev_priv(dev);
10324 netif_carrier_off(dev);
10326 bnx2x_set_power_state(bp, PCI_D0);
10328 return bnx2x_nic_load(bp, LOAD_OPEN);
10331 /* called with rtnl_lock */
10332 static int bnx2x_close(struct net_device *dev)
10334 struct bnx2x *bp = netdev_priv(dev);
10336 /* Unload the driver, release IRQs */
10337 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10338 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10339 if (!CHIP_REV_IS_SLOW(bp))
10340 bnx2x_set_power_state(bp, PCI_D3hot);
10345 /* called with netif_tx_lock from set_multicast */
10346 static void bnx2x_set_rx_mode(struct net_device *dev)
10348 struct bnx2x *bp = netdev_priv(dev);
10349 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10350 int port = BP_PORT(bp);
10352 if (bp->state != BNX2X_STATE_OPEN) {
10353 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10357 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10359 if (dev->flags & IFF_PROMISC)
10360 rx_mode = BNX2X_RX_MODE_PROMISC;
10362 else if ((dev->flags & IFF_ALLMULTI) ||
10363 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10364 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10366 else { /* some multicasts */
10367 if (CHIP_IS_E1(bp)) {
10368 int i, old, offset;
10369 struct dev_mc_list *mclist;
10370 struct mac_configuration_cmd *config =
10371 bnx2x_sp(bp, mcast_config);
10373 for (i = 0, mclist = dev->mc_list;
10374 mclist && (i < dev->mc_count);
10375 i++, mclist = mclist->next) {
10377 config->config_table[i].
10378 cam_entry.msb_mac_addr =
10379 swab16(*(u16 *)&mclist->dmi_addr[0]);
10380 config->config_table[i].
10381 cam_entry.middle_mac_addr =
10382 swab16(*(u16 *)&mclist->dmi_addr[2]);
10383 config->config_table[i].
10384 cam_entry.lsb_mac_addr =
10385 swab16(*(u16 *)&mclist->dmi_addr[4]);
10386 config->config_table[i].cam_entry.flags =
10388 config->config_table[i].
10389 target_table_entry.flags = 0;
10390 config->config_table[i].
10391 target_table_entry.client_id = 0;
10392 config->config_table[i].
10393 target_table_entry.vlan_id = 0;
10396 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10397 config->config_table[i].
10398 cam_entry.msb_mac_addr,
10399 config->config_table[i].
10400 cam_entry.middle_mac_addr,
10401 config->config_table[i].
10402 cam_entry.lsb_mac_addr);
10404 old = config->hdr.length;
10406 for (; i < old; i++) {
10407 if (CAM_IS_INVALID(config->
10408 config_table[i])) {
10409 /* already invalidated */
10413 CAM_INVALIDATE(config->
10418 if (CHIP_REV_IS_SLOW(bp))
10419 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10421 offset = BNX2X_MAX_MULTICAST*(1 + port);
10423 config->hdr.length = i;
10424 config->hdr.offset = offset;
10425 config->hdr.client_id = bp->fp->cl_id;
10426 config->hdr.reserved1 = 0;
10428 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10429 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10430 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10433 /* Accept one or more multicasts */
10434 struct dev_mc_list *mclist;
10435 u32 mc_filter[MC_HASH_SIZE];
10436 u32 crc, bit, regidx;
10439 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10441 for (i = 0, mclist = dev->mc_list;
10442 mclist && (i < dev->mc_count);
10443 i++, mclist = mclist->next) {
10445 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10448 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10449 bit = (crc >> 24) & 0xff;
10452 mc_filter[regidx] |= (1 << bit);
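/* The top 8 crc32c bits select one of 256 filter bits spread over the
 * MC_HASH_SIZE 32-bit registers: the upper 3 bits of 'bit' pick the
 * register and the low 5 bits pick the bit within it. */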
10455 for (i = 0; i < MC_HASH_SIZE; i++)
10456 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10461 bp->rx_mode = rx_mode;
10462 bnx2x_set_storm_rx_mode(bp);
10465 /* called with rtnl_lock */
10466 static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10468 struct sockaddr *addr = p;
10469 struct bnx2x *bp = netdev_priv(dev);
10471 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10474 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10475 if (netif_running(dev)) {
10476 if (CHIP_IS_E1(bp))
10477 bnx2x_set_mac_addr_e1(bp, 1);
10479 bnx2x_set_mac_addr_e1h(bp, 1);
10485 /* called with rtnl_lock */
10486 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10488 struct mii_ioctl_data *data = if_mii(ifr);
10489 struct bnx2x *bp = netdev_priv(dev);
10490 int port = BP_PORT(bp);
10495 data->phy_id = bp->port.phy_addr;
10499 case SIOCGMIIREG: {
10502 if (!netif_running(dev))
10505 mutex_lock(&bp->port.phy_mutex);
10506 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10507 DEFAULT_PHY_DEV_ADDR,
10508 (data->reg_num & 0x1f), &mii_regval);
10509 data->val_out = mii_regval;
10510 mutex_unlock(&bp->port.phy_mutex);
10515 if (!capable(CAP_NET_ADMIN))
10518 if (!netif_running(dev))
10521 mutex_lock(&bp->port.phy_mutex);
10522 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10523 DEFAULT_PHY_DEV_ADDR,
10524 (data->reg_num & 0x1f), data->val_in);
10525 mutex_unlock(&bp->port.phy_mutex);
10533 return -EOPNOTSUPP;
10536 /* called with rtnl_lock */
10537 static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10539 struct bnx2x *bp = netdev_priv(dev);
10542 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10543 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10546 /* This does not race with packet allocation
10547 * because the actual alloc size is
10548 * only updated as part of load
10550 dev->mtu = new_mtu;
10552 if (netif_running(dev)) {
10553 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10554 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10560 static void bnx2x_tx_timeout(struct net_device *dev)
10562 struct bnx2x *bp = netdev_priv(dev);
10564 #ifdef BNX2X_STOP_ON_ERROR
10568 /* This allows the netif to be shut down gracefully before resetting */
10569 schedule_work(&bp->reset_task);
10573 /* called with rtnl_lock */
10574 static void bnx2x_vlan_rx_register(struct net_device *dev,
10575 struct vlan_group *vlgrp)
10577 struct bnx2x *bp = netdev_priv(dev);
10581 /* Set flags according to the required capabilities */
10582 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10584 if (dev->features & NETIF_F_HW_VLAN_TX)
10585 bp->flags |= HW_VLAN_TX_FLAG;
10587 if (dev->features & NETIF_F_HW_VLAN_RX)
10588 bp->flags |= HW_VLAN_RX_FLAG;
10590 if (netif_running(dev))
10591 bnx2x_set_client_config(bp);
10596 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10597 static void poll_bnx2x(struct net_device *dev)
10599 struct bnx2x *bp = netdev_priv(dev);
10601 disable_irq(bp->pdev->irq);
10602 bnx2x_interrupt(bp->pdev->irq, dev);
10603 enable_irq(bp->pdev->irq);
10607 static const struct net_device_ops bnx2x_netdev_ops = {
10608 .ndo_open = bnx2x_open,
10609 .ndo_stop = bnx2x_close,
10610 .ndo_start_xmit = bnx2x_start_xmit,
10611 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10612 .ndo_set_mac_address = bnx2x_change_mac_addr,
10613 .ndo_validate_addr = eth_validate_addr,
10614 .ndo_do_ioctl = bnx2x_ioctl,
10615 .ndo_change_mtu = bnx2x_change_mtu,
10616 .ndo_tx_timeout = bnx2x_tx_timeout,
10618 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10620 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10621 .ndo_poll_controller = poll_bnx2x,
10626 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10627 struct net_device *dev)
10632 SET_NETDEV_DEV(dev, &pdev->dev);
10633 bp = netdev_priv(dev);
10638 bp->func = PCI_FUNC(pdev->devfn);
10640 rc = pci_enable_device(pdev);
10642 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10646 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10647 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10650 goto err_out_disable;
10653 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10654 printk(KERN_ERR PFX "Cannot find second PCI device"
10655 " base address, aborting\n");
10657 goto err_out_disable;
10660 if (atomic_read(&pdev->enable_cnt) == 1) {
10661 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10663 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10665 goto err_out_disable;
10668 pci_set_master(pdev);
10669 pci_save_state(pdev);
10672 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10673 if (bp->pm_cap == 0) {
10674 printk(KERN_ERR PFX "Cannot find power management"
10675 " capability, aborting\n");
10677 goto err_out_release;
10680 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10681 if (bp->pcie_cap == 0) {
10682 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10685 goto err_out_release;
10688 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10689 bp->flags |= USING_DAC_FLAG;
10690 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10691 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10692 " failed, aborting\n");
10694 goto err_out_release;
10697 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10698 printk(KERN_ERR PFX "System does not support DMA,"
10701 goto err_out_release;
10704 dev->mem_start = pci_resource_start(pdev, 0);
10705 dev->base_addr = dev->mem_start;
10706 dev->mem_end = pci_resource_end(pdev, 0);
10708 dev->irq = pdev->irq;
10710 bp->regview = pci_ioremap_bar(pdev, 0);
10711 if (!bp->regview) {
10712 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10714 goto err_out_release;
10717 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10718 min_t(u64, BNX2X_DB_SIZE,
10719 pci_resource_len(pdev, 2)));
10720 if (!bp->doorbells) {
10721 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10723 goto err_out_unmap;
10726 bnx2x_set_power_state(bp, PCI_D0);
10728 /* clean indirect addresses */
10729 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10730 PCICFG_VENDOR_ID_OFFSET);
10731 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10732 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10733 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10734 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
10736 dev->watchdog_timeo = TX_TIMEOUT;
10738 dev->netdev_ops = &bnx2x_netdev_ops;
10739 dev->ethtool_ops = &bnx2x_ethtool_ops;
10740 dev->features |= NETIF_F_SG;
10741 dev->features |= NETIF_F_HW_CSUM;
10742 if (bp->flags & USING_DAC_FLAG)
10743 dev->features |= NETIF_F_HIGHDMA;
10745 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
10746 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10748 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
10749 dev->features |= NETIF_F_TSO6;
10755 iounmap(bp->regview);
10756 bp->regview = NULL;
10758 if (bp->doorbells) {
10759 iounmap(bp->doorbells);
10760 bp->doorbells = NULL;
10764 if (atomic_read(&pdev->enable_cnt) == 1)
10765 pci_release_regions(pdev);
10768 pci_disable_device(pdev);
10769 pci_set_drvdata(pdev, NULL);
10775 static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10777 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10779 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10783 /* return value: 1 = 2.5GHz, 2 = 5GHz */
10784 static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10786 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10788 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10792 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10793 const struct pci_device_id *ent)
10795 static int version_printed;
10796 struct net_device *dev = NULL;
10800 if (version_printed++ == 0)
10801 printk(KERN_INFO "%s", version);
10803 /* dev zeroed in alloc_etherdev_mq */
10804 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
10806 printk(KERN_ERR PFX "Cannot allocate net device\n");
10810 bp = netdev_priv(dev);
10811 bp->msglevel = debug;
10813 rc = bnx2x_init_dev(pdev, dev);
10819 pci_set_drvdata(pdev, dev);
10821 rc = bnx2x_init_bp(bp);
10823 goto init_one_exit;
10825 rc = register_netdev(dev);
10827 dev_err(&pdev->dev, "Cannot register net device\n");
10828 goto init_one_exit;
10831 bp->common.name = board_info[ent->driver_data].name;
10832 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
10833 " IRQ %d, ", dev->name, bp->common.name,
10834 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
10835 bnx2x_get_pcie_width(bp),
10836 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10837 dev->base_addr, bp->pdev->irq);
10838 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
10843 iounmap(bp->regview);
10846 iounmap(bp->doorbells);
10850 if (atomic_read(&pdev->enable_cnt) == 1)
10851 pci_release_regions(pdev);
10853 pci_disable_device(pdev);
10854 pci_set_drvdata(pdev, NULL);
10859 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10861 struct net_device *dev = pci_get_drvdata(pdev);
10865 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10868 bp = netdev_priv(dev);
10870 unregister_netdev(dev);
10873 iounmap(bp->regview);
10876 iounmap(bp->doorbells);
10880 if (atomic_read(&pdev->enable_cnt) == 1)
10881 pci_release_regions(pdev);
10883 pci_disable_device(pdev);
10884 pci_set_drvdata(pdev, NULL);
10887 static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10889 struct net_device *dev = pci_get_drvdata(pdev);
10893 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10896 bp = netdev_priv(dev);
10900 pci_save_state(pdev);
10902 if (!netif_running(dev)) {
10907 netif_device_detach(dev);
10909 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10911 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
10918 static int bnx2x_resume(struct pci_dev *pdev)
10920 struct net_device *dev = pci_get_drvdata(pdev);
10925 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10928 bp = netdev_priv(dev);
10932 pci_restore_state(pdev);
10934 if (!netif_running(dev)) {
10939 bnx2x_set_power_state(bp, PCI_D0);
10940 netif_device_attach(dev);
10942 rc = bnx2x_nic_load(bp, LOAD_OPEN);
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		/* mark every entry of the slowpath MAC/multicast
		   configuration table as invalid */
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
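/*
 * bnx2x_eeh_recover - re-learn the MCP state after a slot reset.
 * The shared-memory base and validity signature are read back from
 * the freshly reset chip so the driver knows whether the management
 * firmware is still usable, and the firmware sequence number is
 * resynchronized.
 */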
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
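/*
 * PCI error recovery, as driven by the PCI core (see
 * Documentation/PCI/pci-error-recovery.txt): error_detected() asks for
 * a slot reset, slot_reset() re-enables the device after the reset,
 * and resume() reloads the NIC once traffic may flow again.
 */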
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
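/*
 * Returning PCI_ERS_RESULT_RECOVERED lets the PCI core proceed to the
 * resume() callback below; PCI_ERS_RESULT_DISCONNECT tells it the
 * device is unrecoverable and should be treated as removed.
 */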
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
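/* recovery callbacks handed to the PCI core, in the order they run */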
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
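/*
 * The slowpath workqueue is created before the driver is registered:
 * pci_register_driver() may probe already-present devices immediately,
 * and once a device is up its interrupt path queues work on bnx2x_wq,
 * so the queue must exist first.
 */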
static int __init bnx2x_init(void)
{
	int rc;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}
	rc = pci_register_driver(&bnx2x_pci_driver);
	if (rc) /* don't leak the workqueue if registration fails */
		destroy_workqueue(bnx2x_wq);
	return rc;
}
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);