/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Eliezer Tamir <eliezert@broadcom.com>
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"0.40.15"
#define DRV_MODULE_RELDATE	"$DateTime: 2007/11/15 07:28:37 $"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_INFO(cvs_version, "$Revision: #404 $");
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* locking is done by mcp */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
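
/* Illustrative sketch, not part of the original driver: a hypothetical
 * read-modify-write helper layered on the two indirect accessors above,
 * showing how the GRC address/data config-space window is typically used.
 */
static inline void bnx2x_reg_rmw_ind_sketch(struct bnx2x *bp, u32 addr,
					    u32 clear, u32 set)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);

	val &= ~clear;
	val |= set;
	bnx2x_reg_wr_ind(bp, addr, val);
}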
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		/* DP(NETIF_MSG_DMAE, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i)); */
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
static void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr,
			     u32 dst_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	bnx2x_post_dmae(bp, dmae, port * 8);

	/* adjust timeout for emulation/FPGA */
	if (CHIP_REV_IS_SLOW(bp))

	while (*wb_comp != BNX2X_WB_COMP_VAL) {
		/* DP(NETIF_MSG_DMAE, "wb_comp 0x%08x\n", *wb_comp); */

		BNX2X_ERR("dmae timeout!\n");
	}
}
static void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->dmae;
	int port = bp->port;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = BNX2X_WB_COMP_VAL;

	DP(NETIF_MSG_DMAE, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	bnx2x_post_dmae(bp, dmae, port * 8);

	while (*wb_comp != BNX2X_WB_COMP_VAL) {

		BNX2X_ERR("dmae timeout!\n");
	}

	DP(NETIF_MSG_DMAE, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
}
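
/* Illustrative sketch, not part of the original driver: pushing the four
 * slowpath write-back words to a hypothetical GRC byte address 'grc_dst'
 * with the DMAE helper above.  DMAE lengths are counted in 32-bit words.
 */
static void bnx2x_wb_wr_sketch(struct bnx2x *bp, u32 grc_dst)
{
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_dst, 4);
}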
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	int i, j;
	u32 last_idx;
	const char storm[] = {"XTCU"};
	const u32 intmem_base[] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};

	/* Go through all instances of all SEMIs */
	for (i = 0; i < 4; i++) {
		last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
				   intmem_base[i]);
		BNX2X_ERR("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
			  storm[i], last_idx);

		/* print the asserts */
		for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
			u32 row0, row1, row2, row3;

			row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
				      intmem_base[i]);
			row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
				      intmem_base[i]);
			row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
				      intmem_base[i]);
			row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
				      intmem_base[i]);

			if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_ERR("DATA %cSTORM_ASSERT_INDEX 0x%x ="
					  " 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storm[i], j, row3, row2, row1, row0);
			}
		}
	}
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_ERR PFX "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_ERR PFX "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
			  " *rx_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data (%x %x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_t_idx(%u)"
		  " def_x_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_t_idx, bp->def_x_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}
static void bnx2x_enable_int(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  msi %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);
}
static void bnx2x_disable_int(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_disable_int_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_disable_int(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}
/* general service functions */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}
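
/* Illustrative usage sketch, not part of the original driver: re-enable
 * interrupts for status block 'sb_id' after servicing its USTORM index.
 * IGU_INT_ENABLE is assumed to be the counterpart of the IGU_INT_DISABLE
 * opcode used by the MSI-X fastpath handler below.
 */
static inline void bnx2x_ack_enable_sketch(struct bnx2x *bp, u8 sb_id,
					   u16 idx)
{
	bnx2x_ack_sb(bp, sb_id, USTORM_ID, idx, IGU_INT_ENABLE, 1);
}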
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((rx_cons_sb != fp->rx_comp_cons) ||
	    (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
		return 1;

	return 0;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	/* DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	BNX2X_ERR("read %x from IGU\n", result);
	REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
#endif

	return result;
}
/* fast path service functions */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {

		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {

			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
		       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	tx_buf->first_bd = 0;

	return bd_idx;
}
static inline u32 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	u16 used;
	u16 prod, cons;

	/* Tell compiler that prod and cons can change */
	barrier();
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
		(cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));

	if (prod >= cons) {
		/* used = prod - cons - prod/size + cons/size */
		used -= NUM_TX_BD - NUM_TX_RINGS;
	}

	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);

	return (fp->bp->tx_ring_size - used);
}
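
/* Worked example (illustrative, not from the original source): assume 3
 * BD pages of 256 descriptors each, so NUM_TX_BD = 768, NUM_TX_RINGS = 3,
 * and the last BD of each page is a link to the next page.  With cons = 10
 * and prod = 522, the naive count prod - cons = 512 includes the two link
 * BDs at 255 and 511; prod/256 - cons/256 = 2 - 0 subtracts exactly those,
 * leaving 510 BDs really in use.
 */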
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

		/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		} */

		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
		      BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d)  state is %x\n",
				  command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_PORT_DEL | BNX2X_STATE_CLOSING_WAIT4_DELETE):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_DELETED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d)  state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod to see the change */
}
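
/* Worked example (illustrative): the switch key above ORs the ramrod
 * command code with the current driver state, so one integer identifies
 * "this reply in this state".  E.g. a halt completion that arrives while
 * bp->state == BNX2X_STATE_CLOSING_WAIT4_HALT matches the
 * (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT) case and
 * advances the state machine to BNX2X_STATE_CLOSING_WAIT4_DELETE.
 */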
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		unsigned int len, pad;
		struct sw_rx_bd *rx_buf;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];

		DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u  sw_comp_cons %u"
		   "  comp_ring (%u)  bd_ring (%u,%u)\n",
		   hw_comp_cons, sw_comp_cons,
		   comp_ring_cons, bd_prod, bd_cons);
		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %x\n",
		   cqe->fast_path_cqe.type,
		   cqe->fast_path_cqe.error_type_flags,
		   cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

		/* is this a slowpath msg? */
		if (unlikely(cqe->fast_path_cqe.type)) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;
		}

		/* this is an rx packet */
		rx_buf = &fp->rx_buf_ring[bd_cons];
		skb = rx_buf->skb;

		len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
		pad = cqe->fast_path_cqe.placement_offset;

		pci_dma_sync_single_for_device(bp->pdev,
					       pci_unmap_addr(rx_buf, mapping),
					       pad + RX_COPY_THRESH,
					       PCI_DMA_FROMDEVICE);
		prefetch(((char *)(skb)) + 128);

		/* is this an error packet? */
		if (unlikely(cqe->fast_path_cqe.error_type_flags &
			     ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
			DP(NETIF_MSG_RX_ERR,
			   "ERROR flags(%u) Rx packet(%u)\n",
			   cqe->fast_path_cqe.error_type_flags,
			   sw_comp_cons);
			/* TBD make sure MC counts this as a drop */
			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring
		 * copy small packets if mtu > 1500
		 */
		if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
		    (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev,
						   len + pad);
			if (new_skb == NULL) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped "
				   "because of alloc failure\n");
				/* TBD count this as a drop? */
				goto reuse_rx;
			}

			skb_copy_from_linear_data_offset(skb, pad,
					new_skb->data + pad, len);
			skb_reserve(new_skb, pad);
			skb_put(new_skb, len);

			bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

			skb = new_skb;

		} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);
			skb_reserve(skb, pad);
			skb_put(skb, len);

		} else {
			DP(NETIF_MSG_RX_ERR,
			   "ERROR packet dropped because "
			   "of alloc failure\n");
reuse_rx:
			bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		/* TBD do we pass bad csum packets in promisc */

		if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
		    && (bp->vlgrp != NULL))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);

next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
		rx_pkt++;

		if (rx_pkt == budget)
			break;
	}

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	return rx_pkt;
}
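
/* Illustrative sketch, not part of the original driver: a minimal NAPI
 * poll handler built on bnx2x_rx_int().  It assumes the fastpath embeds
 * its napi_struct as fp->napi (as the bnx2x_fp() accessor suggests), uses
 * this kernel era's two-argument netif_rx_complete(), and assumes
 * IGU_INT_ENABLE is the counterpart of the IGU_INT_DISABLE opcode below.
 */
static int bnx2x_poll_sketch(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi,
						 struct bnx2x_fastpath, napi);
	int done = bnx2x_rx_int(fp, budget);

	if (done < budget) {
		/* all pending work done; re-enable interrupts for this SB */
		netif_rx_complete(fp->bp->dev, napi);
		bnx2x_ack_sb(fp->bp, fp->index, USTORM_ID, fp->fp_u_idx,
			     IGU_INT_ENABLE, 1);
	}
	return done;
}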
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = fp->index;

	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}

	DP(NETIF_MSG_INTR, "got an interrupt  status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is shared and is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	if (status & 0x2) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~0x2;
	}

	if (unlikely(status & 0x1)) {

		schedule_work(&bp->sp_task);

		status &= ~0x1;
	}

	if (status)
		DP(NETIF_MSG_INTR,
		   "got an unknown interrupt! (status is %u)\n", status);

	return IRQ_HANDLED;
}
/* end of fast path */

/* General service functions */

static void bnx2x_leds_set(struct bnx2x *bp, unsigned int speed)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_MODE_P0 + port*4,
	       ((bp->hw_config & SHARED_HW_CFG_LED_MODE_MASK) >>
		SHARED_HW_CFG_LED_MODE_SHIFT));
	NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 0);

	/* Set blinking rate to ~15.9Hz */
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_P0 + port*4,
	       LED_BLINK_RATE_VAL);
	NIG_WR(NIG_REG_LED_CONTROL_BLINK_RATE_ENA_P0 + port*4, 1);

	/* On Ax chip versions for speeds less than 10G
	   LED scheme is different */
	if ((CHIP_REV(bp) == CHIP_REV_Ax) && (speed < SPEED_10000)) {
		NIG_WR(NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + port*4, 1);
		NIG_WR(NIG_REG_LED_CONTROL_TRAFFIC_P0 + port*4, 0);
		NIG_WR(NIG_REG_LED_CONTROL_BLINK_TRAFFIC_P0 + port*4, 1);
	}
}

static void bnx2x_leds_unset(struct bnx2x *bp)
{
	int port = bp->port;

	NIG_WR(NIG_REG_LED_10G_P0 + port*4, 0);
	NIG_WR(NIG_REG_LED_MODE_P0 + port*4, SHARED_HW_CFG_LED_MAC1);
}
static u32 bnx2x_bits_en(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val |= bits;
	REG_WR(bp, reg, val);
	return val;
}

static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
{
	u32 val = REG_RD(bp, reg);

	val &= ~bits;
	REG_WR(bp, reg, val);
	return val;
}
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 cnt;
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 func = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + func*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + func*8, resource_bit);
	return 0;
}
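
/* Illustrative usage sketch, not part of the original driver: the
 * lock/unlock pair brackets accesses to a shared hardware resource,
 * mirroring how bnx2x_set_gpio() below holds the GPIO lock.
 */
static int bnx2x_locked_op_sketch(struct bnx2x *bp)
{
	int rc = bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)
		return rc;
	/* ... access MISC_REG_GPIO here ... */
	return bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
}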
static int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
	int gpio_shift = gpio_num +
			 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static int bnx2x_mdio22_write(struct bnx2x *bp, u32 reg, u32 val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 tmp;
	int i, rc = 0;

	/* DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  val 0x%08x\n",
	   bp->phy_addr, reg, val); */

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
	}

	tmp = ((bp->phy_addr << 21) | (reg << 16) |
	       (val & EMAC_MDIO_COMM_DATA) |
	       EMAC_MDIO_COMM_COMMAND_WRITE_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {

		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			break;
		}
	}

	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");
		rc = -EBUSY;
	}

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		tmp = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, tmp);
	}

	return rc;
}

static int bnx2x_mdio22_read(struct bnx2x *bp, u32 reg, u32 *ret_val)
{
	int port = bp->port;
	u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
	u32 val;
	int i, rc = 0;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val &= ~EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
		REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
	}

	val = ((bp->phy_addr << 21) | (reg << 16) |
	       EMAC_MDIO_COMM_COMMAND_READ_22 |
	       EMAC_MDIO_COMM_START_BUSY);
	EMAC_WR(EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {

		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			val &= EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");
		rc = -EBUSY;
	}

	*ret_val = val;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MDIO_MODE);
		val |= EMAC_MDIO_MODE_AUTO_POLL;
		EMAC_WR(EMAC_REG_EMAC_MDIO_MODE, val);
	}

	/* DP(NETIF_MSG_HW, "phy_addr 0x%x  reg 0x%x  ret_val 0x%08x\n",
	   bp->phy_addr, reg, *ret_val); */

	return rc;
}
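
/* Illustrative sketch, not part of the original driver: a hypothetical
 * clause-22 read-modify-write on a PHY register using the two helpers
 * above.
 */
static int bnx2x_mdio22_rmw_sketch(struct bnx2x *bp, u32 reg,
				   u32 clear, u32 set)
{
	u32 val;
	int rc = bnx2x_mdio22_read(bp, reg, &val);

	if (rc)
		return rc;
	return bnx2x_mdio22_write(bp, reg, (val & ~clear) | set);
}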
static int bnx2x_mdio45_ctrl_write(struct bnx2x *bp, u32 mdio_ctrl,
				   u32 phy_addr, u32 reg, u32 addr, u32 val)
{
	u32 tmp;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);

	tmp = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {

		tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			break;
		}
	}

	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");
		rc = -EBUSY;
	}

	tmp = ((phy_addr << 21) | (reg << 16) | val |
	       EMAC_MDIO_COMM_COMMAND_WRITE_45 |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, tmp);

	for (i = 0; i < 50; i++) {

		tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(tmp & EMAC_MDIO_COMM_START_BUSY)) {
			break;
		}
	}

	if (tmp & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("write phy register failed\n");
		rc = -EBUSY;
	}

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	tmp = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	tmp &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	tmp |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		tmp |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, tmp);

	return rc;
}
static int bnx2x_mdio45_write(struct bnx2x *bp, u32 phy_addr, u32 reg,
			      u32 addr, u32 val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_write(bp, emac_base, phy_addr,
				       reg, addr, val);
}

static int bnx2x_mdio45_ctrl_read(struct bnx2x *bp, u32 mdio_ctrl,
				  u32 phy_addr, u32 reg, u32 addr,
				  u32 *ret_val)
{
	u32 val;
	int i, rc = 0;

	/* set clause 45 mode, slow down the MDIO clock to 2.5MHz
	 * (a value of 49==0x31) and make sure that the AUTO poll is off
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_AUTO_POLL | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (EMAC_MDIO_MODE_CLAUSE_45 |
		(49 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT));
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);
	REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);

	val = ((phy_addr << 21) | (reg << 16) | addr |
	       EMAC_MDIO_COMM_COMMAND_ADDRESS |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {

		val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			break;
		}
	}

	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");
		rc = -EBUSY;
	}

	val = ((phy_addr << 21) | (reg << 16) |
	       EMAC_MDIO_COMM_COMMAND_READ_45 |
	       EMAC_MDIO_COMM_START_BUSY);
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM, val);

	for (i = 0; i < 50; i++) {

		val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_COMM);
		if (!(val & EMAC_MDIO_COMM_START_BUSY)) {
			val &= EMAC_MDIO_COMM_DATA;
			break;
		}
	}

	if (val & EMAC_MDIO_COMM_START_BUSY) {
		BNX2X_ERR("read phy register failed\n");
		rc = -EBUSY;
	}

	*ret_val = val;

	/* unset clause 45 mode, set the MDIO clock to a faster value
	 * (0x13 => 6.25Mhz) and restore the AUTO poll if needed
	 */
	val = REG_RD(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE);
	val &= ~(EMAC_MDIO_MODE_CLAUSE_45 | EMAC_MDIO_MODE_CLOCK_CNT);
	val |= (0x13 << EMAC_MDIO_MODE_CLOCK_CNT_BITSHIFT);
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG)
		val |= EMAC_MDIO_MODE_AUTO_POLL;
	REG_WR(bp, mdio_ctrl + EMAC_REG_EMAC_MDIO_MODE, val);

	return rc;
}

static int bnx2x_mdio45_read(struct bnx2x *bp, u32 phy_addr, u32 reg,
			     u32 addr, u32 *ret_val)
{
	u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;

	return bnx2x_mdio45_ctrl_read(bp, emac_base, phy_addr,
				      reg, addr, ret_val);
}
static int bnx2x_mdio45_vwrite(struct bnx2x *bp, u32 phy_addr, u32 reg,
			       u32 addr, u32 val)
{
	int i;
	u32 rd_val;

	for (i = 0; i < 10; i++) {
		bnx2x_mdio45_write(bp, phy_addr, reg, addr, val);

		bnx2x_mdio45_read(bp, phy_addr, reg, addr, &rd_val);
		/* if the read value is not the same as the value we wrote,
		   we should write it again */
		if (rd_val == val)
			return 0;
	}
	BNX2X_ERR("MDIO write in CL45 failed\n");
	return -EBUSY;
}
static void bnx2x_pause_resolve(struct bnx2x *bp, u32 pause_result)
{
	switch (pause_result) {			/* ASYM P ASYM P */
	case 0xb:				/*   1  0   1  1 */
		bp->flow_ctrl = FLOW_CTRL_TX;
		break;

	case 0xe:				/*   1  1   1  0 */
		bp->flow_ctrl = FLOW_CTRL_RX;
		break;

	case 0x5:				/*   0  1   0  1 */
	case 0x7:				/*   0  1   1  1 */
	case 0xd:				/*   1  1   0  1 */
	case 0xf:				/*   1  1   1  1 */
		bp->flow_ctrl = FLOW_CTRL_BOTH;
		break;

	default:
		break;
	}
}
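
/* Worked example (illustrative): pause_result packs the negotiated bits
 * as {local ASYM, local PAUSE, partner ASYM, partner PAUSE}.  0xb = 1011b
 * means the local side advertises asymmetric pause only while the partner
 * advertises both PAUSE and ASYM; the only direction both sides agree on
 * is transmit-side pause, hence FLOW_CTRL_TX.
 */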
static u8 bnx2x_ext_phy_resolve_fc(struct bnx2x *bp)
{
	u32 ext_phy_addr;
	u32 ld_pause;		/* local */
	u32 lp_pause;		/* link partner */
	u32 an_complete;	/* AN complete */
	u32 pause_result;
	u8 ret = 0;

	ext_phy_addr = ((bp->ext_phy_config &
			 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
			PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);
	bnx2x_mdio45_read(bp, ext_phy_addr,
			  EXT_PHY_KR_AUTO_NEG_DEVAD,
			  EXT_PHY_KR_STATUS, &an_complete);

	if (an_complete & EXT_PHY_KR_AUTO_NEG_COMPLETE) {
		ret = 1;
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_AUTO_NEG_ADVERT, &ld_pause);
		bnx2x_mdio45_read(bp, ext_phy_addr,
				  EXT_PHY_KR_AUTO_NEG_DEVAD,
				  EXT_PHY_KR_LP_AUTO_NEG, &lp_pause);
		pause_result = (ld_pause &
				EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 8;
		pause_result |= (lp_pause &
				 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_MASK) >> 10;
		DP(NETIF_MSG_LINK, "Ext PHY pause result 0x%x\n",
		   pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	}
	return ret;
}
static void bnx2x_flow_ctrl_resolve(struct bnx2x *bp, u32 gp_status)
{
	u32 ld_pause;	/* local driver */
	u32 lp_pause;	/* link partner */
	u32 pause_result;

	/* resolve from gp_status in case of AN complete and not sgmii */
	if ((bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
	    (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) &&
	    (!(bp->phy_flags & PHY_SGMII_FLAG)) &&
	    (XGXS_EXT_PHY_TYPE(bp) == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {

		MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
		bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
				  &ld_pause);
		bnx2x_mdio22_read(bp,
			MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
				  &lp_pause);
		pause_result = (ld_pause &
				MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
		pause_result |= (lp_pause &
				 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
		DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result);
		bnx2x_pause_resolve(bp, pause_result);
	} else if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) ||
		   !(bnx2x_ext_phy_resolve_fc(bp))) {

		if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_TX:
				bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_RX:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_RX;
				break;

			case FLOW_CTRL_BOTH:
				if (bp->dev->mtu <= 4500)
					bp->flow_ctrl = FLOW_CTRL_BOTH;
				else
					bp->flow_ctrl = FLOW_CTRL_TX;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		} else { /* forced mode */
			switch (bp->req_flow_ctrl) {
			case FLOW_CTRL_AUTO:
				DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
						   " req_autoneg 0x%x\n",
				   bp->req_flow_ctrl, bp->req_autoneg);
				break;

			case FLOW_CTRL_TX:
			case FLOW_CTRL_RX:
			case FLOW_CTRL_BOTH:
				bp->flow_ctrl = bp->req_flow_ctrl;
				break;

			case FLOW_CTRL_NONE:
			default:
				break;
			}
		}
	}
	DP(NETIF_MSG_LINK, "flow_ctrl 0x%x\n", bp->flow_ctrl);
}
static void bnx2x_link_settings_status(struct bnx2x *bp, u32 gp_status)
{
	bp->link_status = 0;

	if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_LINK_STATUS) {
		DP(NETIF_MSG_LINK, "phy link up\n");

		bp->phy_link_up = 1;
		bp->link_status |= LINK_STATUS_LINK_UP;

		if (gp_status & MDIO_GP_STATUS_TOP_AN_STATUS1_DUPLEX_STATUS)
			bp->duplex = DUPLEX_FULL;
		else
			bp->duplex = DUPLEX_HALF;

		bnx2x_flow_ctrl_resolve(bp, gp_status);

		switch (gp_status & GP_STATUS_SPEED_MASK) {
		case GP_STATUS_10M:
			bp->line_speed = SPEED_10;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_10TFD;
			else
				bp->link_status |= LINK_10THD;
			break;

		case GP_STATUS_100M:
			bp->line_speed = SPEED_100;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_100TXFD;
			else
				bp->link_status |= LINK_100TXHD;
			break;

		case GP_STATUS_1G:
		case GP_STATUS_1G_KX:
			bp->line_speed = SPEED_1000;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_1000TFD;
			else
				bp->link_status |= LINK_1000THD;
			break;

		case GP_STATUS_2_5G:
			bp->line_speed = SPEED_2500;
			if (bp->duplex == DUPLEX_FULL)
				bp->link_status |= LINK_2500TFD;
			else
				bp->link_status |= LINK_2500THD;
			break;

		case GP_STATUS_5G:
		case GP_STATUS_6G:
			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
				  gp_status);
			break;

		case GP_STATUS_10G_KX4:
		case GP_STATUS_10G_HIG:
		case GP_STATUS_10G_CX4:
			bp->line_speed = SPEED_10000;
			bp->link_status |= LINK_10GTFD;
			break;

		case GP_STATUS_12G_HIG:
			bp->line_speed = SPEED_12000;
			bp->link_status |= LINK_12GTFD;
			break;

		case GP_STATUS_12_5G:
			bp->line_speed = SPEED_12500;
			bp->link_status |= LINK_12_5GTFD;
			break;

		case GP_STATUS_13G:
			bp->line_speed = SPEED_13000;
			bp->link_status |= LINK_13GTFD;
			break;

		case GP_STATUS_15G:
			bp->line_speed = SPEED_15000;
			bp->link_status |= LINK_15GTFD;
			break;

		case GP_STATUS_16G:
			bp->line_speed = SPEED_16000;
			bp->link_status |= LINK_16GTFD;
			break;

		default:
			BNX2X_ERR("link speed unsupported  gp_status 0x%x\n",
				  gp_status);
			break;
		}

		bp->link_status |= LINK_STATUS_SERDES_LINK;

		if (bp->req_autoneg & AUTONEG_SPEED) {
			bp->link_status |= LINK_STATUS_AUTO_NEGOTIATE_ENABLED;

			if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE)
				bp->link_status |=
					LINK_STATUS_AUTO_NEGOTIATE_COMPLETE;

			if (bp->autoneg & AUTONEG_PARALLEL)
				bp->link_status |=
					LINK_STATUS_PARALLEL_DETECTION_USED;
		}

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			bp->link_status |= LINK_STATUS_TX_FLOW_CONTROL_ENABLED;

		if (bp->flow_ctrl & FLOW_CTRL_RX)
			bp->link_status |= LINK_STATUS_RX_FLOW_CONTROL_ENABLED;

	} else { /* link_down */
		DP(NETIF_MSG_LINK, "phy link down\n");

		bp->phy_link_up = 0;

		bp->line_speed = 0;
		bp->duplex = DUPLEX_FULL;
		bp->flow_ctrl = 0;
	}

	DP(NETIF_MSG_LINK, "gp_status 0x%x  phy_link_up %d\n"
	   DP_LEVEL "  line_speed %d  duplex %d  flow_ctrl 0x%x"
		    "  link_status 0x%x\n",
	   gp_status, bp->phy_link_up, bp->line_speed, bp->duplex,
	   bp->flow_ctrl, bp->link_status);
}
static void bnx2x_link_int_ack(struct bnx2x *bp, int is_10g)
{
	int port = bp->port;

	/* first reset all status
	 * we assume only one line will change at a time */
	bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
		       (NIG_STATUS_XGXS0_LINK10G |
			NIG_STATUS_XGXS0_LINK_STATUS |
			NIG_STATUS_SERDES0_LINK_STATUS));
	if (bp->phy_link_up) {
		if (is_10g) {
			/* Disable the 10G link interrupt
			 * by writing 1 to the status register
			 */
			DP(NETIF_MSG_LINK, "10G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_XGXS0_LINK10G);

		} else if (bp->phy_flags & PHY_XGXS_FLAG) {
			/* Disable the link interrupt
			 * by writing 1 to the relevant lane
			 * in the status register
			 */
			DP(NETIF_MSG_LINK, "1G XGXS phy link up\n");
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      ((1 << bp->ser_lane) <<
				       NIG_STATUS_XGXS0_LINK_STATUS_SIZE));

		} else { /* SerDes */
			DP(NETIF_MSG_LINK, "SerDes phy link up\n");
			/* Disable the link interrupt
			 * by writing 1 to the status register
			 */
			bnx2x_bits_en(bp,
				      NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
				      NIG_STATUS_SERDES0_LINK_STATUS);
		}

	} else { /* link_down */
	}
}
static int bnx2x_ext_phy_is_link_up(struct bnx2x *bp)
{
	u32 ext_phy_type;
	u32 ext_phy_addr;
	u32 val1 = 0, val2;
	u32 rx_sd, pcs_status;

	if (bp->phy_flags & PHY_XGXS_FLAG) {
		ext_phy_addr = ((bp->ext_phy_config &
				 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
				PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);

		ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "XGXS Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			DP(NETIF_MSG_LINK, "XGXS 8705\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_WIS_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8705 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			DP(NETIF_MSG_LINK, "8705 rx_sd 0x%x\n", rx_sd);
			val1 = (rx_sd & 0x1);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			DP(NETIF_MSG_LINK, "XGXS 8706\n");
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "8706 LASI status 0x%x\n", val1);

			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_PMD_RX_SD, &rx_sd);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PCS_DEVAD,
					  EXT_PHY_OPT_PCS_STATUS, &pcs_status);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_AUTO_NEG_DEVAD,
					  EXT_PHY_OPT_AN_LINK_STATUS, &val2);

			DP(NETIF_MSG_LINK, "8706 rx_sd 0x%x"
			   "  pcs_status 0x%x  1Gbps link_status 0x%x 0x%x\n",
			   rx_sd, pcs_status, val2, (val2 & (1<<1)));
			/* link is up if both bit 0 of pmd_rx_sd and
			 * bit 0 of pcs_status are set, or if the autoneg bit
			 * (bit 1) of the AN link status is set
			 */
			val1 = ((rx_sd & pcs_status & 0x1) || (val2 & (1<<1)));
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

			/* clear the interrupt LASI status register */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK, "KR LASI status 0x%x->0x%x\n",
			   val2, val1);
			/* Check the LASI */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PMA_PMD_DEVAD,
					       0x9003, &val1);
			DP(NETIF_MSG_LINK, "KR 0x9003 0x%x->0x%x\n",
			   val2, val1);
			/* Check the link status */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_KR_PCS_DEVAD,
					       EXT_PHY_KR_PCS_STATUS, &val2);
			DP(NETIF_MSG_LINK, "KR PCS status 0x%x\n", val2);
			/* Check the link status on 1.1.2 */
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_OPT_PMA_PMD_DEVAD,
					       EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
					       ext_phy_addr,
					       EXT_PHY_OPT_PMA_PMD_DEVAD,
					       EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "KR PMA status 0x%x->0x%x\n", val2, val1);
			val1 = ((val1 & 4) == 4);
			/* If 1G was requested assume the link is up */
			if (!(bp->req_autoneg & AUTONEG_SPEED) &&
			    (bp->req_line_speed == SPEED_1000))
				val1 = 1;
			bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_OPT_LASI_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T LASI status 0x%x->0x%x\n", val2, val1);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val2);
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_OPT_PMA_PMD_DEVAD,
					  EXT_PHY_KR_STATUS, &val1);
			DP(NETIF_MSG_LINK,
			   "10G-base-T PMA status 0x%x->0x%x\n", val2, val1);
			val1 = ((val1 & 4) == 4);
			/* print the AN outcome of the SFX7101 PHY */
			bnx2x_mdio45_read(bp, ext_phy_addr,
					  EXT_PHY_KR_AUTO_NEG_DEVAD,
					  0x21, &val2);
			DP(NETIF_MSG_LINK,
			   "SFX7101 AN status 0x%x->%s\n", val2,
			   (val2 & (1<<14)) ? "Master" : "Slave");
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}

	} else { /* SerDes */
		ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			DP(NETIF_MSG_LINK, "SerDes Direct\n");
			val1 = 1;
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			DP(NETIF_MSG_LINK, "SerDes 5482\n");
			val1 = 1;
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
			   bp->ext_phy_config);
			break;
		}
	}

	return val1;
}
static void bnx2x_bmac_enable(struct bnx2x *bp, int is_lb)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];
	u32 val;

	DP(NETIF_MSG_LINK, "enabling BigMAC\n");
	/* reset and unreset the BigMac */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
	       (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));

	/* enable access for bmac registers */
	NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);

	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_XGXS_CONTROL,
		    wb_write, 2);

	wb_write[0] = ((bp->dev->dev_addr[2] << 24) |
		       (bp->dev->dev_addr[3] << 16) |
		       (bp->dev->dev_addr[4] << 8) |
			bp->dev->dev_addr[5]);
	wb_write[1] = ((bp->dev->dev_addr[0] << 8) |
			bp->dev->dev_addr[1]);
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
		    wb_write, 2);

	if (bp->flow_ctrl & FLOW_CTRL_TX)

	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_write, 2);

	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -CRC */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_MAX_SIZE, wb_write, 2);

	DP(NETIF_MSG_LINK, "enable bmac loopback\n");

	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
		    wb_write, 2);

	/* rx control set to don't strip crc */
	if (bp->flow_ctrl & FLOW_CTRL_RX)

	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_write, 2);

	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, wb_write, 2);

	/* set cnt max size */
	wb_write[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; /* -VLAN */
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_CNT_MAX_SIZE,
		    wb_write, 2);

	/* configure safc */
	wb_write[0] = 0x1000200;
	wb_write[1] = 0;
	REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_LLFC_MSG_FLDS,
		    wb_write, 2);

	/* fix for emulation */
	if (CHIP_REV(bp) == CHIP_REV_EMUL) {
		wb_write[0] = 0xf000;
		wb_write[1] = 0;
		REG_WR_DMAE(bp,
			    bmac_addr + BIGMAC_REGISTER_TX_PAUSE_THRESHOLD,
			    wb_write, 2);
	}

	/* reset old bmac stats */
	memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));

	NIG_WR(NIG_REG_XCM0_OUT_EN + port*4, 0x0);

	NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0x1);
	NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);

	/* disable the NIG in/out to the emac */
	NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, 0x0);
	NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);

	/* enable the NIG in/out to the bmac */
	NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);

	NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x1);
	val = 0;
	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val = 1;
	NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
	NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x1);

	bp->phy_flags |= PHY_BMAC_FLAG;

	bp->stats_state = STATS_STATE_ENABLE;
}
static void bnx2x_bmac_rx_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 bmac_addr = port ? NIG_REG_INGRESS_BMAC1_MEM :
			       NIG_REG_INGRESS_BMAC0_MEM;
	u32 wb_write[2];

	/* Only if the bmac is out of reset */
	if (REG_RD(bp, MISC_REG_RESET_REG_2) &
	    (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port)) {
		/* Clear Rx Enable bit in BMAC_CONTROL register */
#ifdef BNX2X_DMAE_RD
		bnx2x_read_dmae(bp, bmac_addr +
				BIGMAC_REGISTER_BMAC_CONTROL, 2);
		wb_write[0] = *bnx2x_sp(bp, wb_data[0]);
		wb_write[1] = *bnx2x_sp(bp, wb_data[1]);
#else
		wb_write[0] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL);
		wb_write[1] = REG_RD(bp,
				bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL + 4);
#endif
		wb_write[0] &= ~BMAC_CONTROL_RX_ENABLE;
		REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_BMAC_CONTROL,
			    wb_write, 2);
	}
}
2303 static void bnx2x_emac_enable(struct bnx2x *bp)
2305 int port = bp->port;
2306 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
2310 DP(NETIF_MSG_LINK, "enabling EMAC\n");
2311 /* reset and unreset the emac core */
2312 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2313 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2315 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2316 (MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE << port));
2318 /* enable emac and not bmac */
2319 NIG_WR(NIG_REG_EGRESS_EMAC0_PORT + port*4, 1);
2322 if (CHIP_REV(bp) == CHIP_REV_EMUL) {
2323 /* Use lane 1 (of lanes 0-3) */
2324 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2325 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2328 else if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2329 /* Use lane 1 (of lanes 0-3) */
2330 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4, 1);
2331 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2335 if (bp->phy_flags & PHY_XGXS_FLAG) {
2336 DP(NETIF_MSG_LINK, "XGXS\n");
2337 /* select the master lanes (out of 0-3) */
2338 NIG_WR(NIG_REG_XGXS_LANE_SEL_P0 + port*4,
2341 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
2343 } else { /* SerDes */
2344 DP(NETIF_MSG_LINK, "SerDes\n");
2346 NIG_WR(NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 0);
2351 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 1);
2353 /* init emac - use read-modify-write */
2354 /* self clear reset */
2355 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2356 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_RESET));
2359 while (val & EMAC_MODE_RESET) {
2360 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2361 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2363 BNX2X_ERR("EMAC timeout!\n");
2370 EMAC_WR(EMAC_REG_EMAC_TX_MODE, EMAC_TX_MODE_RESET);
2373 while (val & EMAC_TX_MODE_RESET) {
2374 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_TX_MODE);
2375 DP(NETIF_MSG_LINK, "EMAC reset reg is %u\n", val);
2377 BNX2X_ERR("EMAC timeout!\n");
2383 if (CHIP_REV_IS_SLOW(bp)) {
2384 /* config GMII mode */
2385 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2386 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_PORT_GMII));
2389 /* pause enable/disable */
2390 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2391 EMAC_RX_MODE_FLOW_EN);
2392 if (bp->flow_ctrl & FLOW_CTRL_RX)
2393 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
2394 EMAC_RX_MODE_FLOW_EN);
2396 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2397 EMAC_TX_MODE_EXT_PAUSE_EN);
2398 if (bp->flow_ctrl & FLOW_CTRL_TX)
2399 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
2400 EMAC_TX_MODE_EXT_PAUSE_EN);
2403 /* KEEP_VLAN_TAG, promiscuous */
2404 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
2405 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
2406 EMAC_WR(EMAC_REG_EMAC_RX_MODE, val);
2408 /* identify magic packets */
2409 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_MODE);
2410 EMAC_WR(EMAC_REG_EMAC_MODE, (val | EMAC_MODE_MPKT));
2412 /* enable emac for jumbo packets */
2413 EMAC_WR(EMAC_REG_EMAC_RX_MTU_SIZE,
2414 (EMAC_RX_MTU_SIZE_JUMBO_ENA |
2415 (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD))); /* -VLAN */
2418 NIG_WR(NIG_REG_NIG_INGRESS_EMAC0_NO_CRC + port*4, 0x1);
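/* The station address is split across two EMAC match registers:
 * bytes 0-1 in EMAC_REG_EMAC_MAC_MATCH and bytes 2-5 in the
 * following word.
 */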
2420 val = ((bp->dev->dev_addr[0] << 8) |
2421 bp->dev->dev_addr[1]);
2422 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
2424 val = ((bp->dev->dev_addr[2] << 24) |
2425 (bp->dev->dev_addr[3] << 16) |
2426 (bp->dev->dev_addr[4] << 8) |
2427 bp->dev->dev_addr[5]);
2428 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
2430 /* disable the NIG in/out to the bmac */
2431 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0x0);
2432 NIG_WR(NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, 0x0);
2433 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0x0);
2435 /* enable the NIG in/out to the emac */
2436 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0x1);
2438 if (bp->flow_ctrl & FLOW_CTRL_TX)
2440 NIG_WR(NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
2441 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x1);
2443 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
2444 /* take the BigMac out of reset */
2445 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
2446 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2448 /* enable access for bmac registers */
2449 NIG_WR(NIG_REG_BMAC0_REGS_OUT_EN + port*4, 0x1);
2452 bp->phy_flags |= PHY_EMAC_FLAG;
2454 bp->stats_state = STATS_STATE_ENABLE;
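/* Program the EMAC mode register for the negotiated speed and duplex.
 * The EMAC covers 10M through 2.5G; 10G and above is handled by the
 * BigMac and is rejected here as invalid.
 */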
2457 static void bnx2x_emac_program(struct bnx2x *bp)
2460 int port = bp->port;
2462 DP(NETIF_MSG_LINK, "setting link speed & duplex\n");
2463 bnx2x_bits_dis(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2464 (EMAC_MODE_25G_MODE |
2465 EMAC_MODE_PORT_MII_10M |
2466 EMAC_MODE_HALF_DUPLEX));
2467 switch (bp->line_speed) {
2469 mode |= EMAC_MODE_PORT_MII_10M;
2473 mode |= EMAC_MODE_PORT_MII;
2477 mode |= EMAC_MODE_PORT_GMII;
2481 mode |= (EMAC_MODE_25G_MODE | EMAC_MODE_PORT_GMII);
2485 /* 10G not valid for EMAC */
2486 BNX2X_ERR("Invalid line_speed 0x%x\n", bp->line_speed);
2490 if (bp->duplex == DUPLEX_HALF)
2491 mode |= EMAC_MODE_HALF_DUPLEX;
2492 bnx2x_bits_en(bp, GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
2495 bnx2x_leds_set(bp, bp->line_speed);
2498 static void bnx2x_set_sgmii_tx_driver(struct bnx2x *bp)
2504 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
2505 bnx2x_mdio22_read(bp, MDIO_OVER_1G_LP_UP2, &lp_up2);
2507 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_TX0);
2508 bnx2x_mdio22_read(bp, MDIO_TX0_TX_DRIVER, &tx_driver);
2510 /* bits [10:7] at lp_up2, positioned at [15:12] */
2511 lp_up2 = (((lp_up2 & MDIO_OVER_1G_LP_UP2_PREEMPHASIS_MASK) >>
2512 MDIO_OVER_1G_LP_UP2_PREEMPHASIS_SHIFT) <<
2513 MDIO_TX0_TX_DRIVER_PREEMPHASIS_SHIFT);
2515 if ((lp_up2 != 0) &&
2516 (lp_up2 != (tx_driver & MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK))) {
2517 /* replace tx_driver bits [15:12] */
2518 tx_driver &= ~MDIO_TX0_TX_DRIVER_PREEMPHASIS_MASK;
2519 tx_driver |= lp_up2;
2520 bnx2x_mdio22_write(bp, MDIO_TX0_TX_DRIVER, tx_driver);
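/* Retune the PBF for the new link: the arbiter threshold is the
 * worst-case packet size in 16-byte units and the initial credit is
 * that threshold plus a per-speed allowance for in-flight data.
 * New task processing is held off while the credit is reprogrammed.
 */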
2524 static void bnx2x_pbf_update(struct bnx2x *bp)
2526 int port = bp->port;
2532 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x1);
2534 /* wait for init credit */
2535 init_crd = REG_RD(bp, PBF_REG_P0_INIT_CRD + port*4);
2536 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2537 DP(NETIF_MSG_LINK, "init_crd 0x%x crd 0x%x\n", init_crd, crd);
2539 while ((init_crd != crd) && count) {
2542 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2545 crd = REG_RD(bp, PBF_REG_P0_CREDIT + port*8);
2546 if (init_crd != crd)
2547 BNX2X_ERR("BUG! init_crd 0x%x != crd 0x%x\n", init_crd, crd);
2549 if (bp->flow_ctrl & FLOW_CTRL_RX)
2551 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, pause);
2553 /* update threshold */
2554 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, 0);
2555 /* update init credit */
2556 init_crd = 778; /* (800-18-4) */
2559 u32 thresh = (ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD)/16;
2561 /* update threshold */
2562 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, thresh);
2563 /* update init credit */
2564 switch (bp->line_speed) {
2568 init_crd = thresh + 55 - 22;
2572 init_crd = thresh + 138 - 22;
2576 init_crd = thresh + 553 - 22;
2580 BNX2X_ERR("Invalid line_speed 0x%x\n",
2585 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, init_crd);
2586 DP(NETIF_MSG_LINK, "PBF updated to speed %d credit %d\n",
2587 bp->line_speed, init_crd);
2589 /* probe the credit changes */
2590 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x1);
2592 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0x0);
2595 REG_WR(bp, PBF_REG_DISABLE_NEW_TASK_PROC_P0 + port*4, 0x0);
2598 static void bnx2x_update_mng(struct bnx2x *bp)
2601 SHMEM_WR(bp, port_mb[bp->port].link_status,
2605 static void bnx2x_link_report(struct bnx2x *bp)
2608 netif_carrier_on(bp->dev);
2609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2611 printk("%d Mbps ", bp->line_speed);
2613 if (bp->duplex == DUPLEX_FULL)
2614 printk("full duplex");
2616 printk("half duplex");
2618 if (bp->flow_ctrl) {
2619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
2620 printk(", receive ");
2621 if (bp->flow_ctrl & FLOW_CTRL_TX)
2622 printk("& transmit ");
2624 printk(", transmit ");
2626 printk("flow control ON");
2630 } else { /* link_down */
2631 netif_carrier_off(bp->dev);
2632 printk(KERN_INFO PFX "%s NIC Link is Down\n", bp->dev->name);
2636 static void bnx2x_link_up(struct bnx2x *bp)
2638 int port = bp->port;
2641 bnx2x_pbf_update(bp);
2644 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 0);
2646 /* update shared memory */
2647 bnx2x_update_mng(bp);
2649 /* indicate link up */
2650 bnx2x_link_report(bp);
2653 static void bnx2x_link_down(struct bnx2x *bp)
2655 int port = bp->port;
2658 if (bp->stats_state != STATS_STATE_DISABLE) {
2659 bp->stats_state = STATS_STATE_STOP;
2660 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2663 /* indicate no mac active */
2664 bp->phy_flags &= ~(PHY_BMAC_FLAG | PHY_EMAC_FLAG);
2666 /* update shared memory */
2667 bnx2x_update_mng(bp);
2669 /* activate nig drain */
2670 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
2673 bnx2x_bmac_rx_disable(bp);
2674 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
2675 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
2677 /* indicate link down */
2678 bnx2x_link_report(bp);
2681 static void bnx2x_init_mac_stats(struct bnx2x *bp);
2683 /* This function is called upon link interrupt */
2684 static void bnx2x_link_update(struct bnx2x *bp)
2686 int port = bp->port;
2691 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
2692 " int_mask 0x%x, saved_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
2693 " 10G %x, XGXS_LINK %x\n", port,
(bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2695 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
2696 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4), bp->nig_mask,
2697 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
2698 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
2699 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
2700 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
2704 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_GP_STATUS);
2705 /* avoid fast toggling */
2706 for (i = 0; i < 10; i++) {
2708 bnx2x_mdio22_read(bp, MDIO_GP_STATUS_TOP_AN_STATUS1,
2712 bnx2x_link_settings_status(bp, gp_status);
/* anything 10G and over uses the bmac */
2715 link_10g = ((bp->line_speed >= SPEED_10000) &&
2716 (bp->line_speed <= SPEED_16000));
2718 bnx2x_link_int_ack(bp, link_10g);
2720 /* link is up only if both local phy and external phy are up */
2721 bp->link_up = (bp->phy_link_up && bnx2x_ext_phy_is_link_up(bp));
2724 bnx2x_bmac_enable(bp, 0);
2725 bnx2x_leds_set(bp, SPEED_10000);
2728 bnx2x_emac_enable(bp);
2729 bnx2x_emac_program(bp);
2732 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
2733 if (!(bp->phy_flags & PHY_SGMII_FLAG))
2734 bnx2x_set_sgmii_tx_driver(bp);
2739 } else { /* link down */
2740 bnx2x_leds_unset(bp);
2741 bnx2x_link_down(bp);
2744 bnx2x_init_mac_stats(bp);
2748 * Init service functions
2751 static void bnx2x_set_aer_mmd(struct bnx2x *bp)
2753 u16 offset = (bp->phy_flags & PHY_XGXS_FLAG) ?
2754 (bp->phy_addr + bp->ser_lane) : 0;
2756 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
2757 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x3800 + offset);
2760 static void bnx2x_set_master_ln(struct bnx2x *bp)
2764 /* set the master_ln for AN */
2765 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2766 bnx2x_mdio22_read(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2768 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TEST_MODE_LANE,
2769 (new_master_ln | bp->ser_lane));
2772 static void bnx2x_reset_unicore(struct bnx2x *bp)
2777 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2778 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &mii_control);
2779 /* reset the unicore */
2780 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2781 (mii_control | MDIO_COMBO_IEEO_MII_CONTROL_RESET));
2783 /* wait for the reset to self clear */
2784 for (i = 0; i < MDIO_ACCESS_TIMEOUT; i++) {
2787 /* the reset erased the previous bank value */
2788 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
2789 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
2792 if (!(mii_control & MDIO_COMBO_IEEO_MII_CONTROL_RESET)) {
2798 BNX2X_ERR("BUG! %s (0x%x) is still in reset!\n",
(bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
2803 static void bnx2x_set_swap_lanes(struct bnx2x *bp)
/* Each two bits represent a lane number:
   no swap is 0123 => 0x1b, so there is no need to enable the swap */
2808 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2809 if (bp->rx_lane_swap != 0x1b) {
2810 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP,
2812 MDIO_XGXS_BLOCK2_RX_LN_SWAP_ENABLE |
2813 MDIO_XGXS_BLOCK2_RX_LN_SWAP_FORCE_ENABLE));
2815 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_RX_LN_SWAP, 0);
2818 if (bp->tx_lane_swap != 0x1b) {
2819 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP,
2821 MDIO_XGXS_BLOCK2_TX_LN_SWAP_ENABLE));
2823 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_TX_LN_SWAP, 0);
2827 static void bnx2x_set_parallel_detection(struct bnx2x *bp)
2831 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
2832 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2835 if (bp->autoneg & AUTONEG_PARALLEL) {
2836 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2838 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
2840 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
2843 if (bp->phy_flags & PHY_XGXS_FLAG) {
2844 DP(NETIF_MSG_LINK, "XGXS\n");
2845 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_10G_PARALLEL_DETECT);
2847 bnx2x_mdio22_write(bp,
2848 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK,
2849 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK_CNT);
2851 bnx2x_mdio22_read(bp,
2852 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2855 if (bp->autoneg & AUTONEG_PARALLEL) {
2857 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2860 ~MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN;
2862 bnx2x_mdio22_write(bp,
2863 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL,
2866 /* Disable parallel detection of HiG */
2867 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_XGXS_BLOCK2);
2868 bnx2x_mdio22_write(bp, MDIO_XGXS_BLOCK2_UNICORE_MODE_10G,
2869 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_CX4_XGXS |
2870 MDIO_XGXS_BLOCK2_UNICORE_MODE_10G_HIGIG_XGXS);
2874 static void bnx2x_set_autoneg(struct bnx2x *bp)
2879 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2881 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2882 (bp->autoneg & AUTONEG_CL37)) {
2883 /* CL37 Autoneg Enabled */
2884 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_AN_EN;
2886 /* CL37 Autoneg Disabled */
2887 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
2888 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN);
2890 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
2892 /* Enable/Disable Autodetection */
2893 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, &reg_val);
2895 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_SIGNAL_DETECT_EN;
2897 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2898 (bp->autoneg & AUTONEG_SGMII_FIBER_AUTODET)) {
2899 reg_val |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2901 reg_val &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET;
2903 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1, reg_val);
2905 /* Enable TetonII and BAM autoneg */
2906 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_BAM_NEXT_PAGE);
2907 bnx2x_mdio22_read(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2909 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2910 (bp->autoneg & AUTONEG_CL37) && (bp->autoneg & AUTONEG_BAM)) {
2911 /* Enable BAM aneg Mode and TetonII aneg Mode */
2912 reg_val |= (MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2913 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2915 /* TetonII and BAM Autoneg Disabled */
2916 reg_val &= ~(MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_BAM_MODE |
2917 MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL_TETON_AN);
2919 bnx2x_mdio22_write(bp, MDIO_BAM_NEXT_PAGE_MP5_NEXT_PAGE_CTRL,
2922 /* Enable Clause 73 Aneg */
2923 if ((bp->req_autoneg & AUTONEG_SPEED) &&
2924 (bp->autoneg & AUTONEG_CL73)) {
2925 /* Enable BAM Station Manager */
2926 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_USERB0);
2927 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL1,
2928 (MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_EN |
2929 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
2930 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN));
2932 /* Merge CL73 and CL37 aneg resolution */
2933 bnx2x_mdio22_read(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2935 bnx2x_mdio22_write(bp, MDIO_CL73_USERB0_CL73_BAM_CTRL3,
2937 MDIO_CL73_USERB0_CL73_BAM_CTRL3_USE_CL73_HCD_MR));
2939 /* Set the CL73 AN speed */
2940 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB1);
bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB1_AN_ADV2, &reg_val);
/* In the SerDes we support only 1G.
   In the XGXS we support 10G KX4,
   but we currently do not support KR */
2945 if (bp->phy_flags & PHY_XGXS_FLAG) {
2946 DP(NETIF_MSG_LINK, "XGXS\n");
2948 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
2950 DP(NETIF_MSG_LINK, "SerDes\n");
2952 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
2954 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB1_AN_ADV2, reg_val);
2956 /* CL73 Autoneg Enabled */
2957 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
2959 /* CL73 Autoneg Disabled */
2962 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
2963 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL, reg_val);
2966 /* program SerDes, forced speed */
2967 static void bnx2x_program_serdes(struct bnx2x *bp)
2971 /* program duplex, disable autoneg */
2972 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL, &reg_val);
2974 reg_val &= ~(MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX |
2975 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN);
2976 if (bp->req_duplex == DUPLEX_FULL)
2977 reg_val |= MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
2978 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL, reg_val);
/* program speed
   - needed only if the speed is greater than 1G (2.5G or 10G) */
2982 if (bp->req_line_speed > SPEED_1000) {
2983 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_MISC1, &reg_val);
2985 /* clearing the speed value before setting the right speed */
2986 reg_val &= ~MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_MASK;
2987 reg_val |= (MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_156_25M |
2988 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_SEL);
2989 if (bp->req_line_speed == SPEED_10000)
2991 MDIO_SERDES_DIGITAL_MISC1_FORCE_SPEED_10G_CX4;
2992 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_MISC1, reg_val);
2996 static void bnx2x_set_brcm_cl37_advertisment(struct bnx2x *bp)
3000 /* configure the 48 bits for BAM AN */
3001 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_OVER_1G);
3003 /* set extended capabilities */
3004 if (bp->advertising & ADVERTISED_2500baseX_Full)
3005 val |= MDIO_OVER_1G_UP1_2_5G;
3006 if (bp->advertising & ADVERTISED_10000baseT_Full)
3007 val |= MDIO_OVER_1G_UP1_10G;
3008 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP1, val);
3010 bnx2x_mdio22_write(bp, MDIO_OVER_1G_UP3, 0);
3013 static void bnx2x_set_ieee_aneg_advertisment(struct bnx2x *bp)
3017 /* for AN, we are always publishing full duplex */
3018 an_adv = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
3020 /* resolve pause mode and advertisement
3021 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
3022 if (bp->req_autoneg & AUTONEG_FLOW_CTRL) {
3023 switch (bp->req_flow_ctrl) {
3024 case FLOW_CTRL_AUTO:
3025 if (bp->dev->mtu <= 4500) {
3027 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3028 bp->advertising |= (ADVERTISED_Pause |
3029 ADVERTISED_Asym_Pause);
3032 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3033 bp->advertising |= ADVERTISED_Asym_Pause;
3039 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3040 bp->advertising |= ADVERTISED_Asym_Pause;
3044 if (bp->dev->mtu <= 4500) {
3046 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3047 bp->advertising |= (ADVERTISED_Pause |
3048 ADVERTISED_Asym_Pause);
3051 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3052 bp->advertising &= ~(ADVERTISED_Pause |
3053 ADVERTISED_Asym_Pause);
3057 case FLOW_CTRL_BOTH:
3058 if (bp->dev->mtu <= 4500) {
3060 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3061 bp->advertising |= (ADVERTISED_Pause |
3062 ADVERTISED_Asym_Pause);
3065 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3066 bp->advertising |= ADVERTISED_Asym_Pause;
3070 case FLOW_CTRL_NONE:
3072 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3073 bp->advertising &= ~(ADVERTISED_Pause |
3074 ADVERTISED_Asym_Pause);
3077 } else { /* forced mode */
3078 switch (bp->req_flow_ctrl) {
3079 case FLOW_CTRL_AUTO:
3080 DP(NETIF_MSG_LINK, "req_flow_ctrl 0x%x while"
3081 " req_autoneg 0x%x\n",
3082 bp->req_flow_ctrl, bp->req_autoneg);
3087 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC;
3088 bp->advertising |= ADVERTISED_Asym_Pause;
3092 case FLOW_CTRL_BOTH:
3093 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH;
3094 bp->advertising |= (ADVERTISED_Pause |
3095 ADVERTISED_Asym_Pause);
3098 case FLOW_CTRL_NONE:
3100 an_adv |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
3101 bp->advertising &= ~(ADVERTISED_Pause |
3102 ADVERTISED_Asym_Pause);
3107 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3108 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_AUTO_NEG_ADV, an_adv);
3111 static void bnx2x_restart_autoneg(struct bnx2x *bp)
3113 if (bp->autoneg & AUTONEG_CL73) {
3114 /* enable and restart clause 73 aneg */
3117 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3118 bnx2x_mdio22_read(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3120 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3122 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN |
3123 MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN));
3126 /* Enable and restart BAM/CL37 aneg */
3129 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3130 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3132 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3134 MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3135 MDIO_COMBO_IEEO_MII_CONTROL_RESTART_AN));
3139 static void bnx2x_initialize_sgmii_process(struct bnx2x *bp)
3143 /* in SGMII mode, the unicore is always slave */
3144 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_SERDES_DIGITAL);
3145 bnx2x_mdio22_read(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3147 control1 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_INVERT_SIGNAL_DETECT;
3148 /* set sgmii mode (and not fiber) */
3149 control1 &= ~(MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_FIBER_MODE |
3150 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_AUTODET |
3151 MDIO_SERDES_DIGITAL_A_1000X_CONTROL1_MSTR_MODE);
3152 bnx2x_mdio22_write(bp, MDIO_SERDES_DIGITAL_A_1000X_CONTROL1,
3155 /* if forced speed */
3156 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3157 /* set speed, disable autoneg */
3160 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3161 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3163 mii_control &= ~(MDIO_COMBO_IEEO_MII_CONTROL_AN_EN |
3164 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_MASK |
3165 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX);
3167 switch (bp->req_line_speed) {
3170 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_100;
3174 MDIO_COMBO_IEEO_MII_CONTROL_MAN_SGMII_SP_1000;
3177 /* there is nothing to set for 10M */
3180 /* invalid speed for SGMII */
3181 DP(NETIF_MSG_LINK, "Invalid req_line_speed 0x%x\n",
3182 bp->req_line_speed);
3186 /* setting the full duplex */
3187 if (bp->req_duplex == DUPLEX_FULL)
3189 MDIO_COMBO_IEEO_MII_CONTROL_FULL_DUPLEX;
3190 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3193 } else { /* AN mode */
3194 /* enable and restart AN */
3195 bnx2x_restart_autoneg(bp);
3199 static void bnx2x_link_int_enable(struct bnx2x *bp)
3201 int port = bp->port;
3205 /* setting the status to report on link up
3206 for either XGXS or SerDes */
3207 bnx2x_bits_dis(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4,
3208 (NIG_STATUS_XGXS0_LINK10G |
3209 NIG_STATUS_XGXS0_LINK_STATUS |
3210 NIG_STATUS_SERDES0_LINK_STATUS));
3212 if (bp->phy_flags & PHY_XGXS_FLAG) {
3213 mask = (NIG_MASK_XGXS0_LINK10G |
3214 NIG_MASK_XGXS0_LINK_STATUS);
3215 DP(NETIF_MSG_LINK, "enabled XGXS interrupt\n");
3216 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3217 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3218 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3220 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) {
3221 mask |= NIG_MASK_MI_INT;
3222 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3225 } else { /* SerDes */
3226 mask = NIG_MASK_SERDES0_LINK_STATUS;
3227 DP(NETIF_MSG_LINK, "enabled SerDes interrupt\n");
3228 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3229 if ((ext_phy_type !=
3230 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
3232 PORT_HW_CFG_SERDES_EXT_PHY_TYPE_NOT_CONN)) {
3233 mask |= NIG_MASK_MI_INT;
3234 DP(NETIF_MSG_LINK, "enabled external phy int\n");
3238 NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3240 DP(NETIF_MSG_LINK, "port %x, %s, int_status 0x%x,"
3241 " int_mask 0x%x, MI_INT %x, SERDES_LINK %x,"
3242 " 10G %x, XGXS_LINK %x\n", port,
(bp->phy_flags & PHY_XGXS_FLAG) ? "XGXS" : "SerDes",
3244 REG_RD(bp, NIG_REG_STATUS_INTERRUPT_PORT0 + port*4),
3245 REG_RD(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4),
3246 REG_RD(bp, NIG_REG_EMAC0_STATUS_MISC_MI_INT + port*0x18),
3247 REG_RD(bp, NIG_REG_SERDES0_STATUS_LINK_STATUS + port*0x3c),
3248 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK10G + port*0x68),
3249 REG_RD(bp, NIG_REG_XGXS0_STATUS_LINK_STATUS + port*0x68)
3253 static void bnx2x_bcm8072_external_rom_boot(struct bnx2x *bp)
3255 u32 ext_phy_addr = ((bp->ext_phy_config &
3256 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3257 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3258 u32 fw_ver1, fw_ver2;
3260 /* Need to wait 200ms after reset */
3262 /* Boot port from external ROM
3263 * Set ser_boot_ctl bit in the MISC_CTRL1 register
3265 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3266 EXT_PHY_KR_PMA_PMD_DEVAD,
3267 EXT_PHY_KR_MISC_CTRL1, 0x0001);
3269 /* Reset internal microprocessor */
3270 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3271 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3272 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3273 /* set micro reset = 0 */
3274 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3275 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3276 EXT_PHY_KR_ROM_MICRO_RESET);
3277 /* Reset internal microprocessor */
3278 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3279 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_GEN_CTRL,
3280 EXT_PHY_KR_ROM_RESET_INTERNAL_MP);
3281 /* wait for 100ms for code download via SPI port */
3284 /* Clear ser_boot_ctl bit */
3285 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3286 EXT_PHY_KR_PMA_PMD_DEVAD,
3287 EXT_PHY_KR_MISC_CTRL1, 0x0000);
3291 /* Print the PHY FW version */
3292 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3293 EXT_PHY_KR_PMA_PMD_DEVAD,
3295 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0, ext_phy_addr,
3296 EXT_PHY_KR_PMA_PMD_DEVAD,
3299 "8072 FW version 0x%x:0x%x\n", fw_ver1, fw_ver2);
3302 static void bnx2x_bcm8072_force_10G(struct bnx2x *bp)
3304 u32 ext_phy_addr = ((bp->ext_phy_config &
3305 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3306 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3308 /* Force KR or KX */
3309 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3310 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL,
3312 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3313 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_CTRL2,
3315 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3316 EXT_PHY_KR_PMA_PMD_DEVAD, EXT_PHY_KR_PMD_CTRL,
3318 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0, ext_phy_addr,
3319 EXT_PHY_KR_AUTO_NEG_DEVAD, EXT_PHY_KR_CTRL,
3323 static void bnx2x_ext_phy_init(struct bnx2x *bp)
3331 if (bp->phy_flags & PHY_XGXS_FLAG) {
3332 ext_phy_addr = ((bp->ext_phy_config &
3333 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3334 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3336 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
/* Make sure that the soft reset is off (except for the 8072:
 * due to the lock, it will be done inside the specific
 * handling)
 */
3341 if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3342 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
3343 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN) &&
3344 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072)) {
/* Wait for the soft reset to get cleared, up to 1 sec */
3346 for (cnt = 0; cnt < 1000; cnt++) {
3347 bnx2x_mdio45_read(bp, ext_phy_addr,
3348 EXT_PHY_OPT_PMA_PMD_DEVAD,
3349 EXT_PHY_OPT_CNTL, &ctrl);
3350 if (!(ctrl & (1<<15)))
3355 "control reg 0x%x (after %d ms)\n", ctrl, cnt);
3358 switch (ext_phy_type) {
3359 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3360 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3363 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3364 DP(NETIF_MSG_LINK, "XGXS 8705\n");
3366 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3367 EXT_PHY_OPT_PMA_PMD_DEVAD,
3368 EXT_PHY_OPT_PMD_MISC_CNTL,
3370 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3371 EXT_PHY_OPT_PMA_PMD_DEVAD,
3372 EXT_PHY_OPT_PHY_IDENTIFIER,
3374 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3375 EXT_PHY_OPT_PMA_PMD_DEVAD,
3376 EXT_PHY_OPT_CMU_PLL_BYPASS,
3378 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3379 EXT_PHY_OPT_WIS_DEVAD,
3380 EXT_PHY_OPT_LASI_CNTL, 0x1);
3383 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3384 DP(NETIF_MSG_LINK, "XGXS 8706\n");
3386 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3388 if (bp->req_line_speed == SPEED_10000) {
3390 "XGXS 8706 force 10Gbps\n");
3391 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3392 EXT_PHY_OPT_PMA_PMD_DEVAD,
3393 EXT_PHY_OPT_PMD_DIGITAL_CNT,
3398 "XGXS 8706 force 1Gbps\n");
3400 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3401 EXT_PHY_OPT_PMA_PMD_DEVAD,
3405 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3406 EXT_PHY_OPT_PMA_PMD_DEVAD,
3412 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3413 EXT_PHY_OPT_PMA_PMD_DEVAD,
3414 EXT_PHY_OPT_LASI_CNTL,
3418 /* Allow CL37 through CL73 */
3419 DP(NETIF_MSG_LINK, "XGXS 8706 AutoNeg\n");
3420 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3421 EXT_PHY_AUTO_NEG_DEVAD,
3422 EXT_PHY_OPT_AN_CL37_CL73,
/* Enable Full-Duplex advertisement on CL37 */
3426 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3427 EXT_PHY_AUTO_NEG_DEVAD,
3428 EXT_PHY_OPT_AN_CL37_FD,
3430 /* Enable CL37 AN */
3431 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3432 EXT_PHY_AUTO_NEG_DEVAD,
3433 EXT_PHY_OPT_AN_CL37_AN,
3435 /* Advertise 10G/1G support */
3436 if (bp->advertising &
3437 ADVERTISED_1000baseT_Full)
3439 if (bp->advertising &
3440 ADVERTISED_10000baseT_Full)
3443 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3444 EXT_PHY_AUTO_NEG_DEVAD,
3445 EXT_PHY_OPT_AN_ADV, val);
3447 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3448 EXT_PHY_OPT_PMA_PMD_DEVAD,
3449 EXT_PHY_OPT_LASI_CNTL,
3452 /* Enable clause 73 AN */
3453 bnx2x_mdio45_write(bp, ext_phy_addr,
3454 EXT_PHY_AUTO_NEG_DEVAD,
3460 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3461 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
/* Wait for the soft reset to get cleared, up to 1 sec */
3463 for (cnt = 0; cnt < 1000; cnt++) {
3464 bnx2x_mdio45_ctrl_read(bp, GRCBASE_EMAC0,
3466 EXT_PHY_OPT_PMA_PMD_DEVAD,
3467 EXT_PHY_OPT_CNTL, &ctrl);
3468 if (!(ctrl & (1<<15)))
3473 "8072 control reg 0x%x (after %d ms)\n",
3476 bnx2x_bcm8072_external_rom_boot(bp);
DP(NETIF_MSG_LINK, "Finished loading 8072 KR ROM\n");
3480 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3482 EXT_PHY_KR_PMA_PMD_DEVAD,
3484 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3486 EXT_PHY_KR_PMA_PMD_DEVAD,
3487 EXT_PHY_KR_LASI_CNTL, 0x0004);
/* If this is forced speed, set to KR or KX
 * (all others are not supported)
 */
3492 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3493 if (bp->req_line_speed == SPEED_10000) {
3494 bnx2x_bcm8072_force_10G(bp);
3496 "Forced speed 10G on 8072\n");
3499 HW_LOCK_RESOURCE_8072_MDIO);
3505 /* Advertise 10G/1G support */
3506 if (bp->advertising &
3507 ADVERTISED_1000baseT_Full)
3509 if (bp->advertising &
3510 ADVERTISED_10000baseT_Full)
3513 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3515 EXT_PHY_KR_AUTO_NEG_DEVAD,
/* Add support for CL37 (passive mode) I */
3518 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3520 EXT_PHY_KR_AUTO_NEG_DEVAD,
/* Add support for CL37 (passive mode) II */
3523 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3525 EXT_PHY_KR_AUTO_NEG_DEVAD,
/* Add support for CL37 (passive mode) III */
3528 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3530 EXT_PHY_KR_AUTO_NEG_DEVAD,
3532 /* Restart autoneg */
3534 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3536 EXT_PHY_KR_AUTO_NEG_DEVAD,
3537 EXT_PHY_KR_CTRL, 0x1200);
3538 DP(NETIF_MSG_LINK, "8072 Autoneg Restart: "
3539 "1G %ssupported 10G %ssupported\n",
3540 (val & (1<<5)) ? "" : "not ",
3541 (val & (1<<7)) ? "" : "not ");
3544 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3547 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3549 "Setting the SFX7101 LASI indication\n");
3550 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3551 EXT_PHY_OPT_PMA_PMD_DEVAD,
3552 EXT_PHY_OPT_LASI_CNTL, 0x1);
3554 "Setting the SFX7101 LED to blink on traffic\n");
3555 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3556 EXT_PHY_OPT_PMA_PMD_DEVAD,
/* read-modify-write pause advertising */
3560 bnx2x_mdio45_read(bp, ext_phy_addr,
3561 EXT_PHY_KR_AUTO_NEG_DEVAD,
3562 EXT_PHY_KR_AUTO_NEG_ADVERT, &val);
3563 val &= ~EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_BOTH;
3564 /* Please refer to Table 28B-3 of 802.3ab-1999 spec. */
3565 if (bp->advertising & ADVERTISED_Pause)
3566 val |= EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE;
3568 if (bp->advertising & ADVERTISED_Asym_Pause) {
3570 EXT_PHY_KR_AUTO_NEG_ADVERT_PAUSE_ASYMMETRIC;
DP(NETIF_MSG_LINK, "SFX7101 AN advertise 0x%x\n", val);
3573 bnx2x_mdio45_vwrite(bp, ext_phy_addr,
3574 EXT_PHY_KR_AUTO_NEG_DEVAD,
3575 EXT_PHY_KR_AUTO_NEG_ADVERT, val);
3576 /* Restart autoneg */
3577 bnx2x_mdio45_read(bp, ext_phy_addr,
3578 EXT_PHY_KR_AUTO_NEG_DEVAD,
3579 EXT_PHY_KR_CTRL, &val);
3581 bnx2x_mdio45_write(bp, ext_phy_addr,
3582 EXT_PHY_KR_AUTO_NEG_DEVAD,
3583 EXT_PHY_KR_CTRL, val);
3587 BNX2X_ERR("BAD XGXS ext_phy_config 0x%x\n",
3588 bp->ext_phy_config);
3592 } else { /* SerDes */
3593 /* ext_phy_addr = ((bp->ext_phy_config &
3594 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_MASK) >>
3595 PORT_HW_CFG_SERDES_EXT_PHY_ADDR_SHIFT);
3597 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3598 switch (ext_phy_type) {
3599 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3600 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3603 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3604 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3608 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3609 bp->ext_phy_config);
3615 static void bnx2x_ext_phy_reset(struct bnx2x *bp)
3618 u32 ext_phy_addr = ((bp->ext_phy_config &
3619 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_MASK) >>
3620 PORT_HW_CFG_XGXS_EXT_PHY_ADDR_SHIFT);
3621 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
/* The PHY reset is controlled by GPIO 1
3624 * Give it 1ms of reset pulse
3626 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3627 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3628 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3629 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3631 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3632 MISC_REGISTERS_GPIO_OUTPUT_HIGH);
3635 if (bp->phy_flags & PHY_XGXS_FLAG) {
3636 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
3637 switch (ext_phy_type) {
3638 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
3639 DP(NETIF_MSG_LINK, "XGXS Direct\n");
3642 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
3643 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
3644 DP(NETIF_MSG_LINK, "XGXS 8705/8706\n");
3645 bnx2x_mdio45_write(bp, ext_phy_addr,
3646 EXT_PHY_OPT_PMA_PMD_DEVAD,
3647 EXT_PHY_OPT_CNTL, 0xa040);
3650 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
3651 DP(NETIF_MSG_LINK, "XGXS 8072\n");
3652 bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3653 bnx2x_mdio45_ctrl_write(bp, GRCBASE_EMAC0,
3655 EXT_PHY_KR_PMA_PMD_DEVAD,
3657 bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);
3660 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
3661 DP(NETIF_MSG_LINK, "XGXS SFX7101\n");
3665 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
3666 bp->ext_phy_config);
3670 } else { /* SerDes */
3671 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
3672 switch (ext_phy_type) {
3673 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
3674 DP(NETIF_MSG_LINK, "SerDes Direct\n");
3677 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
3678 DP(NETIF_MSG_LINK, "SerDes 5482\n");
3682 DP(NETIF_MSG_LINK, "BAD SerDes ext_phy_config 0x%x\n",
3683 bp->ext_phy_config);
3689 static void bnx2x_link_initialize(struct bnx2x *bp)
3691 int port = bp->port;
3693 /* disable attentions */
3694 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3695 (NIG_MASK_XGXS0_LINK_STATUS |
3696 NIG_MASK_XGXS0_LINK10G |
3697 NIG_MASK_SERDES0_LINK_STATUS |
3700 /* Activate the external PHY */
3701 bnx2x_ext_phy_reset(bp);
3703 bnx2x_set_aer_mmd(bp);
3705 if (bp->phy_flags & PHY_XGXS_FLAG)
3706 bnx2x_set_master_ln(bp);
3708 /* reset the SerDes and wait for reset bit return low */
3709 bnx2x_reset_unicore(bp);
3711 bnx2x_set_aer_mmd(bp);
3713 /* setting the masterLn_def again after the reset */
3714 if (bp->phy_flags & PHY_XGXS_FLAG) {
3715 bnx2x_set_master_ln(bp);
3716 bnx2x_set_swap_lanes(bp);
3719 /* Set Parallel Detect */
3720 if (bp->req_autoneg & AUTONEG_SPEED)
3721 bnx2x_set_parallel_detection(bp);
3723 if (bp->phy_flags & PHY_XGXS_FLAG) {
3724 if (bp->req_line_speed &&
3725 bp->req_line_speed < SPEED_1000) {
3726 bp->phy_flags |= PHY_SGMII_FLAG;
3728 bp->phy_flags &= ~PHY_SGMII_FLAG;
3732 if (!(bp->phy_flags & PHY_SGMII_FLAG)) {
3735 rx_eq = ((bp->serdes_config &
3736 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_MASK) >>
3737 PORT_HW_CFG_SERDES_RX_DRV_EQUALIZER_SHIFT);
3739 DP(NETIF_MSG_LINK, "setting rx eq to %d\n", rx_eq);
3740 for (bank = MDIO_REG_BANK_RX0; bank <= MDIO_REG_BANK_RX_ALL;
3741 bank += (MDIO_REG_BANK_RX1 - MDIO_REG_BANK_RX0)) {
3742 MDIO_SET_REG_BANK(bp, bank);
3743 bnx2x_mdio22_write(bp, MDIO_RX0_RX_EQ_BOOST,
3745 MDIO_RX0_RX_EQ_BOOST_EQUALIZER_CTRL_MASK) |
3746 MDIO_RX0_RX_EQ_BOOST_OFFSET_CTRL));
3749 /* forced speed requested? */
3750 if (!(bp->req_autoneg & AUTONEG_SPEED)) {
3751 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3753 /* disable autoneg */
3754 bnx2x_set_autoneg(bp);
3756 /* program speed and duplex */
3757 bnx2x_program_serdes(bp);
3759 } else { /* AN_mode */
3760 DP(NETIF_MSG_LINK, "not SGMII, AN\n");
3763 bnx2x_set_brcm_cl37_advertisment(bp);
3765 /* program duplex & pause advertisement (for aneg) */
3766 bnx2x_set_ieee_aneg_advertisment(bp);
3768 /* enable autoneg */
3769 bnx2x_set_autoneg(bp);
3771 /* enable and restart AN */
3772 bnx2x_restart_autoneg(bp);
3775 } else { /* SGMII mode */
3776 DP(NETIF_MSG_LINK, "SGMII\n");
3778 bnx2x_initialize_sgmii_process(bp);
3781 /* init ext phy and enable link state int */
3782 bnx2x_ext_phy_init(bp);
3784 /* enable the interrupt */
3785 bnx2x_link_int_enable(bp);
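/* Pulse the SerDes/XGXS reset bits: writing the RESET_REG_3 CLEAR
 * register asserts the reset and writing the SET register releases it.
 */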
3788 static void bnx2x_phy_deassert(struct bnx2x *bp)
3790 int port = bp->port;
3793 if (bp->phy_flags & PHY_XGXS_FLAG) {
3794 DP(NETIF_MSG_LINK, "XGXS\n");
3795 val = XGXS_RESET_BITS;
3797 } else { /* SerDes */
3798 DP(NETIF_MSG_LINK, "SerDes\n");
3799 val = SERDES_RESET_BITS;
3802 val = val << (port*16);
3804 /* reset and unreset the SerDes/XGXS */
3805 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR, val);
3807 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_SET, val);
3810 static int bnx2x_phy_init(struct bnx2x *bp)
3812 DP(NETIF_MSG_LINK, "started\n");
3813 if (CHIP_REV(bp) == CHIP_REV_FPGA) {
3814 bp->phy_flags |= PHY_EMAC_FLAG;
3816 bp->line_speed = SPEED_10000;
3817 bp->duplex = DUPLEX_FULL;
3818 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3819 bnx2x_emac_enable(bp);
3820 bnx2x_link_report(bp);
3823 } else if (CHIP_REV(bp) == CHIP_REV_EMUL) {
3824 bp->phy_flags |= PHY_BMAC_FLAG;
3826 bp->line_speed = SPEED_10000;
3827 bp->duplex = DUPLEX_FULL;
3828 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + bp->port*4, 0);
3829 bnx2x_bmac_enable(bp, 0);
3830 bnx2x_link_report(bp);
3834 bnx2x_phy_deassert(bp);
3835 bnx2x_link_initialize(bp);
3841 static void bnx2x_link_reset(struct bnx2x *bp)
3843 int port = bp->port;
3844 u32 board = (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK);
3846 /* update shared memory */
3847 bp->link_status = 0;
3848 bnx2x_update_mng(bp);
3850 /* disable attentions */
3851 bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
3852 (NIG_MASK_XGXS0_LINK_STATUS |
3853 NIG_MASK_XGXS0_LINK10G |
3854 NIG_MASK_SERDES0_LINK_STATUS |
3857 /* activate nig drain */
3858 NIG_WR(NIG_REG_EGRESS_DRAIN0_MODE + port*4, 1);
3860 /* disable nig egress interface */
3861 NIG_WR(NIG_REG_BMAC0_OUT_EN + port*4, 0);
3862 NIG_WR(NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0);
3864 /* Stop BigMac rx */
3865 bnx2x_bmac_rx_disable(bp);
3868 NIG_WR(NIG_REG_NIG_EMAC0_EN + port*4, 0);
/* The PHY reset is controlled by GPIO 1
3873 * Hold it as output low
3875 if ((board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1002G) &&
3876 (board != SHARED_HW_CFG_BOARD_TYPE_BCM957710T1003G)) {
3877 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
3878 MISC_REGISTERS_GPIO_OUTPUT_LOW);
3879 DP(NETIF_MSG_LINK, "reset external PHY\n");
3882 /* reset the SerDes/XGXS */
3883 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_3_CLEAR,
3884 (0x1ff << (port*16)));
3887 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
3888 (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
3890 /* disable nig ingress interface */
3891 NIG_WR(NIG_REG_BMAC0_IN_EN + port*4, 0);
3892 NIG_WR(NIG_REG_EMAC0_IN_EN + port*4, 0);
3898 #ifdef BNX2X_XGXS_LB
3899 static void bnx2x_set_xgxs_loopback(struct bnx2x *bp, int is_10g)
3901 int port = bp->port;
3906 DP(NETIF_MSG_LINK, "XGXS 10G loopback enable\n");
3908 /* change the uni_phy_addr in the nig */
3909 REG_RD(bp, (NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18),
3911 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, 0x5);
3913 /* change the aer mmd */
3914 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_AER_BLOCK);
3915 bnx2x_mdio22_write(bp, MDIO_AER_BLOCK_AER_REG, 0x2800);
3917 /* config combo IEEE0 control reg for loopback */
3918 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_CL73_IEEEB0);
3919 bnx2x_mdio22_write(bp, MDIO_CL73_IEEEB0_CL73_AN_CONTROL,
3922 /* set aer mmd back */
3923 bnx2x_set_aer_mmd(bp);
3926 NIG_WR(NIG_REG_XGXS0_CTRL_MD_DEVAD + port*0x18, md_devad);
3931 DP(NETIF_MSG_LINK, "XGXS 1G loopback enable\n");
3933 MDIO_SET_REG_BANK(bp, MDIO_REG_BANK_COMBO_IEEE0);
3934 bnx2x_mdio22_read(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3936 bnx2x_mdio22_write(bp, MDIO_COMBO_IEEE0_MII_CONTROL,
3938 MDIO_COMBO_IEEO_MII_CONTROL_LOOPBACK));
3943 /* end of PHY/MAC */
3948 * General service functions
3951 /* the slow path queue is odd since completions arrive on the fastpath ring */
3952 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
3953 u32 data_hi, u32 data_lo, int common)
3955 int port = bp->port;
3958 "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
3959 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
3960 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
3961 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
3963 #ifdef BNX2X_STOP_ON_ERROR
3964 if (unlikely(bp->panic))
3968 spin_lock(&bp->spq_lock);
3970 if (!bp->spq_left) {
3971 BNX2X_ERR("BUG! SPQ ring full!\n");
3972 spin_unlock(&bp->spq_lock);
/* CID needs the port number to be encoded in it */
3978 bp->spq_prod_bd->hdr.conn_and_cmd_data =
3979 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
3981 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
3983 bp->spq_prod_bd->hdr.type |=
3984 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
3986 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
3987 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
3991 if (bp->spq_prod_bd == bp->spq_last_bd) {
3992 bp->spq_prod_bd = bp->spq;
3993 bp->spq_prod_idx = 0;
3994 DP(NETIF_MSG_TIMER, "end of spq\n");
4001 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
4004 spin_unlock(&bp->spq_lock);
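/* The producer wraps back to the ring base at spq_last_bd and the
 * doorbell is the XSTORM producer offset written above.  Illustrative
 * use: bnx2x_sp_post(bp, cmd, cid, U64_HI(addr), U64_LO(addr), 1)
 * posts a "common" ramrod whose completion arrives on the fastpath
 * ring (cmd/cid/addr here are placeholders, not driver constants).
 */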
4008 /* acquire split MCP access lock register */
4009 static int bnx2x_lock_alr(struct bnx2x *bp)
4016 for (j = 0; j < i*10; j++) {
4018 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
4019 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
4020 if (val & (1L << 31))
4026 if (!(val & (1L << 31))) {
BNX2X_ERR("Cannot acquire MCP access lock register\n");
4035 /* Release split MCP access lock register */
4036 static void bnx2x_unlock_alr(struct bnx2x *bp)
4040 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
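/* ALR protocol: the would-be owner writes bit 31 of the register at
 * GRCBASE_MCP + 0x9c and reads it back; seeing the bit set means the
 * lock was granted.  Writing 0 releases it to the MCP or the other
 * port.
 */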
4043 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
4045 struct host_def_status_block *def_sb = bp->def_status_blk;
4048 barrier(); /* status block is written to by the chip */
4050 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
4051 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
4054 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
4055 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
4058 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
4059 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
4062 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
4063 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
4066 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
4067 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
4074 * slow path service functions
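/* Attention handling is split in two phases: bits newly raised in the
 * attention status block are "asserted" (mask them in the AEU, ack the
 * IGU, service hard-wired sources such as the NIG link interrupt) and
 * bits newly dropped are "deasserted" (walk the dynamic attention
 * groups, report HW errors, then restore the AEU mask).
 */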
4077 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
4079 int port = bp->port;
4080 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
4081 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4082 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4083 u32 nig_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
4084 NIG_REG_MASK_INTERRUPT_PORT0;
4086 if (~bp->aeu_mask & (asserted & 0xff))
4087 BNX2X_ERR("IGU ERROR\n");
4088 if (bp->attn_state & asserted)
4089 BNX2X_ERR("IGU ERROR\n");
4091 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
4092 bp->aeu_mask, asserted);
4093 bp->aeu_mask &= ~(asserted & 0xff);
4094 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
4096 REG_WR(bp, aeu_addr, bp->aeu_mask);
4098 bp->attn_state |= asserted;
4100 if (asserted & ATTN_HARD_WIRED_MASK) {
4101 if (asserted & ATTN_NIG_FOR_FUNC) {
4102 u32 nig_status_port;
4103 u32 nig_int_addr = port ?
4104 NIG_REG_STATUS_INTERRUPT_PORT1 :
4105 NIG_REG_STATUS_INTERRUPT_PORT0;
4107 bp->nig_mask = REG_RD(bp, nig_mask_addr);
4108 REG_WR(bp, nig_mask_addr, 0);
4110 nig_status_port = REG_RD(bp, nig_int_addr);
4111 bnx2x_link_update(bp);
4113 /* handle unicore attn? */
4115 if (asserted & ATTN_SW_TIMER_4_FUNC)
4116 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
4118 if (asserted & GPIO_2_FUNC)
4119 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
4121 if (asserted & GPIO_3_FUNC)
4122 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
4124 if (asserted & GPIO_4_FUNC)
4125 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
4128 if (asserted & ATTN_GENERAL_ATTN_1) {
4129 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
4130 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
4132 if (asserted & ATTN_GENERAL_ATTN_2) {
4133 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
4134 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
4136 if (asserted & ATTN_GENERAL_ATTN_3) {
4137 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
4138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
4141 if (asserted & ATTN_GENERAL_ATTN_4) {
4142 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
4143 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
4145 if (asserted & ATTN_GENERAL_ATTN_5) {
4146 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
4147 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
4149 if (asserted & ATTN_GENERAL_ATTN_6) {
4150 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
4151 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
4155 } /* if hardwired */
4157 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
4158 asserted, BAR_IGU_INTMEM + igu_addr);
4159 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
4161 /* now set back the mask */
4162 if (asserted & ATTN_NIG_FOR_FUNC)
4163 REG_WR(bp, nig_mask_addr, bp->nig_mask);
4166 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4168 int port = bp->port;
4170 struct attn_route attn;
4171 struct attn_route group_mask;
4175 /* need to take HW lock because MCP or other port might also
4176 try to handle this event */
4179 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4180 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4181 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4182 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
4183 DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);
4185 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4186 if (deasserted & (1 << index)) {
4187 group_mask = bp->attn_group[index];
4189 DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
4190 (unsigned long long)group_mask.sig[0]);
4192 if (attn.sig[3] & group_mask.sig[3] &
4193 EVEREST_GEN_ATTN_IN_USE_MASK) {
4195 if (attn.sig[3] & BNX2X_MC_ASSERT_BITS) {
4197 BNX2X_ERR("MC assert!\n");
4200 } else if (attn.sig[3] & BNX2X_MCP_ASSERT) {
4202 BNX2X_ERR("MCP assert!\n");
4204 MISC_REG_AEU_GENERAL_ATTN_11, 0);
4205 bnx2x_mc_assert(bp);
BNX2X_ERR("UNKNOWN HW ASSERT!\n");
4212 if (attn.sig[1] & group_mask.sig[1] &
4213 BNX2X_DOORQ_ASSERT) {
4215 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
4216 BNX2X_ERR("DB hw attention 0x%x\n", val);
4217 /* DORQ discard attention */
4219 BNX2X_ERR("FATAL error from DORQ\n");
4222 if (attn.sig[2] & group_mask.sig[2] &
4223 AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
4225 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
4226 BNX2X_ERR("CFC hw attention 0x%x\n", val);
4227 /* CFC error attention */
4229 BNX2X_ERR("FATAL error from CFC\n");
4232 if (attn.sig[2] & group_mask.sig[2] &
4233 AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
4235 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
4236 BNX2X_ERR("PXP hw attention 0x%x\n", val);
4237 /* RQ_USDMDP_FIFO_OVERFLOW */
4239 BNX2X_ERR("FATAL error from PXP\n");
4242 if (attn.sig[3] & group_mask.sig[3] &
4243 EVEREST_LATCHED_ATTN_IN_USE_MASK) {
4245 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL,
4247 DP(NETIF_MSG_HW, "got latched bits 0x%x\n",
4251 if ((attn.sig[0] & group_mask.sig[0] &
4252 HW_INTERRUT_ASSERT_SET_0) ||
4253 (attn.sig[1] & group_mask.sig[1] &
4254 HW_INTERRUT_ASSERT_SET_1) ||
4255 (attn.sig[2] & group_mask.sig[2] &
4256 HW_INTERRUT_ASSERT_SET_2))
4257 BNX2X_ERR("FATAL HW block attention\n");
4259 if ((attn.sig[0] & group_mask.sig[0] &
4260 HW_PRTY_ASSERT_SET_0) ||
4261 (attn.sig[1] & group_mask.sig[1] &
4262 HW_PRTY_ASSERT_SET_1) ||
4263 (attn.sig[2] & group_mask.sig[2] &
4264 HW_PRTY_ASSERT_SET_2))
4265 BNX2X_ERR("FATAL HW block parity attention\n");
4269 bnx2x_unlock_alr(bp);
4271 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;
4274 /* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
4275 val, BAR_IGU_INTMEM + reg_addr); */
4276 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
4278 if (bp->aeu_mask & (deasserted & 0xff))
4279 BNX2X_ERR("IGU BUG\n");
4280 if (~bp->attn_state & deasserted)
4281 BNX2X_ERR("IGU BUG\n");
4283 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4284 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4286 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
4287 bp->aeu_mask |= (deasserted & 0xff);
4289 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
4290 REG_WR(bp, reg_addr, bp->aeu_mask);
4292 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4293 bp->attn_state &= ~deasserted;
4294 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4297 static void bnx2x_attn_int(struct bnx2x *bp)
4299 /* read local copy of bits */
4300 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
4301 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
4302 u32 attn_state = bp->attn_state;
4304 /* look for changed bits */
4305 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4306 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4309 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4310 attn_bits, attn_ack, asserted, deasserted);
4312 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
4313 BNX2X_ERR("bad attention state\n");
4315 /* handle bits that were raised */
4317 bnx2x_attn_int_asserted(bp, asserted);
4320 bnx2x_attn_int_deasserted(bp, deasserted);
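/* bnx2x_sp_task is the work queue bottom half for the slow path
 * interrupt: it refreshes the default status block indices, runs the
 * attention handler above and re-arms the per-storm index acks.
 */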
4323 static void bnx2x_sp_task(struct work_struct *work)
4325 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
4328 /* Return here if interrupt is disabled */
4329 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4330 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4334 status = bnx2x_update_dsb_idx(bp);
4336 BNX2X_ERR("spurious slowpath interrupt!\n");
4338 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
4345 /* CStorm events: query_stats, cfc delete ramrods */
4347 bp->stat_pending = 0;
4349 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
4351 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
4353 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
4355 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
4357 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
4361 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
4363 struct net_device *dev = dev_instance;
4364 struct bnx2x *bp = netdev_priv(dev);
4366 /* Return here if interrupt is disabled */
4367 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
4368 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
4372 bnx2x_ack_sb(bp, 16, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
4374 #ifdef BNX2X_STOP_ON_ERROR
4375 if (unlikely(bp->panic))
4379 schedule_work(&bp->sp_task);
4384 /* end of slow path */
4388 /****************************************************************************
4390 ****************************************************************************/
#define UPDATE_STAT(s, t) \
	do { \
		estats->t += new->s - old->s; \
	} while (0)
4398 /* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
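/* Illustrative only (not driver code): ADD_64 carrying out of the
 * low word.
 */
static inline void bnx2x_add64_example(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);
	/* now s_hi == 1 and s_lo == 0 */
}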
4405 /* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { /* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { /* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { /* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { /* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { /* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
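/* Borrow example: 0x00000001_00000000 minus 0x00000000_00000001
 * takes the m_lo < s_lo path; the 'loan' drops d_hi to 0 and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff.
 */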
4428 /* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)
#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
	do { \
		DIFF_64(diff.hi, new->s_hi, old->s_hi, \
			diff.lo, new->s_lo, old->s_lo); \
		old->s_hi = new->s_hi; \
		old->s_lo = new->s_lo; \
		ADD_64(estats->t_hi, diff.hi, \
		       estats->t_lo, diff.lo); \
	} while (0)
4444 /* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)
#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
	do { \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
	} while (0)
#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
	} while (0)
4464 * General service functions
static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
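/* Statistics are kept as {hi, lo} pairs of u32; bnx2x_hilo folds a
 * pair into a long given a pointer to the hi word, truncating to the
 * low 32 bits on 32-bit hosts.  Typical use (field name illustrative):
 * bnx2x_hilo(&estats->total_bytes_received_hi).
 */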
4480 * Init service functions
4483 static void bnx2x_init_mac_stats(struct bnx2x *bp)
4485 struct dmae_command *dmae;
4486 int port = bp->port;
4487 int loader_idx = port * 8;
4491 bp->executer_idx = 0;
4494 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4495 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4497 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4499 DMAE_CMD_ENDIANITY_DW_SWAP |
4501 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4504 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
4506 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4507 dmae->opcode = opcode;
4508 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
4510 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
4512 dmae->dst_addr_lo = bp->fw_mb >> 2;
4513 dmae->dst_addr_hi = 0;
4514 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
4517 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4518 dmae->comp_addr_hi = 0;
4521 dmae->comp_addr_lo = 0;
4522 dmae->comp_addr_hi = 0;
/* no need to collect statistics while the link is down */
4532 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4533 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
4534 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4536 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4538 DMAE_CMD_ENDIANITY_DW_SWAP |
4540 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4542 if (bp->phy_flags & PHY_BMAC_FLAG) {
4544 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
4545 NIG_REG_INGRESS_BMAC0_MEM);
4547 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
4548 BIGMAC_REGISTER_TX_STAT_GTBYT */
4549 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4550 dmae->opcode = opcode;
4551 dmae->src_addr_lo = (mac_addr +
4552 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4553 dmae->src_addr_hi = 0;
4554 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4555 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4556 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
4557 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
4558 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4559 dmae->comp_addr_hi = 0;
4562 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
4563 BIGMAC_REGISTER_RX_STAT_GRIPJ */
4564 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4565 dmae->opcode = opcode;
4566 dmae->src_addr_lo = (mac_addr +
4567 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4568 dmae->src_addr_hi = 0;
4569 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4570 offsetof(struct bmac_stats, rx_gr64));
4571 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4572 offsetof(struct bmac_stats, rx_gr64));
4573 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4574 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4575 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4576 dmae->comp_addr_hi = 0;
4579 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
4581 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4583 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4584 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4585 dmae->opcode = opcode;
4586 dmae->src_addr_lo = (mac_addr +
4587 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4588 dmae->src_addr_hi = 0;
4589 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4590 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4591 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4592 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4593 dmae->comp_addr_hi = 0;
4596 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4597 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4598 dmae->opcode = opcode;
4599 dmae->src_addr_lo = (mac_addr +
4600 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4601 dmae->src_addr_hi = 0;
4602 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4603 offsetof(struct emac_stats,
4604 rx_falsecarriererrors));
4605 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4606 offsetof(struct emac_stats,
4607 rx_falsecarriererrors));
4609 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4610 dmae->comp_addr_hi = 0;
4613 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4614 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4615 dmae->opcode = opcode;
4616 dmae->src_addr_lo = (mac_addr +
4617 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4618 dmae->src_addr_hi = 0;
4619 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
4620 offsetof(struct emac_stats,
4622 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
4623 offsetof(struct emac_stats,
4625 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4626 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4627 dmae->comp_addr_hi = 0;
4632 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4633 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4634 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4635 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4636 #ifdef __BIG_ENDIAN
4637 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4638 #else
4639 DMAE_CMD_ENDIANITY_DW_SWAP |
4640 #endif
4641 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
4642 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4643 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4644 dmae->src_addr_hi = 0;
4645 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
4646 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
4647 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
4648 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
4649 offsetof(struct nig_stats, done));
4650 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
4651 offsetof(struct nig_stats, done));
4652 dmae->comp_val = 0xffffffff;
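/* Every DMAE block above follows the same pattern: fill a struct
 * dmae_command with source, destination, a length in 32-bit words and a
 * completion address/value, then let the engine run the chain. A
 * condensed sketch of one GRC-to-host descriptor (opcode and comp_addr_*
 * setup omitted; 'src_grc_addr', 'dst' and 'len_dw' are illustrative
 * parameters):
 */
#if 0 /* illustrative sketch, not part of the driver */
static void dmae_fill_sketch(struct dmae_command *dmae,
			     u32 src_grc_addr, dma_addr_t dst, u16 len_dw)
{
	/* GRC addresses are given in 32-bit words, hence the >> 2 */
	dmae->src_addr_lo = src_grc_addr >> 2;
	dmae->src_addr_hi = 0;
	/* host (PCI) addresses are split into two 32-bit halves */
	dmae->dst_addr_lo = U64_LO(dst);
	dmae->dst_addr_hi = U64_HI(dst);
	dmae->len = len_dw;
	/* the engine writes comp_val to the completion address when done */
	dmae->comp_val = 0xffffffff;
}
#endif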
4655 static void bnx2x_init_stats(struct bnx2x *bp)
4657 int port = bp->port;
4659 bp->stats_state = STATS_STATE_DISABLE;
4660 bp->executer_idx = 0;
4662 bp->old_brb_discard = REG_RD(bp,
4663 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4665 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
4666 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
4667 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4669 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
4670 REG_WR(bp, BAR_XSTRORM_INTMEM +
4671 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4673 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
4674 REG_WR(bp, BAR_TSTRORM_INTMEM +
4675 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4677 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
4678 REG_WR(bp, BAR_CSTRORM_INTMEM +
4679 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
4681 REG_WR(bp, BAR_XSTRORM_INTMEM +
4682 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4683 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4684 REG_WR(bp, BAR_XSTRORM_INTMEM +
4685 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4686 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4688 REG_WR(bp, BAR_TSTRORM_INTMEM +
4689 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
4690 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4691 REG_WR(bp, BAR_TSTRORM_INTMEM +
4692 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
4693 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4696 static void bnx2x_stop_stats(struct bnx2x *bp)
4699 if (bp->stats_state != STATS_STATE_DISABLE) {
4702 bp->stats_state = STATS_STATE_STOP;
4703 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
4705 while (bp->stats_state != STATS_STATE_DISABLE) {
4707 BNX2X_ERR("timeout waiting for stats stop\n");
4714 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
4718 * Statistics service functions
4721 static void bnx2x_update_bmac_stats(struct bnx2x *bp)
4725 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
4726 struct bmac_stats *old = &bp->old_bmac;
4727 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4732 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
4733 tx_gtbyt.lo, total_bytes_transmitted_lo);
4735 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
4736 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
4737 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4739 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
4740 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
4741 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
4743 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
4744 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
4745 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
4746 estats->total_unicast_packets_transmitted_lo, sum.lo);
4748 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
4749 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
4750 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
4751 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
4752 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
4753 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
4754 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
4755 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
4756 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
4757 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
4758 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
4760 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
4761 UPDATE_STAT(rx_grund.lo, runt_packets_received);
4762 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
4763 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
4764 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
4765 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
4766 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
4767 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
4769 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
4770 rx_grerb.lo, stat_IfHCInBadOctets_lo);
4771 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
4772 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
4773 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
4774 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
4775 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
4778 static void bnx2x_update_emac_stats(struct bnx2x *bp)
4780 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
4781 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4783 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
4784 total_bytes_transmitted_lo);
4785 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
4786 total_unicast_packets_transmitted_hi,
4787 total_unicast_packets_transmitted_lo);
4788 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
4789 total_multicast_packets_transmitted_hi,
4790 total_multicast_packets_transmitted_lo);
4791 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
4792 total_broadcast_packets_transmitted_hi,
4793 total_broadcast_packets_transmitted_lo);
4795 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
4796 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
4797 estats->single_collision_transmit_frames +=
4798 new->tx_dot3statssinglecollisionframes;
4799 estats->multiple_collision_transmit_frames +=
4800 new->tx_dot3statsmultiplecollisionframes;
4801 estats->late_collision_frames += new->tx_dot3statslatecollisions;
4802 estats->excessive_collision_frames +=
4803 new->tx_dot3statsexcessivecollisions;
4804 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
4805 estats->frames_transmitted_65_127_bytes +=
4806 new->tx_etherstatspkts65octetsto127octets;
4807 estats->frames_transmitted_128_255_bytes +=
4808 new->tx_etherstatspkts128octetsto255octets;
4809 estats->frames_transmitted_256_511_bytes +=
4810 new->tx_etherstatspkts256octetsto511octets;
4811 estats->frames_transmitted_512_1023_bytes +=
4812 new->tx_etherstatspkts512octetsto1023octets;
4813 estats->frames_transmitted_1024_1522_bytes +=
4814 new->tx_etherstatspkts1024octetsto1522octet;
4815 estats->frames_transmitted_1523_9022_bytes +=
4816 new->tx_etherstatspktsover1522octets;
4818 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
4819 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
4820 estats->false_carrier_detections += new->rx_falsecarriererrors;
4821 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
4822 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
4823 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
4824 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
4825 estats->control_frames_received += new->rx_maccontrolframesreceived;
4826 estats->error_runt_packets_received += new->rx_etherstatsfragments;
4827 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
4829 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
4830 stat_IfHCInBadOctets_lo);
4831 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
4832 stat_IfHCOutBadOctets_lo);
4833 estats->stat_Dot3statsInternalMacTransmitErrors +=
4834 new->tx_dot3statsinternalmactransmiterrors;
4835 estats->stat_Dot3StatsCarrierSenseErrors +=
4836 new->rx_dot3statscarriersenseerrors;
4837 estats->stat_Dot3StatsDeferredTransmissions +=
4838 new->tx_dot3statsdeferredtransmissions;
4839 estats->stat_FlowControlDone += new->tx_flowcontroldone;
4840 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
4843 static int bnx2x_update_storm_stats(struct bnx2x *bp)
4845 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
4846 struct tstorm_common_stats *tstats = &stats->tstorm_common;
4847 struct tstorm_per_client_stats *tclient =
4848 &tstats->client_statistics[0];
4849 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
4850 struct xstorm_common_stats *xstats = &stats->xstorm_common;
4851 struct nig_stats *nstats = bnx2x_sp(bp, nig);
4852 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4855 /* are DMAE stats valid? */
4856 if (nstats->done != 0xffffffff) {
4857 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
4861 /* are storm stats valid? */
4862 if (tstats->done.hi != 0xffffffff) {
4863 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
4866 if (xstats->done.hi != 0xffffffff) {
4867 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
4871 estats->total_bytes_received_hi =
4872 estats->valid_bytes_received_hi =
4873 le32_to_cpu(tclient->total_rcv_bytes.hi);
4874 estats->total_bytes_received_lo =
4875 estats->valid_bytes_received_lo =
4876 le32_to_cpu(tclient->total_rcv_bytes.lo);
4877 ADD_64(estats->total_bytes_received_hi,
4878 le32_to_cpu(tclient->rcv_error_bytes.hi),
4879 estats->total_bytes_received_lo,
4880 le32_to_cpu(tclient->rcv_error_bytes.lo));
4882 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4883 total_unicast_packets_received_hi,
4884 total_unicast_packets_received_lo);
4885 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4886 total_multicast_packets_received_hi,
4887 total_multicast_packets_received_lo);
4888 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4889 total_broadcast_packets_received_hi,
4890 total_broadcast_packets_received_lo);
4892 estats->frames_received_64_bytes = MAC_STX_NA;
4893 estats->frames_received_65_127_bytes = MAC_STX_NA;
4894 estats->frames_received_128_255_bytes = MAC_STX_NA;
4895 estats->frames_received_256_511_bytes = MAC_STX_NA;
4896 estats->frames_received_512_1023_bytes = MAC_STX_NA;
4897 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
4898 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
4900 estats->x_total_sent_bytes_hi =
4901 le32_to_cpu(xstats->total_sent_bytes.hi);
4902 estats->x_total_sent_bytes_lo =
4903 le32_to_cpu(xstats->total_sent_bytes.lo);
4904 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
4906 estats->t_rcv_unicast_bytes_hi =
4907 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
4908 estats->t_rcv_unicast_bytes_lo =
4909 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
4910 estats->t_rcv_broadcast_bytes_hi =
4911 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
4912 estats->t_rcv_broadcast_bytes_lo =
4913 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4914 estats->t_rcv_multicast_bytes_hi =
4915 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
4916 estats->t_rcv_multicast_bytes_lo =
4917 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
4918 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
4920 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
4921 estats->packets_too_big_discard =
4922 le32_to_cpu(tclient->packets_too_big_discard);
4923 estats->jabber_packets_received = estats->packets_too_big_discard +
4924 estats->stat_Dot3statsFramesTooLong;
4925 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
4926 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
4927 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
4928 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
4929 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
4930 estats->brb_truncate_discard =
4931 le32_to_cpu(tstats->brb_truncate_discard);
4933 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
4934 bp->old_brb_discard = nstats->brb_discard;
4936 estats->brb_packet = nstats->brb_packet;
4937 estats->brb_truncate = nstats->brb_truncate;
4938 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
4939 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
4940 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
4941 estats->mng_discard = nstats->mng_discard;
4942 estats->mng_octet_inp = nstats->mng_octet_inp;
4943 estats->mng_octet_out = nstats->mng_octet_out;
4944 estats->mng_packet_inp = nstats->mng_packet_inp;
4945 estats->mng_packet_out = nstats->mng_packet_out;
4946 estats->pbf_octets = nstats->pbf_octets;
4947 estats->pbf_packet = nstats->pbf_packet;
4948 estats->safc_inp = nstats->safc_inp;
4950 xstats->done.hi = 0;
4951 tstats->done.hi = 0;
4957 static void bnx2x_update_net_stats(struct bnx2x *bp)
4959 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
4960 struct net_device_stats *nstats = &bp->dev->stats;
4962 nstats->rx_packets =
4963 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4964 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4965 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4967 nstats->tx_packets =
4968 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4969 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4970 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4972 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4975 bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4977 nstats->rx_dropped = estats->checksum_discard +
4978 estats->mac_discard;
4979 nstats->tx_dropped = 0;
4982 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
4984 nstats->collisions =
4985 estats->single_collision_transmit_frames +
4986 estats->multiple_collision_transmit_frames +
4987 estats->late_collision_frames +
4988 estats->excessive_collision_frames;
4990 nstats->rx_length_errors = estats->runt_packets_received +
4991 estats->jabber_packets_received;
4992 nstats->rx_over_errors = estats->no_buff_discard;
4993 nstats->rx_crc_errors = estats->crc_receive_errors;
4994 nstats->rx_frame_errors = estats->alignment_errors;
4995 nstats->rx_fifo_errors = estats->brb_discard +
4996 estats->brb_truncate_discard;
4997 nstats->rx_missed_errors = estats->xxoverflow_discard;
4999 nstats->rx_errors = nstats->rx_length_errors +
5000 nstats->rx_over_errors +
5001 nstats->rx_crc_errors +
5002 nstats->rx_frame_errors +
5003 nstats->rx_fifo_errors;
5005 nstats->tx_aborted_errors = estats->late_collision_frames +
5006 estats->excessive_collision_frames;
5007 nstats->tx_carrier_errors = estats->false_carrier_detections;
5008 nstats->tx_fifo_errors = 0;
5009 nstats->tx_heartbeat_errors = 0;
5010 nstats->tx_window_errors = 0;
5012 nstats->tx_errors = nstats->tx_aborted_errors +
5013 nstats->tx_carrier_errors;
5015 estats->mac_stx_start = ++estats->mac_stx_end;
5018 static void bnx2x_update_stats(struct bnx2x *bp)
5022 if (!bnx2x_update_storm_stats(bp)) {
5024 if (bp->phy_flags & PHY_BMAC_FLAG) {
5025 bnx2x_update_bmac_stats(bp);
5027 } else if (bp->phy_flags & PHY_EMAC_FLAG) {
5028 bnx2x_update_emac_stats(bp);
5030 } else { /* unreached */
5031 BNX2X_ERR("no MAC active\n");
5035 bnx2x_update_net_stats(bp);
5038 if (bp->msglevel & NETIF_MSG_TIMER) {
5039 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
5040 struct net_device_stats *nstats = &bp->dev->stats;
5042 printk(KERN_DEBUG "%s:\n", bp->dev->name);
5043 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
5045 bnx2x_tx_avail(bp->fp),
5046 *bp->fp->tx_cons_sb, nstats->tx_packets);
5047 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
5049 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
5050 *bp->fp->rx_cons_sb, nstats->rx_packets);
5051 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
5052 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
5053 estats->driver_xoff, estats->brb_discard);
5054 printk(KERN_DEBUG "tstats: checksum_discard %u "
5055 "packets_too_big_discard %u no_buff_discard %u "
5056 "mac_discard %u mac_filter_discard %u "
5057 "xxovrflow_discard %u brb_truncate_discard %u "
5058 "ttl0_discard %u\n",
5059 estats->checksum_discard,
5060 estats->packets_too_big_discard,
5061 estats->no_buff_discard, estats->mac_discard,
5062 estats->mac_filter_discard, estats->xxoverflow_discard,
5063 estats->brb_truncate_discard, estats->ttl0_discard);
5065 for_each_queue(bp, i) {
5066 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
5067 bnx2x_fp(bp, i, tx_pkt),
5068 bnx2x_fp(bp, i, rx_pkt),
5069 bnx2x_fp(bp, i, rx_calls));
5073 if (bp->state != BNX2X_STATE_OPEN) {
5074 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
5078 #ifdef BNX2X_STOP_ON_ERROR
5079 if (unlikely(bp->panic))
5084 if (bp->executer_idx) {
5085 struct dmae_command *dmae = &bp->dmae;
5086 int port = bp->port;
5087 int loader_idx = port * 8;
5089 memset(dmae, 0, sizeof(struct dmae_command));
5091 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
5092 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
5093 DMAE_CMD_DST_RESET |
5094 #ifdef __BIG_ENDIAN
5095 DMAE_CMD_ENDIANITY_B_DW_SWAP |
5096 #else
5097 DMAE_CMD_ENDIANITY_DW_SWAP |
5098 #endif
5099 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
5100 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
5101 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
5102 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
5103 sizeof(struct dmae_command) *
5104 (loader_idx + 1)) >> 2;
5105 dmae->dst_addr_hi = 0;
5106 dmae->len = sizeof(struct dmae_command) >> 2;
5107 dmae->len--; /* !!! for A0/1 only */
5108 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
5109 dmae->comp_addr_hi = 0;
5112 bnx2x_post_dmae(bp, dmae, loader_idx);
5115 if (bp->stats_state != STATS_STATE_ENABLE) {
5116 bp->stats_state = STATS_STATE_DISABLE;
5120 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
5121 /* the stats ramrod has its own slot on the spe */
5123 bp->stat_pending = 1;
5127 static void bnx2x_timer(unsigned long data)
5129 struct bnx2x *bp = (struct bnx2x *) data;
5131 if (!netif_running(bp->dev))
5134 if (atomic_read(&bp->intr_sem) != 0)
5138 struct bnx2x_fastpath *fp = &bp->fp[0];
5141 bnx2x_tx_int(fp, 1000);
5142 rc = bnx2x_rx_int(fp, 1000);
5146 int port = bp->port;
5150 ++bp->fw_drv_pulse_wr_seq;
5151 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5152 /* TBD - add SYSTEM_TIME */
5153 drv_pulse = bp->fw_drv_pulse_wr_seq;
5154 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
5156 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
5157 MCP_PULSE_SEQ_MASK);
5158 /* The delta between driver pulse and mcp response
5159 * should be 1 (before mcp response) or 0 (after mcp response)
5161 if ((drv_pulse != mcp_pulse) &&
5162 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5163 /* someone lost a heartbeat... */
5164 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5165 drv_pulse, mcp_pulse);
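/* The check above tolerates the pulse counter wrapping at
 * MCP_PULSE_SEQ_MASK. A minimal sketch of the same predicate in
 * isolation ('pulse_ok_sketch' is illustrative):
 */
#if 0 /* illustrative sketch, not part of the driver */
static int pulse_ok_sketch(u32 drv_pulse, u32 mcp_pulse)
{
	/* healthy if the MCP echoed our pulse (delta 0) or is exactly
	 * one behind us (delta 1), modulo the sequence mask */
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}
#endif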
5169 if (bp->stats_state == STATS_STATE_DISABLE)
5172 bnx2x_update_stats(bp);
5175 mod_timer(&bp->timer, jiffies + bp->current_interval);
5178 /* end of Statistics */
5183 * nic init service functions
5186 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
5187 dma_addr_t mapping, int id)
5189 int port = bp->port;
5194 section = ((u64)mapping) + offsetof(struct host_status_block,
5196 sb->u_status_block.status_block_id = id;
5198 REG_WR(bp, BAR_USTRORM_INTMEM +
5199 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5200 REG_WR(bp, BAR_USTRORM_INTMEM +
5201 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5204 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
5205 REG_WR16(bp, BAR_USTRORM_INTMEM +
5206 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5209 section = ((u64)mapping) + offsetof(struct host_status_block,
5211 sb->c_status_block.status_block_id = id;
5213 REG_WR(bp, BAR_CSTRORM_INTMEM +
5214 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
5215 REG_WR(bp, BAR_CSTRORM_INTMEM +
5216 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
5219 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
5220 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5221 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
5223 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5226 static void bnx2x_init_def_sb(struct bnx2x *bp,
5227 struct host_def_status_block *def_sb,
5228 dma_addr_t mapping, int id)
5230 int port = bp->port;
5231 int index, val, reg_offset;
5235 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5236 atten_status_block);
5237 def_sb->atten_status_block.status_block_id = id;
5239 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5240 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5242 for (index = 0; index < 3; index++) {
5243 bp->attn_group[index].sig[0] = REG_RD(bp,
5244 reg_offset + 0x10*index);
5245 bp->attn_group[index].sig[1] = REG_RD(bp,
5246 reg_offset + 0x4 + 0x10*index);
5247 bp->attn_group[index].sig[2] = REG_RD(bp,
5248 reg_offset + 0x8 + 0x10*index);
5249 bp->attn_group[index].sig[3] = REG_RD(bp,
5250 reg_offset + 0xc + 0x10*index);
5253 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5254 MISC_REG_AEU_MASK_ATTN_FUNC_0));
5256 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5257 HC_REG_ATTN_MSG0_ADDR_L);
5259 REG_WR(bp, reg_offset, U64_LO(section));
5260 REG_WR(bp, reg_offset + 4, U64_HI(section));
5262 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
5264 val = REG_RD(bp, reg_offset);
5266 REG_WR(bp, reg_offset, val);
5269 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5270 u_def_status_block);
5271 def_sb->u_def_status_block.status_block_id = id;
5273 REG_WR(bp, BAR_USTRORM_INTMEM +
5274 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5275 REG_WR(bp, BAR_USTRORM_INTMEM +
5276 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5278 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
5281 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
5282 REG_WR16(bp, BAR_USTRORM_INTMEM +
5283 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5286 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5287 c_def_status_block);
5288 def_sb->c_def_status_block.status_block_id = id;
5290 REG_WR(bp, BAR_CSTRORM_INTMEM +
5291 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5292 REG_WR(bp, BAR_CSTRORM_INTMEM +
5293 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5295 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
5298 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
5299 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5300 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5303 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5304 t_def_status_block);
5305 def_sb->t_def_status_block.status_block_id = id;
5307 REG_WR(bp, BAR_TSTRORM_INTMEM +
5308 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5309 REG_WR(bp, BAR_TSTRORM_INTMEM +
5310 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5312 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
5315 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
5316 REG_WR16(bp, BAR_TSTRORM_INTMEM +
5317 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5320 section = ((u64)mapping) + offsetof(struct host_def_status_block,
5321 x_def_status_block);
5322 def_sb->x_def_status_block.status_block_id = id;
5324 REG_WR(bp, BAR_XSTRORM_INTMEM +
5325 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
5326 REG_WR(bp, BAR_XSTRORM_INTMEM +
5327 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
5329 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
5332 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
5333 REG_WR16(bp, BAR_XSTRORM_INTMEM +
5334 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
5336 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
5339 static void bnx2x_update_coalesce(struct bnx2x *bp)
5341 int port = bp->port;
5344 for_each_queue(bp, i) {
5346 /* HC_INDEX_U_ETH_RX_CQ_CONS */
5347 REG_WR8(bp, BAR_USTRORM_INTMEM +
5348 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5349 HC_INDEX_U_ETH_RX_CQ_CONS),
5350 bp->rx_ticks_int/12);
5351 REG_WR16(bp, BAR_USTRORM_INTMEM +
5352 USTORM_SB_HC_DISABLE_OFFSET(port, i,
5353 HC_INDEX_U_ETH_RX_CQ_CONS),
5354 bp->rx_ticks_int ? 0 : 1);
5356 /* HC_INDEX_C_ETH_TX_CQ_CONS */
5357 REG_WR8(bp, BAR_CSTRORM_INTMEM +
5358 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
5359 HC_INDEX_C_ETH_TX_CQ_CONS),
5360 bp->tx_ticks_int/12);
5361 REG_WR16(bp, BAR_CSTRORM_INTMEM +
5362 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
5363 HC_INDEX_C_ETH_TX_CQ_CONS),
5364 bp->tx_ticks_int ? 0 : 1);
5368 static void bnx2x_init_rx_rings(struct bnx2x *bp)
5372 int port = bp->port;
5374 bp->rx_buf_use_size = bp->dev->mtu;
5376 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
5377 bp->rx_buf_size = bp->rx_buf_use_size + 64;
5379 for_each_queue(bp, j) {
5380 struct bnx2x_fastpath *fp = &bp->fp[j];
5383 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
5385 for (i = 1; i <= NUM_RX_RINGS; i++) {
5386 struct eth_rx_bd *rx_bd;
5388 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
5390 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
5391 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
5393 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
5394 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
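/* The last BD of each ring page is a "next page" pointer, and the
 * (i % NUM_RX_RINGS) arithmetic wraps the final page back to the first,
 * chaining the pages into a circle. A sketch of the address each link
 * receives ('page' counts from 1, as in the loop above):
 */
#if 0 /* illustrative sketch, not part of the driver */
static dma_addr_t next_page_addr_sketch(dma_addr_t ring_base,
					int page, int num_pages)
{
	/* the link in page 'num_pages' points back at page 0 */
	return ring_base + BCM_PAGE_SIZE * (page % num_pages);
}
#endif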
5398 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
5399 struct eth_rx_cqe_next_page *nextpg;
5401 nextpg = (struct eth_rx_cqe_next_page *)
5402 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
5404 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
5405 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5407 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
5408 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
5411 /* rx completion queue */
5412 fp->rx_comp_cons = ring_prod = 0;
5414 for (i = 0; i < bp->rx_ring_size; i++) {
5415 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5416 BNX2X_ERR("was only able to allocate "
5420 ring_prod = NEXT_RX_IDX(ring_prod);
5421 BUG_TRAP(ring_prod > i);
5424 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
5425 fp->rx_pkt = fp->rx_calls = 0;
5427 /* Warning! this will generate an interrupt (to the TSTORM) */
5428 /* and must only be done after the chip has been initialized */
5429 REG_WR(bp, BAR_TSTRORM_INTMEM +
5430 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
5434 REG_WR(bp, BAR_USTRORM_INTMEM +
5435 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
5436 U64_LO(fp->rx_comp_mapping));
5437 REG_WR(bp, BAR_USTRORM_INTMEM +
5438 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
5439 U64_HI(fp->rx_comp_mapping));
5443 static void bnx2x_init_tx_ring(struct bnx2x *bp)
5447 for_each_queue(bp, j) {
5448 struct bnx2x_fastpath *fp = &bp->fp[j];
5450 for (i = 1; i <= NUM_TX_RINGS; i++) {
5451 struct eth_tx_bd *tx_bd =
5452 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
5455 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
5456 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5458 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
5459 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5462 fp->tx_pkt_prod = 0;
5463 fp->tx_pkt_cons = 0;
5466 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5471 static void bnx2x_init_sp_ring(struct bnx2x *bp)
5473 int port = bp->port;
5475 spin_lock_init(&bp->spq_lock);
5477 bp->spq_left = MAX_SPQ_PENDING;
5478 bp->spq_prod_idx = 0;
5479 bp->dsb_sp_prod_idx = 0;
5480 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5481 bp->spq_prod_bd = bp->spq;
5482 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5484 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
5485 U64_LO(bp->spq_mapping));
5486 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
5487 U64_HI(bp->spq_mapping));
5489 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
5493 static void bnx2x_init_context(struct bnx2x *bp)
5497 for_each_queue(bp, i) {
5498 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5499 struct bnx2x_fastpath *fp = &bp->fp[i];
5501 context->xstorm_st_context.tx_bd_page_base_hi =
5502 U64_HI(fp->tx_desc_mapping);
5503 context->xstorm_st_context.tx_bd_page_base_lo =
5504 U64_LO(fp->tx_desc_mapping);
5505 context->xstorm_st_context.db_data_addr_hi =
5506 U64_HI(fp->tx_prods_mapping);
5507 context->xstorm_st_context.db_data_addr_lo =
5508 U64_LO(fp->tx_prods_mapping);
5510 context->ustorm_st_context.rx_bd_page_base_hi =
5511 U64_HI(fp->rx_desc_mapping);
5512 context->ustorm_st_context.rx_bd_page_base_lo =
5513 U64_LO(fp->rx_desc_mapping);
5514 context->ustorm_st_context.status_block_id = i;
5515 context->ustorm_st_context.sb_index_number =
5516 HC_INDEX_U_ETH_RX_CQ_CONS;
5517 context->ustorm_st_context.rcq_base_address_hi =
5518 U64_HI(fp->rx_comp_mapping);
5519 context->ustorm_st_context.rcq_base_address_lo =
5520 U64_LO(fp->rx_comp_mapping);
5521 context->ustorm_st_context.flags =
5522 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
5523 context->ustorm_st_context.mc_alignment_size = 64;
5524 context->ustorm_st_context.num_rss = bp->num_queues;
5526 context->cstorm_st_context.sb_index_number =
5527 HC_INDEX_C_ETH_TX_CQ_CONS;
5528 context->cstorm_st_context.status_block_id = i;
5530 context->xstorm_ag_context.cdu_reserved =
5531 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5532 CDU_REGION_NUMBER_XCM_AG,
5533 ETH_CONNECTION_TYPE);
5534 context->ustorm_ag_context.cdu_usage =
5535 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5536 CDU_REGION_NUMBER_UCM_AG,
5537 ETH_CONNECTION_TYPE);
5541 static void bnx2x_init_ind_table(struct bnx2x *bp)
5543 int port = bp->port;
5549 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
5550 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
5551 i % bp->num_queues);
5553 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
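/* The indirection table spreads RSS hash results round-robin across the
 * active queues: entry i simply holds queue (i % num_queues). A sketch
 * of the fill against an illustrative local table:
 */
#if 0 /* illustrative sketch, not part of the driver */
static void ind_table_sketch(u8 *table, int table_size, int num_queues)
{
	int i;

	for (i = 0; i < table_size; i++)
		table[i] = i % num_queues;	/* hash bucket -> queue id */
}
#endif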
5556 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5558 int mode = bp->rx_mode;
5559 int port = bp->port;
5560 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5563 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
5566 case BNX2X_RX_MODE_NONE: /* no Rx */
5567 tstorm_mac_filter.ucast_drop_all = 1;
5568 tstorm_mac_filter.mcast_drop_all = 1;
5569 tstorm_mac_filter.bcast_drop_all = 1;
5570 break;
5571 case BNX2X_RX_MODE_NORMAL:
5572 tstorm_mac_filter.bcast_accept_all = 1;
5573 break;
5574 case BNX2X_RX_MODE_ALLMULTI:
5575 tstorm_mac_filter.mcast_accept_all = 1;
5576 tstorm_mac_filter.bcast_accept_all = 1;
5577 break;
5578 case BNX2X_RX_MODE_PROMISC:
5579 tstorm_mac_filter.ucast_accept_all = 1;
5580 tstorm_mac_filter.mcast_accept_all = 1;
5581 tstorm_mac_filter.bcast_accept_all = 1;
5582 break;
5583 default:
5584 BNX2X_ERR("bad rx mode (%d)\n", mode);
5587 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5588 REG_WR(bp, BAR_TSTRORM_INTMEM +
5589 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
5590 ((u32 *)&tstorm_mac_filter)[i]);
5592 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
5593 ((u32 *)&tstorm_mac_filter)[i]); */
5597 static void bnx2x_set_client_config(struct bnx2x *bp, int client_id)
5600 int mode = bp->rx_mode;
5602 int port = bp->port;
5603 struct tstorm_eth_client_config tstorm_client = {0};
5605 tstorm_client.mtu = bp->dev->mtu;
5606 tstorm_client.statistics_counter_id = 0;
5607 tstorm_client.config_flags =
5608 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
5610 if (mode && bp->vlgrp) {
5611 tstorm_client.config_flags |=
5612 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
5613 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5616 tstorm_client.drop_flags = (TSTORM_ETH_CLIENT_CONFIG_DROP_IP_CS_ERR |
5617 TSTORM_ETH_CLIENT_CONFIG_DROP_TCP_CS_ERR |
5618 TSTORM_ETH_CLIENT_CONFIG_DROP_UDP_CS_ERR |
5619 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR);
5621 REG_WR(bp, BAR_TSTRORM_INTMEM +
5622 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id),
5623 ((u32 *)&tstorm_client)[0]);
5624 REG_WR(bp, BAR_TSTRORM_INTMEM +
5625 TSTORM_CLIENT_CONFIG_OFFSET(port, client_id) + 4,
5626 ((u32 *)&tstorm_client)[1]);
5628 /* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
5629 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
5632 static void bnx2x_init_internal(struct bnx2x *bp)
5634 int port = bp->port;
5635 struct tstorm_eth_function_common_config tstorm_config = {0};
5636 struct stats_indication_flags stats_flags = {0};
5640 tstorm_config.config_flags = MULTI_FLAGS;
5641 tstorm_config.rss_result_mask = MULTI_MASK;
5644 REG_WR(bp, BAR_TSTRORM_INTMEM +
5645 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
5646 (*(u32 *)&tstorm_config));
5648 /* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
5649 (*(u32 *)&tstorm_config)); */
5651 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5652 bnx2x_set_storm_rx_mode(bp);
5654 for_each_queue(bp, i)
5655 bnx2x_set_client_config(bp, i);
5658 stats_flags.collect_eth = cpu_to_le32(1);
5660 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
5661 ((u32 *)&stats_flags)[0]);
5662 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
5663 ((u32 *)&stats_flags)[1]);
5665 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
5666 ((u32 *)&stats_flags)[0]);
5667 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
5668 ((u32 *)&stats_flags)[1]);
5670 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
5671 ((u32 *)&stats_flags)[0]);
5672 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
5673 ((u32 *)&stats_flags)[1]);
5675 /* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
5676 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
5679 static void bnx2x_nic_init(struct bnx2x *bp)
5683 for_each_queue(bp, i) {
5684 struct bnx2x_fastpath *fp = &bp->fp[i];
5686 fp->state = BNX2X_FP_STATE_CLOSED;
5687 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
5688 bp, fp->status_blk, i);
5690 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
5693 bnx2x_init_def_sb(bp, bp->def_status_blk,
5694 bp->def_status_blk_mapping, 0x10);
5695 bnx2x_update_coalesce(bp);
5696 bnx2x_init_rx_rings(bp);
5697 bnx2x_init_tx_ring(bp);
5698 bnx2x_init_sp_ring(bp);
5699 bnx2x_init_context(bp);
5700 bnx2x_init_internal(bp);
5701 bnx2x_init_stats(bp);
5702 bnx2x_init_ind_table(bp);
5703 bnx2x_enable_int(bp);
5707 /* end of nic init */
5710 * gzip service functions
5713 static int bnx2x_gunzip_init(struct bnx2x *bp)
5715 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5716 &bp->gunzip_mapping);
5717 if (bp->gunzip_buf == NULL)
5720 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5721 if (bp->strm == NULL)
5724 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5726 if (bp->strm->workspace == NULL)
5736 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5737 bp->gunzip_mapping);
5738 bp->gunzip_buf = NULL;
5741 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5742 " decompression\n", bp->dev->name);
5746 static void bnx2x_gunzip_end(struct bnx2x *bp)
5748 kfree(bp->strm->workspace);
5753 if (bp->gunzip_buf) {
5754 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5755 bp->gunzip_mapping);
5756 bp->gunzip_buf = NULL;
5760 static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5764 /* check gzip header */
5765 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5772 if (zbuf[3] & FNAME)
5773 while ((n < len) && (zbuf[n++] != 0)); /* bounds check before the read */
5775 bp->strm->next_in = zbuf + n;
5776 bp->strm->avail_in = len - n;
5777 bp->strm->next_out = bp->gunzip_buf;
5778 bp->strm->avail_out = FW_BUF_SIZE;
5780 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5784 rc = zlib_inflate(bp->strm, Z_FINISH);
5785 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5786 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5787 bp->dev->name, bp->strm->msg);
5789 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5790 if (bp->gunzip_outlen & 0x3)
5791 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5792 " gunzip_outlen (%d) not aligned\n",
5793 bp->dev->name, bp->gunzip_outlen);
5794 bp->gunzip_outlen >>= 2;
5796 zlib_inflateEnd(bp->strm);
5798 if (rc == Z_STREAM_END)
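/* bnx2x_gunzip() parses the gzip wrapper by hand because zlib_inflate()
 * is handed a raw deflate stream (-MAX_WBITS). A sketch of just the
 * header-skip logic, covering only the FNAME optional field as the code
 * above does ('gzip_payload_offset_sketch' is illustrative):
 */
#if 0 /* illustrative sketch, not part of the driver */
static int gzip_payload_offset_sketch(const u8 *zbuf, int len)
{
	int n = 10;			/* fixed gzip header is 10 bytes */

	if ((len < 10) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
	    (zbuf[2] != Z_DEFLATED))
		return -EINVAL;		/* not a gzip/deflate image */

	if (zbuf[3] & FNAME)		/* skip the NUL-terminated name */
		while ((n < len) && (zbuf[n++] != 0))
			;

	return n;			/* deflate data starts here */
}
#endif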
5804 /* nic load/unload */
5807 * general service functions
5810 /* send a NIG loopback debug packet */
5811 static void bnx2x_lb_pckt(struct bnx2x *bp)
5817 /* Ethernet source and destination addresses */
5819 wb_write[0] = 0x55555555;
5820 wb_write[1] = 0x55555555;
5821 wb_write[2] = 0x20; /* SOP */
5822 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5824 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
5825 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5827 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
5830 /* NON-IP protocol */
5832 wb_write[0] = 0x09000000;
5833 wb_write[1] = 0x55555555;
5834 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5835 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5837 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
5838 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
5839 /* EOP, eop_bvalid = 0 */
5840 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
5844 /* some of the internal memories
5845 * are not directly readable from the driver,
5846 * so to test them we send debug packets
5848 static int bnx2x_int_mem_test(struct bnx2x *bp)
5854 switch (CHIP_REV(bp)) {
5866 DP(NETIF_MSG_HW, "start part1\n");
5868 /* Disable inputs of parser neighbor blocks */
5869 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5870 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5871 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5872 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5874 /* Write 0 to parser credits for CFC search request */
5875 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5877 /* send Ethernet packet */
5880 /* TODO: should the NIG statistics be reset here? */
5881 /* Wait until NIG register shows 1 packet of size 0x10 */
5882 count = 1000 * factor;
5884 #ifdef BNX2X_DMAE_RD
5885 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5886 val = *bnx2x_sp(bp, wb_data[0]);
5887 #else
5888 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5889 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5890 #endif
5898 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5902 /* Wait until PRS register shows 1 packet */
5903 count = 1000 * factor;
5905 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5914 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5918 /* Reset and init BRB, PRS */
5919 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
5921 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
5923 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5924 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5926 DP(NETIF_MSG_HW, "part2\n");
5928 /* Disable inputs of parser neighbor blocks */
5929 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5930 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5931 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5932 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
5934 /* Write 0 to parser credits for CFC search request */
5935 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5937 /* send 10 Ethernet packets */
5938 for (i = 0; i < 10; i++)
5941 /* Wait until NIG register shows 10 + 1
5942 packets of size 11*0x10 = 0xb0 */
5943 count = 1000 * factor;
5945 #ifdef BNX2X_DMAE_RD
5946 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5947 val = *bnx2x_sp(bp, wb_data[0]);
5948 #else
5949 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
5950 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
5951 #endif
5959 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5963 /* Wait until PRS register shows 2 packets */
5964 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5966 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5968 /* Write 1 to parser credits for CFC search request */
5969 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5971 /* Wait until PRS register shows 3 packets */
5972 msleep(10 * factor);
5973 /* Wait until NIG register shows 1 packet of size 0x10 */
5974 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5976 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5978 /* clear NIG EOP FIFO */
5979 for (i = 0; i < 11; i++)
5980 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5981 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5983 BNX2X_ERR("clear of NIG failed\n");
5987 /* Reset and init BRB, PRS, NIG */
5988 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5990 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5992 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5993 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5996 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5999 /* Enable inputs of parser neighbor blocks */
6000 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6001 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6002 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
6003 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
6005 DP(NETIF_MSG_HW, "done\n");
6010 static void enable_blocks_attention(struct bnx2x *bp)
6012 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6013 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
6014 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6015 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6016 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6017 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6018 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6019 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6020 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
6021 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6022 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
6023 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6024 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6025 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
6026 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6027 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
6028 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6029 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6030 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6031 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
6032 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6033 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
6034 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
6035 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6036 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6037 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
6038 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
6039 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
6040 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6041 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
6042 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
6043 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
6046 static int bnx2x_function_init(struct bnx2x *bp, int mode)
6048 int func = bp->port;
6049 int port = func ? PORT1 : PORT0;
6055 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
6056 if ((func != 0) && (func != 1)) {
6057 BNX2X_ERR("BAD function number (%d)\n", func);
6061 bnx2x_gunzip_init(bp);
6063 if (mode & 0x1) { /* init common */
6064 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
6066 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6068 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6070 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
6072 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6074 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
6076 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
6077 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
6081 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6082 /* enable HW interrupt from PXP on USDM
6083 overflow bit 16 on INT_MASK_0 */
6084 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6088 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6089 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6090 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6091 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6092 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
6093 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
6095 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6096 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6097 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6098 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6099 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
6104 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6107 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
6109 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6110 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6111 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
6114 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
6116 /* let the HW do its magic ... */
6117 msleep(100);
6118 /* finish PXP init
6119 (can be moved up if we want to use the DMAE) */
6120 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6122 BNX2X_ERR("PXP2 CFG failed\n");
6126 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6128 BNX2X_ERR("PXP2 RD_INIT failed\n");
6132 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6133 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
6135 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
6137 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
6138 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
6139 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
6140 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
6142 #ifdef BNX2X_DMAE_RD
6143 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6144 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6145 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6146 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6147 #else
6148 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
6149 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
6150 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
6151 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
6152 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
6153 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
6154 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
6155 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
6156 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
6157 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
6158 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
6159 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
6160 #endif
6161 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
6162 /* soft reset pulse */
6163 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6164 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6167 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
6169 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
6170 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
6171 if (CHIP_REV(bp) == CHIP_REV_Ax) {
6172 /* enable hw interrupt from doorbell Q */
6173 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6176 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
6178 if (CHIP_REV_IS_SLOW(bp)) {
6179 /* fix for emulation and FPGA for no pause */
6180 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
6181 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
6182 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
6183 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
6186 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
6188 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
6189 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
6190 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
6191 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
6193 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6194 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6195 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6196 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE);
6198 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
6199 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
6200 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
6201 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
6204 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6206 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6209 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
6210 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
6211 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
6213 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6214 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6215 REG_WR(bp, i, 0xc0cac01a);
6216 /* TODO: replace with something meaningful */
6218 /* SRCH COMMON comes here */
6219 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6221 if (sizeof(union cdu_context) != 1024) {
6222 /* we currently assume that a context is 1024 bytes */
6223 printk(KERN_ALERT PFX "please adjust the size of"
6224 " cdu_context(%ld)\n",
6225 (long)sizeof(union cdu_context));
6227 val = (4 << 24) + (0 << 12) + 1024;
6228 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
6229 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
6231 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
6232 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
6234 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
6235 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
6236 MISC_AEU_COMMON_END);
6237 /* RXPCS COMMON comes here */
6238 /* EMAC0 COMMON comes here */
6239 /* EMAC1 COMMON comes here */
6240 /* DBU COMMON comes here */
6241 /* DBG COMMON comes here */
6242 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
6244 if (CHIP_REV_IS_SLOW(bp))
6247 /* finish CFC init */
6248 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
6250 BNX2X_ERR("CFC LL_INIT failed\n");
6254 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
6256 BNX2X_ERR("CFC AC_INIT failed\n");
6260 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
6262 BNX2X_ERR("CFC CAM_INIT failed\n");
6266 REG_WR(bp, CFC_REG_DEBUG0, 0);
6268 /* read the NIG statistic
6269 to see if this is our first bring-up since power-up */
6270 #ifdef BNX2X_DMAE_RD
6271 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6272 val = *bnx2x_sp(bp, wb_data[0]);
6273 #else
6274 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
6275 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
6276 #endif
6277 /* do internal memory self test */
6278 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6279 BNX2X_ERR("internal mem selftest failed\n");
6283 /* clear PXP2 attentions */
6284 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
6286 enable_blocks_attention(bp);
6287 /* enable_blocks_parity(bp); */
6289 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6290 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6291 /* Fan failure is indicated by SPIO 5 */
6292 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
6293 MISC_REGISTERS_SPIO_INPUT_HI_Z);
6295 /* set to active low mode */
6296 val = REG_RD(bp, MISC_REG_SPIO_INT);
6297 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
6298 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
6299 REG_WR(bp, MISC_REG_SPIO_INT, val);
6301 /* enable interrupt to signal the IGU */
6302 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6303 val |= (1 << MISC_REGISTERS_SPIO_5);
6304 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6305 break;
6307 default:
6308 break;
6309 }
6311 } /* end of common init */
6315 /* the physical address is shifted right 12 bits and a
6316 1 (valid) bit is added at the 53rd bit;
6317 then, since this is a wide register(TM),
6318 we split it into two 32-bit writes
6320 #define RQ_ONCHIP_AT_PORT_SIZE 384
6321 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6322 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6323 #define PXP_ONE_ILT(x) ((x << 10) | x)
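/* Taken together, the two ONCHIP_ADDR halves encode one 64-bit ILT
 * entry: the 4K-page number (phys >> 12) in the low bits and a valid
 * flag at bit 52 (the "53rd bit" above). A sketch of the same encoding
 * done in a single u64, assuming addresses below 2^52:
 */
#if 0 /* illustrative sketch, not part of the driver */
static u64 onchip_ilt_entry_sketch(u64 phys)
{
	u64 entry = phys >> 12;		/* 4K-page number */

	entry |= 1ULL << 52;		/* valid bit */
	/* low 32 bits == ONCHIP_ADDR1(phys),
	 * high 32 bits == ONCHIP_ADDR2(phys) */
	return entry;
}
#endif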
6325 DP(BNX2X_MSG_MCP, "starting per-function init port is %x\n", func);
6327 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
6329 /* Port PXP comes here */
6330 /* Port PXP2 comes here */
6335 i = func * RQ_ONCHIP_AT_PORT_SIZE;
6337 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
6338 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
6339 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6341 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
6342 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
6343 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
6344 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
6346 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
6352 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6353 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6354 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6355 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6360 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6361 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6362 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6363 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6368 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6369 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6370 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6371 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6374 /* Port TCM comes here */
6375 /* Port UCM comes here */
6376 /* Port CCM comes here */
6377 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
6378 func ? XCM_PORT1_END : XCM_PORT0_END);
6384 for (i = 0; i < 32; i++) {
6385 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
6387 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
6389 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
6390 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
6393 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
6395 /* Port QM comes here */
6398 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6399 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6401 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
6402 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
6404 /* Port DQ comes here */
6405 /* Port BRB1 comes here */
6406 bnx2x_init_block(bp, func ? PRS_PORT1_START : PRS_PORT0_START,
6407 func ? PRS_PORT1_END : PRS_PORT0_END);
6408 /* Port TSDM comes here */
6409 /* Port CSDM comes here */
6410 /* Port USDM comes here */
6411 /* Port XSDM comes here */
6412 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
6413 func ? TSEM_PORT1_END : TSEM_PORT0_END);
6414 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
6415 func ? USEM_PORT1_END : USEM_PORT0_END);
6416 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
6417 func ? CSEM_PORT1_END : CSEM_PORT0_END);
6418 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
6419 func ? XSEM_PORT1_END : XSEM_PORT0_END);
6420 /* Port UPB comes here */
6421 /* Port XSDM comes here */
6422 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
6423 func ? PBF_PORT1_END : PBF_PORT0_END);
6425 /* configure PBF to work without PAUSE (MTU 9000) */
6426 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
6428 /* update threshold */
6429 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
6430 /* update init credit */
6431 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
6434 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
6436 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
6439 /* tell the searcher where the T2 table is */
6440 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6442 wb_write[0] = U64_LO(bp->t2_mapping);
6443 wb_write[1] = U64_HI(bp->t2_mapping);
6444 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6445 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6446 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6447 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6449 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6450 /* Port SRCH comes here */
6452 /* Port CDU comes here */
6453 /* Port CFC comes here */
6454 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
6455 func ? HC_PORT1_END : HC_PORT0_END);
6456 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
6457 MISC_AEU_PORT0_START,
6458 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
6459 /* Port PXPCS comes here */
6460 /* Port EMAC0 comes here */
6461 /* Port EMAC1 comes here */
6462 /* Port DBU comes here */
6463 /* Port DBG comes here */
6464 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
6465 func ? NIG_PORT1_END : NIG_PORT0_END);
6466 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
6467 /* Port MCP comes here */
6468 /* Port DMAE comes here */
6470 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
6471 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
6472 /* add SPIO 5 to group 0 */
6473 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6474 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6475 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
6476 break;
6478 default:
6479 break;
6480 }
6482 bnx2x_link_reset(bp);
6484 /* Reset PCIE errors for debug */
6485 REG_WR(bp, 0x2114, 0xffffffff);
6486 REG_WR(bp, 0x2120, 0xffffffff);
6487 REG_WR(bp, 0x2814, 0xffffffff);
6489 /* !!! move to init_values.h */
6490 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6491 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6492 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6493 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
6495 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
6496 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
6497 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
6498 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
6500 bnx2x_gunzip_end(bp);
6505 bp->fw_drv_pulse_wr_seq =
6506 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
6507 DRV_PULSE_SEQ_MASK);
6508 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
6509 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
6510 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
6518 /* send the MCP a request, block until there is a reply */
6519 static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6521 int port = bp->port;
6522 u32 seq = ++bp->fw_seq;
6525 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
6526 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6528 /* let the FW do its magic ... */
6529 msleep(100); /* TBD */
6531 if (CHIP_REV_IS_SLOW(bp))
6534 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
6535 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
6537 /* is this a reply to our command? */
6538 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6539 rc &= FW_MSG_CODE_MASK;
6543 BNX2X_ERR("FW failed to respond!\n");
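/* Illustrative usage sketch (hypothetical helper, not part of the
 * driver): callers pair a *_REQ command with a check of the reply
 * code; bnx2x_fw_command() returns 0 when the MCP did not answer.
 */
static int __maybe_unused bnx2x_fw_load_request(struct bnx2x *bp)
{
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

	if (!load_code)
		return -EBUSY;		/* no reply from the MCP */

	if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
		return -EBUSY;		/* other port in diagnostic mode */

	/* any other code tells the caller whether the common
	   blocks still need to be initialized */
	return 0;
}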
6551 static void bnx2x_free_mem(struct bnx2x *bp)
6554 #define BNX2X_PCI_FREE(x, y, size) \
6557 pci_free_consistent(bp->pdev, size, x, y); \
6563 #define BNX2X_FREE(x) \
6574 for_each_queue(bp, i) {
6577 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6578 bnx2x_fp(bp, i, status_blk_mapping),
6579 sizeof(struct host_status_block) +
6580 sizeof(struct eth_tx_db_data));
6582 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6583 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6584 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6585 bnx2x_fp(bp, i, tx_desc_mapping),
6586 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6588 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6589 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6590 bnx2x_fp(bp, i, rx_desc_mapping),
6591 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6593 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6594 bnx2x_fp(bp, i, rx_comp_mapping),
6595 sizeof(struct eth_fast_path_rx_cqe) *
6601 /* end of fastpath */
6603 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6604 (sizeof(struct host_def_status_block)));
6606 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6607 (sizeof(struct bnx2x_slowpath)));
6610 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6611 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6612 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6613 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6615 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
6617 #undef BNX2X_PCI_FREE
6621 static int bnx2x_alloc_mem(struct bnx2x *bp)
6624 #define BNX2X_PCI_ALLOC(x, y, size) \
6626 x = pci_alloc_consistent(bp->pdev, size, y); \
6628 goto alloc_mem_err; \
6629 memset(x, 0, size); \
6632 #define BNX2X_ALLOC(x, size) \
6634 x = vmalloc(size); \
6636 goto alloc_mem_err; \
6637 memset(x, 0, size); \
6643 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
6645 for_each_queue(bp, i) {
6646 bnx2x_fp(bp, i, bp) = bp;
6649 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6650 &bnx2x_fp(bp, i, status_blk_mapping),
6651 sizeof(struct host_status_block) +
6652 sizeof(struct eth_tx_db_data));
6654 bnx2x_fp(bp, i, hw_tx_prods) =
6655 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6657 bnx2x_fp(bp, i, tx_prods_mapping) =
6658 bnx2x_fp(bp, i, status_blk_mapping) +
6659 sizeof(struct host_status_block);
6661 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
6662 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6663 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6664 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6665 &bnx2x_fp(bp, i, tx_desc_mapping),
6666 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6668 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6669 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6670 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6671 &bnx2x_fp(bp, i, rx_desc_mapping),
6672 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6674 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6675 &bnx2x_fp(bp, i, rx_comp_mapping),
6676 sizeof(struct eth_fast_path_rx_cqe) *
6680 /* end of fastpath */
6682 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6683 sizeof(struct host_def_status_block));
6685 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6686 sizeof(struct bnx2x_slowpath));
6689 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6692 for (i = 0; i < 64*1024; i += 64) {
6693 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6694 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6697 /* allocate searcher T2 table:
6698 we allocate 1/4 of the T1 allocation for T2
6699 (which is not entered into the ILT) */
6700 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6703 for (i = 0; i < 16*1024; i += 64)
6704 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6706 /* now fix up the last entry so the list wraps back to the start of the table */
6707 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6709 /* Timer block array (MAX_CONN*8 bytes), phys uncached; for now 1024 conns */
6710 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6712 /* QM queues (128*MAX_CONN) */
6713 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6716 /* Slow path ring */
6717 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6725 #undef BNX2X_PCI_ALLOC
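/* Illustrative sketch (hypothetical debug helper, not used by the
 * driver): the T2 init loop above links the 256 64-byte elements into
 * a circular free list, bytes 56..63 of each element holding the bus
 * address of the next one and the last element wrapping to the head.
 * Walking the links must lead back to bp->t2_mapping:
 */
static void __maybe_unused bnx2x_t2_check_links(struct bnx2x *bp)
{
	u64 next = bp->t2_mapping;
	int n;

	for (n = 0; n < 16*1024/64; n++)
		next = *(u64 *)((char *)bp->t2 +
				(next - bp->t2_mapping) + 56);

	WARN_ON(next != bp->t2_mapping);	/* list must be circular */
}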
6729 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6733 for_each_queue(bp, i) {
6734 struct bnx2x_fastpath *fp = &bp->fp[i];
6736 u16 bd_cons = fp->tx_bd_cons;
6737 u16 sw_prod = fp->tx_pkt_prod;
6738 u16 sw_cons = fp->tx_pkt_cons;
6740 BUG_TRAP(fp->tx_buf_ring != NULL);
6742 while (sw_cons != sw_prod) {
6743 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6749 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6753 for_each_queue(bp, j) {
6754 struct bnx2x_fastpath *fp = &bp->fp[j];
6756 BUG_TRAP(fp->rx_buf_ring != NULL);
6758 for (i = 0; i < NUM_RX_BD; i++) {
6759 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6760 struct sk_buff *skb = rx_buf->skb;
6765 pci_unmap_single(bp->pdev,
6766 pci_unmap_addr(rx_buf, mapping),
6767 bp->rx_buf_use_size,
6768 PCI_DMA_FROMDEVICE);
6776 static void bnx2x_free_skbs(struct bnx2x *bp)
6778 bnx2x_free_tx_skbs(bp);
6779 bnx2x_free_rx_skbs(bp);
6782 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6786 free_irq(bp->msix_table[0].vector, bp->dev);
6787 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6788 bp->msix_table[0].vector);
6790 for_each_queue(bp, i) {
6791 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6792 "state(%x)\n", i, bp->msix_table[i + 1].vector,
6793 bnx2x_fp(bp, i, state));
6795 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED) {
6797 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
6798 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_CLOSED;
6801 DP(NETIF_MSG_IFDOWN, "irq not freed\n");
6807 static void bnx2x_free_irq(struct bnx2x *bp)
6810 if (bp->flags & USING_MSIX_FLAG) {
6812 bnx2x_free_msix_irqs(bp);
6813 pci_disable_msix(bp->pdev);
6815 bp->flags &= ~USING_MSIX_FLAG;
6818 free_irq(bp->pdev->irq, bp->dev);
6821 static int bnx2x_enable_msix(struct bnx2x *bp)
6826 bp->msix_table[0].entry = 0;
6827 for_each_queue(bp, i)
6828 bp->msix_table[i + 1].entry = i + 1;
6830 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
6831 bp->num_queues + 1)) {
6832 BNX2X_ERR("failed to enable msix\n");
6837 bp->flags |= USING_MSIX_FLAG;
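/* Layout note (illustrative): entry 0 of msix_table carries the
 * slowpath vector and entries 1..num_queues map 1:1 onto the fastpath
 * rings, which is why bnx2x_free_msix_irqs() above and
 * bnx2x_req_msix_irqs() below address the fastpath vectors as
 * msix_table[i + 1].
 */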
6844 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6849 DP(NETIF_MSG_IFUP, "about to request sp irq\n");
6851 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6852 bp->dev->name, bp->dev);
6855 BNX2X_ERR("request sp irq failed\n");
6859 for_each_queue(bp, i) {
6860 rc = request_irq(bp->msix_table[i + 1].vector,
6861 bnx2x_msix_fp_int, 0,
6862 bp->dev->name, &bp->fp[i]);
6865 BNX2X_ERR("request fp #%d irq failed\n", i);
6866 bnx2x_free_msix_irqs(bp);
6870 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6878 static int bnx2x_req_irq(struct bnx2x *bp)
6881 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
6882 IRQF_SHARED, bp->dev->name, bp->dev);
6884 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6891 * Init service functions
6894 static void bnx2x_set_mac_addr(struct bnx2x *bp)
6896 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6899 * unicasts 0-31:port0 32-63:port1
6900 * multicasts 64-127:port0 128-191:port1
6902 config->hdr.length_6b = 2;
6903 config->hdr.offset = bp->port ? 32 : 0;
6904 config->hdr.reserved0 = 0;
6905 config->hdr.reserved1 = 0;
6908 config->config_table[0].cam_entry.msb_mac_addr =
6909 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6910 config->config_table[0].cam_entry.middle_mac_addr =
6911 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6912 config->config_table[0].cam_entry.lsb_mac_addr =
6913 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6914 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
6915 config->config_table[0].target_table_entry.flags = 0;
6916 config->config_table[0].target_table_entry.client_id = 0;
6917 config->config_table[0].target_table_entry.vlan_id = 0;
6919 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
6920 config->config_table[0].cam_entry.msb_mac_addr,
6921 config->config_table[0].cam_entry.middle_mac_addr,
6922 config->config_table[0].cam_entry.lsb_mac_addr);
6925 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6926 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6927 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6928 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
6929 config->config_table[1].target_table_entry.flags =
6930 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6931 config->config_table[1].target_table_entry.client_id = 0;
6932 config->config_table[1].target_table_entry.vlan_id = 0;
6934 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6935 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6936 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
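/* Worked example (illustrative): the CAM keeps the MAC as three
 * byte-swapped 16-bit words, so for the address 00:10:18:aa:bb:cc the
 * swab16() loads above yield, on a little-endian host,
 * msb = 0x0010, middle = 0x18aa and lsb = 0xbbcc.
 */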
6939 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6940 int *state_p, int poll)
6942 /* can take a while if any port is running */
6945 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6946 poll ? "polling" : "waiting", state, idx);
6953 bnx2x_rx_int(bp->fp, 10);
6954 /* if the index is different from 0,
6955 * the reply for some commands will
6956 * be on a non-default queue
6959 bnx2x_rx_int(&bp->fp[idx], 10);
6962 mb(); /* state is changed by bnx2x_sp_event()*/
6964 if (*state_p != state)
6973 BNX2X_ERR("timeout waiting for ramrod %d on %d\n", state, idx);
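/* Note (illustrative): with poll set the caller drives the Rx
 * completion rings itself via the bnx2x_rx_int() calls above, since
 * the ramrod completes on an Rx queue whose interrupt may not be
 * enabled yet; without poll it simply waits for bnx2x_sp_event() to
 * update *state_p, hence the mb() before re-reading it.
 */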
6978 static int bnx2x_setup_leading(struct bnx2x *bp)
6981 /* reset IGU state */
6982 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6985 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6987 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6991 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6994 /* reset IGU state */
6995 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6997 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6998 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
7000 /* Wait for completion */
7001 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
7002 &(bp->fp[index].state), 1);
7007 static int bnx2x_poll(struct napi_struct *napi, int budget);
7008 static void bnx2x_set_rx_mode(struct net_device *dev);
7010 static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
7015 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7017 /* Send LOAD_REQUEST command to the MCP.
7018 Returns the type of LOAD command: if this is the
7019 first port to be initialized, the common blocks should be
7020 initialized as well; otherwise they should not.
7023 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7024 if (rc == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7025 return -EBUSY; /* other port in diagnostic mode */
7028 rc = FW_MSG_CODE_DRV_LOAD_COMMON;
7031 /* if we can't use MSI-X we only need one fp,
7032 * so try to enable MSI-X with the requested number of fp's
7033 * and fall back to INT#A with one fp
7039 if ((use_multi > 1) && (use_multi <= 16))
7040 /* user requested number */
7041 bp->num_queues = use_multi;
7042 else if (use_multi == 1)
7043 bp->num_queues = num_online_cpus();
7047 if (bnx2x_enable_msix(bp)) {
7048 /* failed to enable msix */
7051 BNX2X_ERR("Multi requested but failed"
7052 " to enable MSI-X\n");
7057 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
7059 if (bnx2x_alloc_mem(bp))
7063 if (bp->flags & USING_MSIX_FLAG) {
7064 if (bnx2x_req_msix_irqs(bp)) {
7065 pci_disable_msix(bp->pdev);
7070 if (bnx2x_req_irq(bp)) {
7071 BNX2X_ERR("IRQ request failed, aborting\n");
7077 for_each_queue(bp, i)
7078 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7083 if (bnx2x_function_init(bp, (rc == FW_MSG_CODE_DRV_LOAD_COMMON))) {
7084 BNX2X_ERR("HW init failed, aborting\n");
7089 atomic_set(&bp->intr_sem, 0);
7092 /* Setup NIC internals and enable interrupts */
7095 /* Send LOAD_DONE command to MCP */
7097 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7098 DP(NETIF_MSG_IFUP, "rc = 0x%x\n", rc);
7100 BNX2X_ERR("MCP response failure, unloading\n");
7105 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7107 /* Enable Rx interrupt handling before sending the ramrod
7108 as it is completed on the Rx FP queue */
7109 for_each_queue(bp, i)
7110 napi_enable(&bnx2x_fp(bp, i, napi));
7112 if (bnx2x_setup_leading(bp))
7115 for_each_nondefault_queue(bp, i)
7116 if (bnx2x_setup_multi(bp, i))
7119 bnx2x_set_mac_addr(bp);
7123 /* Start fast path */
7124 if (req_irq) { /* IRQ is only requested from bnx2x_open */
7125 netif_start_queue(bp->dev);
7126 if (bp->flags & USING_MSIX_FLAG)
7127 printk(KERN_INFO PFX "%s: using MSI-X\n",
7130 /* Otherwise the Tx queue should only be re-enabled */
7131 } else if (netif_running(bp->dev)) {
7132 netif_wake_queue(bp->dev);
7133 bnx2x_set_rx_mode(bp->dev);
7136 /* start the timer */
7137 mod_timer(&bp->timer, jiffies + bp->current_interval);
7142 for_each_queue(bp, i)
7143 napi_disable(&bnx2x_fp(bp, i, napi));
7146 bnx2x_disable_int_sync(bp);
7148 bnx2x_free_skbs(bp);
7154 /* TBD we really need to reset the chip
7155 if we want to recover from this */
7159 static void bnx2x_netif_stop(struct bnx2x *bp)
7163 bp->rx_mode = BNX2X_RX_MODE_NONE;
7164 bnx2x_set_storm_rx_mode(bp);
7166 bnx2x_disable_int_sync(bp);
7167 bnx2x_link_reset(bp);
7169 for_each_queue(bp, i)
7170 napi_disable(&bnx2x_fp(bp, i, napi));
7172 if (netif_running(bp->dev)) {
7173 netif_tx_disable(bp->dev);
7174 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7178 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7180 int port = bp->port;
7186 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
7188 /* Do not rcv packets to BRB */
7189 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7190 /* Do not direct rcv packets that are not for MCP to the BRB */
7191 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7192 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7194 /* Configure IGU and AEU */
7195 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
7196 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7198 /* TODO: Close Doorbell port? */
7205 base = port * RQ_ONCHIP_AT_PORT_SIZE;
7206 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
7208 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
7210 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT, 0);
7211 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + 4, 0);
7215 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7217 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7219 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7224 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7229 /* halt the connection */
7230 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
7231 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
7234 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7235 &(bp->fp[index].state), 1);
7236 if (rc) /* timeout */
7239 /* delete cfc entry */
7240 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7242 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_DELETED, index,
7243 &(bp->fp[index].state), 1);
7248 static void bnx2x_stop_leading(struct bnx2x *bp)
7251 /* if the other port is handling traffic,
7252 this can take a lot of time */
7257 /* Send HALT ramrod */
7258 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7259 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
7261 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7262 &(bp->fp[0].state), 1))
7265 bp->dsb_sp_prod_idx = *bp->dsb_sp_prod;
7267 /* Send CFC_DELETE ramrod */
7268 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7271 /* Wait for completion.
7272 We are going to reset the chip anyway,
7273 so there is not much to do if this times out */
7275 while (bp->dsb_sp_prod_idx == *bp->dsb_sp_prod && timeout) {
7282 static int bnx2x_nic_unload(struct bnx2x *bp, int fre_irq)
7288 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7290 /* Calling flush_scheduled_work() may deadlock because
7291 * linkwatch_event() may be on the workqueue and it will try to get
7292 * the rtnl_lock which we are holding.
7295 while (bp->in_reset_task)
7298 /* Delete the timer: do it before disabling interrupts, as there
7299 may still be a STAT_QUERY ramrod pending after stopping the timer */
7300 del_timer_sync(&bp->timer);
7302 /* Wait until stat ramrod returns and all SP tasks complete */
7303 while (bp->stat_pending && (bp->spq_left != MAX_SPQ_PENDING))
7306 /* Stop fast path, disable MAC, disable interrupts, disable napi */
7307 bnx2x_netif_stop(bp);
7309 if (bp->flags & NO_WOL_FLAG)
7310 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7312 u32 emac_base = bp->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7313 u8 *mac_addr = bp->dev->dev_addr;
7314 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
7315 EMAC_MODE_ACPI_RCVD);
7317 EMAC_WR(EMAC_REG_EMAC_MODE, val);
7319 val = (mac_addr[0] << 8) | mac_addr[1];
7320 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
7322 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7323 (mac_addr[4] << 8) | mac_addr[5];
7324 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
7326 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7328 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7330 for_each_nondefault_queue(bp, i)
7331 if (bnx2x_stop_multi(bp, i))
7335 bnx2x_stop_leading(bp);
7339 rc = bnx2x_fw_command(bp, reset_code);
7341 rc = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7347 /* Reset the chip */
7348 bnx2x_reset_chip(bp, rc);
7350 /* Report UNLOAD_DONE to MCP */
7352 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7354 /* Free SKBs and driver internals */
7355 bnx2x_free_skbs(bp);
7358 bp->state = BNX2X_STATE_CLOSED;
7361 netif_carrier_off(bp->dev);
7366 /* end of nic load/unload */
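/* Summary (illustrative) of the reset_code selection in
 * bnx2x_nic_unload() above:
 *
 *	NO_WOL_FLAG set            -> DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP
 *	WoL enabled (magic packet) -> DRV_MSG_CODE_UNLOAD_REQ_WOL_EN
 *	WoL disabled               -> DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS
 *
 * In the WOL_EN case the EMAC is left in magic-packet mode with the
 * current MAC address programmed into its match registers.
 */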
7371 * Init service functions
7374 static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
7376 int port = bp->port;
7381 switch (switch_cfg) {
7383 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7385 ext_phy_type = SERDES_EXT_PHY_TYPE(bp);
7386 switch (ext_phy_type) {
7387 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7388 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7391 bp->supported |= (SUPPORTED_10baseT_Half |
7392 SUPPORTED_10baseT_Full |
7393 SUPPORTED_100baseT_Half |
7394 SUPPORTED_100baseT_Full |
7395 SUPPORTED_1000baseT_Full |
7396 SUPPORTED_2500baseX_Full |
7397 SUPPORTED_TP | SUPPORTED_FIBRE |
7400 SUPPORTED_Asym_Pause);
7403 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7404 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7407 bp->phy_flags |= PHY_SGMII_FLAG;
7409 bp->supported |= (SUPPORTED_10baseT_Half |
7410 SUPPORTED_10baseT_Full |
7411 SUPPORTED_100baseT_Half |
7412 SUPPORTED_100baseT_Full |
7413 SUPPORTED_1000baseT_Full |
7414 SUPPORTED_TP | SUPPORTED_FIBRE |
7417 SUPPORTED_Asym_Pause);
7421 BNX2X_ERR("NVRAM config error. "
7422 "BAD SerDes ext_phy_config 0x%x\n",
7423 bp->ext_phy_config);
7427 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7429 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7432 case SWITCH_CFG_10G:
7433 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7435 bp->phy_flags |= PHY_XGXS_FLAG;
7437 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7438 switch (ext_phy_type) {
7439 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7440 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7443 bp->supported |= (SUPPORTED_10baseT_Half |
7444 SUPPORTED_10baseT_Full |
7445 SUPPORTED_100baseT_Half |
7446 SUPPORTED_100baseT_Full |
7447 SUPPORTED_1000baseT_Full |
7448 SUPPORTED_2500baseX_Full |
7449 SUPPORTED_10000baseT_Full |
7450 SUPPORTED_TP | SUPPORTED_FIBRE |
7453 SUPPORTED_Asym_Pause);
7456 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7457 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7460 bp->supported |= (SUPPORTED_10000baseT_Full |
7463 SUPPORTED_Asym_Pause);
7466 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7467 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7470 bp->supported |= (SUPPORTED_10000baseT_Full |
7471 SUPPORTED_1000baseT_Full |
7475 SUPPORTED_Asym_Pause);
7478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7479 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7482 bp->supported |= (SUPPORTED_10000baseT_Full |
7483 SUPPORTED_1000baseT_Full |
7487 SUPPORTED_Asym_Pause);
7490 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7491 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7494 bp->supported |= (SUPPORTED_10000baseT_Full |
7498 SUPPORTED_Asym_Pause);
7502 BNX2X_ERR("NVRAM config error. "
7503 "BAD XGXS ext_phy_config 0x%x\n",
7504 bp->ext_phy_config);
7508 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7510 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
7512 bp->ser_lane = ((bp->lane_config &
7513 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_MASK) >>
7514 PORT_HW_CFG_LANE_SWAP_CFG_MASTER_SHIFT);
7515 bp->rx_lane_swap = ((bp->lane_config &
7516 PORT_HW_CFG_LANE_SWAP_CFG_RX_MASK) >>
7517 PORT_HW_CFG_LANE_SWAP_CFG_RX_SHIFT);
7518 bp->tx_lane_swap = ((bp->lane_config &
7519 PORT_HW_CFG_LANE_SWAP_CFG_TX_MASK) >>
7520 PORT_HW_CFG_LANE_SWAP_CFG_TX_SHIFT);
7521 BNX2X_DEV_INFO("rx_lane_swap 0x%x tx_lane_swap 0x%x\n",
7522 bp->rx_lane_swap, bp->tx_lane_swap);
7526 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7531 /* mask what we support according to speed_cap_mask */
7532 if (!(bp->speed_cap_mask &
7533 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7534 bp->supported &= ~SUPPORTED_10baseT_Half;
7536 if (!(bp->speed_cap_mask &
7537 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7538 bp->supported &= ~SUPPORTED_10baseT_Full;
7540 if (!(bp->speed_cap_mask &
7541 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7542 bp->supported &= ~SUPPORTED_100baseT_Half;
7544 if (!(bp->speed_cap_mask &
7545 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7546 bp->supported &= ~SUPPORTED_100baseT_Full;
7548 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7549 bp->supported &= ~(SUPPORTED_1000baseT_Half |
7550 SUPPORTED_1000baseT_Full);
7552 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7553 bp->supported &= ~SUPPORTED_2500baseX_Full;
7555 if (!(bp->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7556 bp->supported &= ~SUPPORTED_10000baseT_Full;
7558 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
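/* Example (illustrative): a speed_cap_mask with only the D0_10G bit
 * set clears every speed bit above except SUPPORTED_10000baseT_Full,
 * so a Direct XGXS PHY ends up advertising 10G only (plus the
 * untouched TP/FIBRE/pause bits).
 */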
7561 static void bnx2x_link_settings_requested(struct bnx2x *bp)
7563 bp->req_autoneg = 0;
7564 bp->req_duplex = DUPLEX_FULL;
7566 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7567 case PORT_FEATURE_LINK_SPEED_AUTO:
7568 if (bp->supported & SUPPORTED_Autoneg) {
7569 bp->req_autoneg |= AUTONEG_SPEED;
7570 bp->req_line_speed = 0;
7571 bp->advertising = bp->supported;
7573 if (XGXS_EXT_PHY_TYPE(bp) ==
7574 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) {
7575 /* force 10G, no AN */
7576 bp->req_line_speed = SPEED_10000;
7578 (ADVERTISED_10000baseT_Full |
7582 BNX2X_ERR("NVRAM config error. "
7583 "Invalid link_config 0x%x"
7584 " Autoneg not supported\n",
7590 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7591 if (bp->supported & SUPPORTED_10baseT_Full) {
7592 bp->req_line_speed = SPEED_10;
7593 bp->advertising = (ADVERTISED_10baseT_Full |
7596 BNX2X_ERR("NVRAM config error. "
7597 "Invalid link_config 0x%x"
7598 " speed_cap_mask 0x%x\n",
7599 bp->link_config, bp->speed_cap_mask);
7604 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7605 if (bp->supported & SUPPORTED_10baseT_Half) {
7606 bp->req_line_speed = SPEED_10;
7607 bp->req_duplex = DUPLEX_HALF;
7608 bp->advertising = (ADVERTISED_10baseT_Half |
7611 BNX2X_ERR("NVRAM config error. "
7612 "Invalid link_config 0x%x"
7613 " speed_cap_mask 0x%x\n",
7614 bp->link_config, bp->speed_cap_mask);
7619 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7620 if (bp->supported & SUPPORTED_100baseT_Full) {
7621 bp->req_line_speed = SPEED_100;
7622 bp->advertising = (ADVERTISED_100baseT_Full |
7625 BNX2X_ERR("NVRAM config error. "
7626 "Invalid link_config 0x%x"
7627 " speed_cap_mask 0x%x\n",
7628 bp->link_config, bp->speed_cap_mask);
7633 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7634 if (bp->supported & SUPPORTED_100baseT_Half) {
7635 bp->req_line_speed = SPEED_100;
7636 bp->req_duplex = DUPLEX_HALF;
7637 bp->advertising = (ADVERTISED_100baseT_Half |
7640 BNX2X_ERR("NVRAM config error. "
7641 "Invalid link_config 0x%x"
7642 " speed_cap_mask 0x%x\n",
7643 bp->link_config, bp->speed_cap_mask);
7648 case PORT_FEATURE_LINK_SPEED_1G:
7649 if (bp->supported & SUPPORTED_1000baseT_Full) {
7650 bp->req_line_speed = SPEED_1000;
7651 bp->advertising = (ADVERTISED_1000baseT_Full |
7654 BNX2X_ERR("NVRAM config error. "
7655 "Invalid link_config 0x%x"
7656 " speed_cap_mask 0x%x\n",
7657 bp->link_config, bp->speed_cap_mask);
7662 case PORT_FEATURE_LINK_SPEED_2_5G:
7663 if (bp->supported & SUPPORTED_2500baseX_Full) {
7664 bp->req_line_speed = SPEED_2500;
7665 bp->advertising = (ADVERTISED_2500baseX_Full |
7668 BNX2X_ERR("NVRAM config error. "
7669 "Invalid link_config 0x%x"
7670 " speed_cap_mask 0x%x\n",
7671 bp->link_config, bp->speed_cap_mask);
7676 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7677 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7678 case PORT_FEATURE_LINK_SPEED_10G_KR:
7679 if (bp->supported & SUPPORTED_10000baseT_Full) {
7680 bp->req_line_speed = SPEED_10000;
7681 bp->advertising = (ADVERTISED_10000baseT_Full |
7684 BNX2X_ERR("NVRAM config error. "
7685 "Invalid link_config 0x%x"
7686 " speed_cap_mask 0x%x\n",
7687 bp->link_config, bp->speed_cap_mask);
7693 BNX2X_ERR("NVRAM config error. "
7694 "BAD link speed link_config 0x%x\n",
7696 bp->req_autoneg |= AUTONEG_SPEED;
7697 bp->req_line_speed = 0;
7698 bp->advertising = bp->supported;
7701 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d\n",
7702 bp->req_line_speed, bp->req_duplex);
7704 bp->req_flow_ctrl = (bp->link_config &
7705 PORT_FEATURE_FLOW_CONTROL_MASK);
7706 if ((bp->req_flow_ctrl == FLOW_CTRL_AUTO) &&
7707 (bp->supported & SUPPORTED_Autoneg))
7708 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
7710 BNX2X_DEV_INFO("req_autoneg 0x%x req_flow_ctrl 0x%x"
7711 " advertising 0x%x\n",
7712 bp->req_autoneg, bp->req_flow_ctrl, bp->advertising);
7715 static void bnx2x_get_hwinfo(struct bnx2x *bp)
7717 u32 val, val2, val3, val4, id;
7718 int port = bp->port;
7721 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7722 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
7724 /* Get the chip revision id and number. */
7725 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7726 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7727 id = ((val & 0xffff) << 16);
7728 val = REG_RD(bp, MISC_REG_CHIP_REV);
7729 id |= ((val & 0xf) << 12);
7730 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7731 id |= ((val & 0xff) << 4);
7732 REG_RD(bp, MISC_REG_BOND_ID);
7735 BNX2X_DEV_INFO("chip ID is %x\n", id);
7737 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
7738 BNX2X_DEV_INFO("MCP not active\n");
7743 val = SHMEM_RD(bp, validity_map[port]);
7744 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7745 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7746 BNX2X_ERR("BAD MCP validity signature\n");
7748 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
7749 DRV_MSG_SEQ_NUMBER_MASK);
7751 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7752 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7754 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7756 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7757 bp->ext_phy_config =
7759 dev_info.port_hw_config[port].external_phy_config);
7760 bp->speed_cap_mask =
7762 dev_info.port_hw_config[port].speed_capability_mask);
7765 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7767 BNX2X_DEV_INFO("hw_config (%08x) board (%08x) serdes_config (%08x)\n"
7768 KERN_INFO " lane_config (%08x) ext_phy_config (%08x)\n"
7769 KERN_INFO " speed_cap_mask (%08x) link_config (%08x)"
7771 bp->hw_config, bp->board, bp->serdes_config,
7772 bp->lane_config, bp->ext_phy_config,
7773 bp->speed_cap_mask, bp->link_config, bp->fw_seq);
7775 switch_cfg = (bp->link_config & PORT_FEATURE_CONNECTED_SWITCH_MASK);
7776 bnx2x_link_settings_supported(bp, switch_cfg);
7778 bp->autoneg = (bp->hw_config & SHARED_HW_CFG_AN_ENABLE_MASK);
7779 /* for now disable cl73 */
7780 bp->autoneg &= ~SHARED_HW_CFG_AN_ENABLE_CL73;
7781 BNX2X_DEV_INFO("autoneg 0x%x\n", bp->autoneg);
7783 bnx2x_link_settings_requested(bp);
7785 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7786 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7787 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7788 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7789 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7790 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7791 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7792 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7794 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
7797 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7798 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7799 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7800 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7802 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7803 val, val2, val3, val4);
7807 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
7808 BNX2X_DEV_INFO("bc_ver %X\n", val);
7809 if (val < BNX2X_BC_VER) {
7810 /* for now only warn
7811 * later we might need to enforce this */
7812 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7813 " please upgrade BC\n", BNX2X_BC_VER, val);
7819 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7820 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
7821 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7822 bp->flash_size, bp->flash_size);
7826 set_mac: /* only supposed to happen on emulation/FPGA */
7827 BNX2X_ERR("warning: random MAC workaround active\n");
7828 random_ether_addr(bp->dev->dev_addr);
7829 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
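/* Worked example (illustrative): mac_upper == 0x00000010 and
 * mac_lower == 0x18aabbcc decode, via the shifts in bnx2x_get_hwinfo()
 * above, to the station address 00:10:18:aa:bb:cc.
 */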
7834 * ethtool service functions
7837 /* All ethtool functions called with rtnl_lock */
7839 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7841 struct bnx2x *bp = netdev_priv(dev);
7843 cmd->supported = bp->supported;
7844 cmd->advertising = bp->advertising;
7846 if (netif_carrier_ok(dev)) {
7847 cmd->speed = bp->line_speed;
7848 cmd->duplex = bp->duplex;
7850 cmd->speed = bp->req_line_speed;
7851 cmd->duplex = bp->req_duplex;
7854 if (bp->phy_flags & PHY_XGXS_FLAG) {
7855 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp);
7857 switch (ext_phy_type) {
7858 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7859 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7860 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7861 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7862 cmd->port = PORT_FIBRE;
7865 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7866 cmd->port = PORT_TP;
7870 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7871 bp->ext_phy_config);
7874 cmd->port = PORT_TP;
7876 cmd->phy_address = bp->phy_addr;
7877 cmd->transceiver = XCVR_INTERNAL;
7879 if (bp->req_autoneg & AUTONEG_SPEED)
7880 cmd->autoneg = AUTONEG_ENABLE;
7882 cmd->autoneg = AUTONEG_DISABLE;
7887 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7888 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7889 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7890 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7891 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7892 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7893 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7898 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7900 struct bnx2x *bp = netdev_priv(dev);
7903 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7904 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7905 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7906 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7907 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7908 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7909 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7911 switch (cmd->port) {
7913 if (!(bp->supported & SUPPORTED_TP)) {
7914 DP(NETIF_MSG_LINK, "TP not supported\n");
7918 if (bp->phy_flags & PHY_XGXS_FLAG) {
7919 bnx2x_link_reset(bp);
7920 bnx2x_link_settings_supported(bp, SWITCH_CFG_1G);
7921 bnx2x_phy_deassert(bp);
7926 if (!(bp->supported & SUPPORTED_FIBRE)) {
7927 DP(NETIF_MSG_LINK, "FIBRE not supported\n");
7931 if (!(bp->phy_flags & PHY_XGXS_FLAG)) {
7932 bnx2x_link_reset(bp);
7933 bnx2x_link_settings_supported(bp, SWITCH_CFG_10G);
7934 bnx2x_phy_deassert(bp);
7939 DP(NETIF_MSG_LINK, "Unknown port type\n");
7943 if (cmd->autoneg == AUTONEG_ENABLE) {
7944 if (!(bp->supported & SUPPORTED_Autoneg)) {
7945 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7949 /* advertise the requested speed and duplex if supported */
7950 cmd->advertising &= bp->supported;
7952 bp->req_autoneg |= AUTONEG_SPEED;
7953 bp->req_line_speed = 0;
7954 bp->req_duplex = DUPLEX_FULL;
7955 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
7957 } else { /* forced speed */
7958 /* advertise the requested speed and duplex if supported */
7959 switch (cmd->speed) {
7961 if (cmd->duplex == DUPLEX_FULL) {
7962 if (!(bp->supported &
7963 SUPPORTED_10baseT_Full)) {
7965 "10M full not supported\n");
7969 advertising = (ADVERTISED_10baseT_Full |
7972 if (!(bp->supported &
7973 SUPPORTED_10baseT_Half)) {
7975 "10M half not supported\n");
7979 advertising = (ADVERTISED_10baseT_Half |
7985 if (cmd->duplex == DUPLEX_FULL) {
7986 if (!(bp->supported &
7987 SUPPORTED_100baseT_Full)) {
7989 "100M full not supported\n");
7993 advertising = (ADVERTISED_100baseT_Full |
7996 if (!(bp->supported &
7997 SUPPORTED_100baseT_Half)) {
7999 "100M half not supported\n");
8003 advertising = (ADVERTISED_100baseT_Half |
8009 if (cmd->duplex != DUPLEX_FULL) {
8010 DP(NETIF_MSG_LINK, "1G half not supported\n");
8014 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
8015 DP(NETIF_MSG_LINK, "1G full not supported\n");
8019 advertising = (ADVERTISED_1000baseT_Full |
8024 if (cmd->duplex != DUPLEX_FULL) {
8026 "2.5G half not supported\n");
8030 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
8032 "2.5G full not supported\n");
8036 advertising = (ADVERTISED_2500baseX_Full |
8041 if (cmd->duplex != DUPLEX_FULL) {
8042 DP(NETIF_MSG_LINK, "10G half not supported\n");
8046 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
8047 DP(NETIF_MSG_LINK, "10G full not supported\n");
8051 advertising = (ADVERTISED_10000baseT_Full |
8056 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8060 bp->req_autoneg &= ~AUTONEG_SPEED;
8061 bp->req_line_speed = cmd->speed;
8062 bp->req_duplex = cmd->duplex;
8063 bp->advertising = advertising;
8066 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_line_speed %d\n"
8067 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8068 bp->req_autoneg, bp->req_line_speed, bp->req_duplex,
8071 bnx2x_stop_stats(bp);
8072 bnx2x_link_initialize(bp);
8077 static void bnx2x_get_drvinfo(struct net_device *dev,
8078 struct ethtool_drvinfo *info)
8080 struct bnx2x *bp = netdev_priv(dev);
8082 strcpy(info->driver, DRV_MODULE_NAME);
8083 strcpy(info->version, DRV_MODULE_VERSION);
8084 snprintf(info->fw_version, 32, "%d.%d.%d:%d (BC VER %x)",
8085 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
8086 BCM_5710_FW_REVISION_VERSION, BCM_5710_FW_COMPILE_FLAGS,
8088 strcpy(info->bus_info, pci_name(bp->pdev));
8089 info->n_stats = BNX2X_NUM_STATS;
8090 info->testinfo_len = BNX2X_NUM_TESTS;
8091 info->eedump_len = bp->flash_size;
8092 info->regdump_len = 0;
8095 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8097 struct bnx2x *bp = netdev_priv(dev);
8099 if (bp->flags & NO_WOL_FLAG) {
8103 wol->supported = WAKE_MAGIC;
8105 wol->wolopts = WAKE_MAGIC;
8109 memset(&wol->sopass, 0, sizeof(wol->sopass));
8112 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8114 struct bnx2x *bp = netdev_priv(dev);
8116 if (wol->wolopts & ~WAKE_MAGIC)
8119 if (wol->wolopts & WAKE_MAGIC) {
8120 if (bp->flags & NO_WOL_FLAG)
8130 static u32 bnx2x_get_msglevel(struct net_device *dev)
8132 struct bnx2x *bp = netdev_priv(dev);
8134 return bp->msglevel;
8137 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8139 struct bnx2x *bp = netdev_priv(dev);
8141 if (capable(CAP_NET_ADMIN))
8142 bp->msglevel = level;
8145 static int bnx2x_nway_reset(struct net_device *dev)
8147 struct bnx2x *bp = netdev_priv(dev);
8149 if (bp->state != BNX2X_STATE_OPEN) {
8150 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8154 bnx2x_stop_stats(bp);
8155 bnx2x_link_initialize(bp);
8160 static int bnx2x_get_eeprom_len(struct net_device *dev)
8162 struct bnx2x *bp = netdev_priv(dev);
8164 return bp->flash_size;
8167 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8169 int port = bp->port;
8173 /* adjust timeout for emulation/FPGA */
8174 count = NVRAM_TIMEOUT_COUNT;
8175 if (CHIP_REV_IS_SLOW(bp))
8178 /* request access to nvram interface */
8179 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8180 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8182 for (i = 0; i < count*10; i++) {
8183 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8184 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8190 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8191 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
8198 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8200 int port = bp->port;
8204 /* adjust timeout for emulation/FPGA */
8205 count = NVRAM_TIMEOUT_COUNT;
8206 if (CHIP_REV_IS_SLOW(bp))
8209 /* relinquish nvram interface */
8210 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8211 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8213 for (i = 0; i < count*10; i++) {
8214 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8215 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8221 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8222 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
8229 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8233 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8235 /* enable both bits, even on read */
8236 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8237 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8238 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8241 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8245 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8247 /* disable both bits, even after read */
8248 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8249 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8250 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8253 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8259 /* build the command word */
8260 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8262 /* need to clear DONE bit separately */
8263 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8265 /* address of the NVRAM to read from */
8266 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8267 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8269 /* issue a read command */
8270 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8272 /* adjust timeout for emulation/FPGA */
8273 count = NVRAM_TIMEOUT_COUNT;
8274 if (CHIP_REV_IS_SLOW(bp))
8277 /* wait for completion */
8280 for (i = 0; i < count; i++) {
8282 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8284 if (val & MCPR_NVM_COMMAND_DONE) {
8285 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8286 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8287 /* we read nvram data in cpu order
8288 * but ethtool sees it as an array of bytes
8289 * converting to big-endian will do the work */
8290 val = cpu_to_be32(val);
8300 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8307 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8309 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8314 if (offset + buf_size > bp->flash_size) {
8315 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8316 " buf_size (0x%x) > flash_size (0x%x)\n",
8317 offset, buf_size, bp->flash_size);
8321 /* request access to nvram interface */
8322 rc = bnx2x_acquire_nvram_lock(bp);
8326 /* enable access to nvram interface */
8327 bnx2x_enable_nvram_access(bp);
8329 /* read the first word(s) */
8330 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8331 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8332 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8333 memcpy(ret_buf, &val, 4);
8335 /* advance to the next dword */
8336 offset += sizeof(u32);
8337 ret_buf += sizeof(u32);
8338 buf_size -= sizeof(u32);
8343 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8344 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8345 memcpy(ret_buf, &val, 4);
8348 /* disable access to nvram interface */
8349 bnx2x_disable_nvram_access(bp);
8350 bnx2x_release_nvram_lock(bp);
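/* Usage sketch (illustrative, hypothetical helper): reading a single
 * dword-aligned word from flash with the routine above.
 */
static int __maybe_unused bnx2x_nvram_read_one(struct bnx2x *bp,
					       u32 offset, u32 *val)
{
	/* offset and size satisfy the alignment checks above */
	return bnx2x_nvram_read(bp, offset, (u8 *)val, sizeof(u32));
}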
8355 static int bnx2x_get_eeprom(struct net_device *dev,
8356 struct ethtool_eeprom *eeprom, u8 *eebuf)
8358 struct bnx2x *bp = netdev_priv(dev);
8361 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8362 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8363 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8364 eeprom->len, eeprom->len);
8366 /* parameters already validated in ethtool_get_eeprom */
8368 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8373 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8378 /* build the command word */
8379 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8381 /* need to clear DONE bit separately */
8382 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8384 /* write the data */
8385 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8387 /* address of the NVRAM to write to */
8388 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8389 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8391 /* issue the write command */
8392 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8394 /* adjust timeout for emulation/FPGA */
8395 count = NVRAM_TIMEOUT_COUNT;
8396 if (CHIP_REV_IS_SLOW(bp))
8399 /* wait for completion */
8401 for (i = 0; i < count; i++) {
8403 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8404 if (val & MCPR_NVM_COMMAND_DONE) {
8413 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
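/* Example (illustrative): BYTE_OFFSET(0x102) == 8 * (0x102 & 0x03) == 16,
 * so a single byte written to NVRAM offset 0x102 is masked and merged
 * into bits 23:16 of the dword read from the aligned offset 0x100
 * (taken in the byte-array order the dword is kept in).
 */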
8415 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8423 if (offset + buf_size > bp->flash_size) {
8424 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8425 " buf_size (0x%x) > flash_size (0x%x)\n",
8426 offset, buf_size, bp->flash_size);
8430 /* request access to nvram interface */
8431 rc = bnx2x_acquire_nvram_lock(bp);
8435 /* enable access to nvram interface */
8436 bnx2x_enable_nvram_access(bp);
8438 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8439 align_offset = (offset & ~0x03);
8440 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8443 val &= ~(0xff << BYTE_OFFSET(offset));
8444 val |= (*data_buf << BYTE_OFFSET(offset));
8446 /* nvram data is returned as an array of bytes
8447 * convert it back to cpu order */
8448 val = be32_to_cpu(val);
8450 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8452 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8456 /* disable access to nvram interface */
8457 bnx2x_disable_nvram_access(bp);
8458 bnx2x_release_nvram_lock(bp);
8463 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8471 if (buf_size == 1) { /* ethtool */
8472 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8475 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8477 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8482 if (offset + buf_size > bp->flash_size) {
8483 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8484 " buf_size (0x%x) > flash_size (0x%x)\n",
8485 offset, buf_size, bp->flash_size);
8489 /* request access to nvram interface */
8490 rc = bnx2x_acquire_nvram_lock(bp);
8494 /* enable access to nvram interface */
8495 bnx2x_enable_nvram_access(bp);
8498 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8499 while ((written_so_far < buf_size) && (rc == 0)) {
8500 if (written_so_far == (buf_size - sizeof(u32)))
8501 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8502 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8503 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8504 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8505 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8507 memcpy(&val, data_buf, 4);
8508 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
8510 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8512 /* advance to the next dword */
8513 offset += sizeof(u32);
8514 data_buf += sizeof(u32);
8515 written_so_far += sizeof(u32);
8519 /* disable access to nvram interface */
8520 bnx2x_disable_nvram_access(bp);
8521 bnx2x_release_nvram_lock(bp);
8526 static int bnx2x_set_eeprom(struct net_device *dev,
8527 struct ethtool_eeprom *eeprom, u8 *eebuf)
8529 struct bnx2x *bp = netdev_priv(dev);
8532 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8533 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8534 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8535 eeprom->len, eeprom->len);
8537 /* parameters already validated in ethtool_set_eeprom */
8539 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8544 static int bnx2x_get_coalesce(struct net_device *dev,
8545 struct ethtool_coalesce *coal)
8547 struct bnx2x *bp = netdev_priv(dev);
8549 memset(coal, 0, sizeof(struct ethtool_coalesce));
8551 coal->rx_coalesce_usecs = bp->rx_ticks;
8552 coal->tx_coalesce_usecs = bp->tx_ticks;
8553 coal->stats_block_coalesce_usecs = bp->stats_ticks;
8558 static int bnx2x_set_coalesce(struct net_device *dev,
8559 struct ethtool_coalesce *coal)
8561 struct bnx2x *bp = netdev_priv(dev);
8563 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8564 if (bp->rx_ticks > 3000)
8565 bp->rx_ticks = 3000;
8567 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8568 if (bp->tx_ticks > 0x3000)
8569 bp->tx_ticks = 0x3000;
8571 bp->stats_ticks = coal->stats_block_coalesce_usecs;
8572 if (bp->stats_ticks > 0xffff00)
8573 bp->stats_ticks = 0xffff00;
8574 bp->stats_ticks &= 0xffff00;
8576 if (netif_running(bp->dev))
8577 bnx2x_update_coalesce(bp);
8582 static void bnx2x_get_ringparam(struct net_device *dev,
8583 struct ethtool_ringparam *ering)
8585 struct bnx2x *bp = netdev_priv(dev);
8587 ering->rx_max_pending = MAX_RX_AVAIL;
8588 ering->rx_mini_max_pending = 0;
8589 ering->rx_jumbo_max_pending = 0;
8591 ering->rx_pending = bp->rx_ring_size;
8592 ering->rx_mini_pending = 0;
8593 ering->rx_jumbo_pending = 0;
8595 ering->tx_max_pending = MAX_TX_AVAIL;
8596 ering->tx_pending = bp->tx_ring_size;
8599 static int bnx2x_set_ringparam(struct net_device *dev,
8600 struct ethtool_ringparam *ering)
8602 struct bnx2x *bp = netdev_priv(dev);
8604 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8605 (ering->tx_pending > MAX_TX_AVAIL) ||
8606 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8609 bp->rx_ring_size = ering->rx_pending;
8610 bp->tx_ring_size = ering->tx_pending;
8612 if (netif_running(bp->dev)) {
8613 bnx2x_nic_unload(bp, 0);
8614 bnx2x_nic_load(bp, 0);
8620 static void bnx2x_get_pauseparam(struct net_device *dev,
8621 struct ethtool_pauseparam *epause)
8623 struct bnx2x *bp = netdev_priv(dev);
8626 ((bp->req_autoneg & AUTONEG_FLOW_CTRL) == AUTONEG_FLOW_CTRL);
8627 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) == FLOW_CTRL_RX);
8628 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) == FLOW_CTRL_TX);
8630 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8631 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8632 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8635 static int bnx2x_set_pauseparam(struct net_device *dev,
8636 struct ethtool_pauseparam *epause)
8638 struct bnx2x *bp = netdev_priv(dev);
8640 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8641 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8642 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8644 if (epause->autoneg) {
8645 if (!(bp->supported & SUPPORTED_Autoneg)) {
8646 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8650 bp->req_autoneg |= AUTONEG_FLOW_CTRL;
8652 bp->req_autoneg &= ~AUTONEG_FLOW_CTRL;
8654 bp->req_flow_ctrl = FLOW_CTRL_AUTO;
8656 if (epause->rx_pause)
8657 bp->req_flow_ctrl |= FLOW_CTRL_RX;
8658 if (epause->tx_pause)
8659 bp->req_flow_ctrl |= FLOW_CTRL_TX;
8661 if (!(bp->req_autoneg & AUTONEG_FLOW_CTRL) &&
8662 (bp->req_flow_ctrl == FLOW_CTRL_AUTO))
8663 bp->req_flow_ctrl = FLOW_CTRL_NONE;
8665 DP(NETIF_MSG_LINK, "req_autoneg 0x%x req_flow_ctrl 0x%x\n",
8666 bp->req_autoneg, bp->req_flow_ctrl);
8668 bnx2x_stop_stats(bp);
8669 bnx2x_link_initialize(bp);
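/* Summary (illustrative) of the flow-control resolution above:
 *
 *	epause->autoneg && SUPPORTED_Autoneg -> AUTONEG_FLOW_CTRL, AUTO
 *	epause->rx_pause                     -> FLOW_CTRL_RX
 *	epause->tx_pause                     -> FLOW_CTRL_TX
 *	no autoneg and neither pause bit     -> FLOW_CTRL_NONE
 */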
8674 static u32 bnx2x_get_rx_csum(struct net_device *dev)
8676 struct bnx2x *bp = netdev_priv(dev);
8681 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8683 struct bnx2x *bp = netdev_priv(dev);
8689 static int bnx2x_set_tso(struct net_device *dev, u32 data)
8692 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8694 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8699 char string[ETH_GSTRING_LEN];
8700 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8701 { "MC Errors (online)" }
8704 static int bnx2x_self_test_count(struct net_device *dev)
8706 return BNX2X_NUM_TESTS;
8709 static void bnx2x_self_test(struct net_device *dev,
8710 struct ethtool_test *etest, u64 *buf)
8712 struct bnx2x *bp = netdev_priv(dev);
8715 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
8717 if (bp->state != BNX2X_STATE_OPEN) {
8718 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
8722 stats_state = bp->stats_state;
8723 bnx2x_stop_stats(bp);
8725 if (bnx2x_mc_assert(bp) != 0) {
8727 etest->flags |= ETH_TEST_FL_FAILED;
8730 #ifdef BNX2X_EXTRA_DEBUG
8731 bnx2x_panic_dump(bp);
8733 bp->stats_state = stats_state;
8737 char string[ETH_GSTRING_LEN];
8738 } bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
8739 { "rx_bytes"}, /* 0 */
8740 { "rx_error_bytes"}, /* 1 */
8741 { "tx_bytes"}, /* 2 */
8742 { "tx_error_bytes"}, /* 3 */
8743 { "rx_ucast_packets"}, /* 4 */
8744 { "rx_mcast_packets"}, /* 5 */
8745 { "rx_bcast_packets"}, /* 6 */
8746 { "tx_ucast_packets"}, /* 7 */
8747 { "tx_mcast_packets"}, /* 8 */
8748 { "tx_bcast_packets"}, /* 9 */
8749 { "tx_mac_errors"}, /* 10 */
8750 { "tx_carrier_errors"}, /* 11 */
8751 { "rx_crc_errors"}, /* 12 */
8752 { "rx_align_errors"}, /* 13 */
8753 { "tx_single_collisions"}, /* 14 */
8754 { "tx_multi_collisions"}, /* 15 */
8755 { "tx_deferred"}, /* 16 */
8756 { "tx_excess_collisions"}, /* 17 */
8757 { "tx_late_collisions"}, /* 18 */
8758 { "tx_total_collisions"}, /* 19 */
8759 { "rx_fragments"}, /* 20 */
8760 { "rx_jabbers"}, /* 21 */
8761 { "rx_undersize_packets"}, /* 22 */
8762 { "rx_oversize_packets"}, /* 23 */
8763 { "rx_xon_frames"}, /* 24 */
8764 { "rx_xoff_frames"}, /* 25 */
8765 { "tx_xon_frames"}, /* 26 */
8766 { "tx_xoff_frames"}, /* 27 */
8767 { "rx_mac_ctrl_frames"}, /* 28 */
8768 { "rx_filtered_packets"}, /* 29 */
8769 { "rx_discards"}, /* 30 */
8772 #define STATS_OFFSET32(offset_name) \
8773 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
8775 static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
8776 STATS_OFFSET32(total_bytes_received_hi), /* 0 */
8777 STATS_OFFSET32(stat_IfHCInBadOctets_hi), /* 1 */
8778 STATS_OFFSET32(total_bytes_transmitted_hi), /* 2 */
8779 STATS_OFFSET32(stat_IfHCOutBadOctets_hi), /* 3 */
8780 STATS_OFFSET32(total_unicast_packets_received_hi), /* 4 */
8781 STATS_OFFSET32(total_multicast_packets_received_hi), /* 5 */
8782 STATS_OFFSET32(total_broadcast_packets_received_hi), /* 6 */
8783 STATS_OFFSET32(total_unicast_packets_transmitted_hi), /* 7 */
8784 STATS_OFFSET32(total_multicast_packets_transmitted_hi), /* 8 */
8785 STATS_OFFSET32(total_broadcast_packets_transmitted_hi), /* 9 */
8786 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
8787 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors), /* 11 */
8788 STATS_OFFSET32(crc_receive_errors), /* 12 */
8789 STATS_OFFSET32(alignment_errors), /* 13 */
8790 STATS_OFFSET32(single_collision_transmit_frames), /* 14 */
8791 STATS_OFFSET32(multiple_collision_transmit_frames), /* 15 */
8792 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions), /* 16 */
8793 STATS_OFFSET32(excessive_collision_frames), /* 17 */
8794 STATS_OFFSET32(late_collision_frames), /* 18 */
8795 STATS_OFFSET32(number_of_bugs_found_in_stats_spec), /* 19 */
8796 STATS_OFFSET32(runt_packets_received), /* 20 */
8797 STATS_OFFSET32(jabber_packets_received), /* 21 */
8798 STATS_OFFSET32(error_runt_packets_received), /* 22 */
8799 STATS_OFFSET32(error_jabber_packets_received), /* 23 */
8800 STATS_OFFSET32(pause_xon_frames_received), /* 24 */
8801 STATS_OFFSET32(pause_xoff_frames_received), /* 25 */
8802 STATS_OFFSET32(pause_xon_frames_transmitted), /* 26 */
8803 STATS_OFFSET32(pause_xoff_frames_transmitted), /* 27 */
8804 STATS_OFFSET32(control_frames_received), /* 28 */
8805 STATS_OFFSET32(mac_filter_discard), /* 29 */
8806 STATS_OFFSET32(no_buff_discard), /* 30 */
8809 static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
8810 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
8811 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
8812 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
8816 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
8818 switch (stringset) {
8820 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
8824 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
8829 static int bnx2x_get_stats_count(struct net_device *dev)
8831 return BNX2X_NUM_STATS;
8834 static void bnx2x_get_ethtool_stats(struct net_device *dev,
8835 struct ethtool_stats *stats, u64 *buf)
8837 struct bnx2x *bp = netdev_priv(dev);
8838 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
8841 for (i = 0; i < BNX2X_NUM_STATS; i++) {
8842 if (bnx2x_stats_len_arr[i] == 0) {
8843 /* skip this counter */
8851 if (bnx2x_stats_len_arr[i] == 4) {
8852 /* 4-byte counter */
8853 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
8856 /* 8-byte counter */
8857 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
8858 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
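/* Example (illustrative): HILO_U64() recombines a 64-bit counter that
 * the stats block keeps as two 32-bit halves, hi word first, e.g.
 * HILO_U64(0x1, 0x2) == 0x100000002ULL.
 */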
8862 static int bnx2x_phys_id(struct net_device *dev, u32 data)
8864 struct bnx2x *bp = netdev_priv(dev);
8870 for (i = 0; i < (data * 2); i++) {
8872 bnx2x_leds_set(bp, SPEED_1000);
8874 bnx2x_leds_unset(bp);
8876 msleep_interruptible(500);
8877 if (signal_pending(current))
8882 bnx2x_leds_set(bp, bp->line_speed);
8887 static struct ethtool_ops bnx2x_ethtool_ops = {
8888 .get_settings = bnx2x_get_settings,
8889 .set_settings = bnx2x_set_settings,
8890 .get_drvinfo = bnx2x_get_drvinfo,
8891 .get_wol = bnx2x_get_wol,
8892 .set_wol = bnx2x_set_wol,
8893 .get_msglevel = bnx2x_get_msglevel,
8894 .set_msglevel = bnx2x_set_msglevel,
8895 .nway_reset = bnx2x_nway_reset,
8896 .get_link = ethtool_op_get_link,
8897 .get_eeprom_len = bnx2x_get_eeprom_len,
8898 .get_eeprom = bnx2x_get_eeprom,
8899 .set_eeprom = bnx2x_set_eeprom,
8900 .get_coalesce = bnx2x_get_coalesce,
8901 .set_coalesce = bnx2x_set_coalesce,
8902 .get_ringparam = bnx2x_get_ringparam,
8903 .set_ringparam = bnx2x_set_ringparam,
8904 .get_pauseparam = bnx2x_get_pauseparam,
8905 .set_pauseparam = bnx2x_set_pauseparam,
8906 .get_rx_csum = bnx2x_get_rx_csum,
8907 .set_rx_csum = bnx2x_set_rx_csum,
8908 .get_tx_csum = ethtool_op_get_tx_csum,
8909 .set_tx_csum = ethtool_op_set_tx_csum,
8910 .get_sg = ethtool_op_get_sg,
8911 .set_sg = ethtool_op_set_sg,
8912 .get_tso = ethtool_op_get_tso,
8913 .set_tso = bnx2x_set_tso,
8914 .self_test_count = bnx2x_self_test_count,
8915 .self_test = bnx2x_self_test,
8916 .get_strings = bnx2x_get_strings,
8917 .phys_id = bnx2x_phys_id,
8918 .get_stats_count = bnx2x_get_stats_count,
8919 .get_ethtool_stats = bnx2x_get_ethtool_stats
8922 /* end of ethtool_ops */
8924 /****************************************************************************
8925 * General service functions
8926 ****************************************************************************/
static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev,
				      bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;
		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
/****************************************************************************
* net_device service functions
****************************************************************************/

/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;

	DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 (dev->mc_count > BNX2X_MAX_MULTICAST))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		int i, old, offset;
		struct dev_mc_list *mclist;
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0, mclist = dev->mc_list;
		     mclist && (i < dev->mc_count);
		     i++, mclist = mclist->next) {

			config->config_table[i].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
			config->config_table[i].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
			config->config_table[i].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
			config->config_table[i].cam_entry.flags =
							cpu_to_le16(bp->port);
			config->config_table[i].target_table_entry.flags = 0;
			config->config_table[i].target_table_entry.
								client_id = 0;
			config->config_table[i].target_table_entry.
								vlan_id = 0;

			DP(NETIF_MSG_IFUP,
			   "setting MCAST[%d] (%04x:%04x:%04x)\n",
			   i, config->config_table[i].cam_entry.msb_mac_addr,
			   config->config_table[i].cam_entry.middle_mac_addr,
			   config->config_table[i].cam_entry.lsb_mac_addr);
		}

		old = config->hdr.length_6b;
		if (old > i) {
			for (; i < old; i++) {
				if (CAM_IS_INVALID(config->config_table[i])) {
					i--; /* already invalidated */
					break;
				}
				/* invalidate */
				CAM_INVALIDATE(config->config_table[i]);
			}
		}

		if (CHIP_REV_IS_SLOW(bp))
			offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
		else
			offset = BNX2X_MAX_MULTICAST*(1 + bp->port);

		config->hdr.length_6b = i;
		config->hdr.offset = offset;
		config->hdr.reserved0 = 0;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
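
/* NAPI poll method: reap TX completions, receive up to 'budget' packets,
 * and only re-enable the IGU interrupt (by acking the status block with
 * IGU_INT_ENABLE) once the budget was not exhausted and no further work
 * is pending.
 */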
static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
		bnx2x_tx_int(fp, budget);

	if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* bnx2x_has_work() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !bnx2x_has_work(fp)) {
#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, fp->index, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return work_done;
}
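
/* TX descriptor accounting: each packet consumes one "start" BD, one
 * extra parsing BD when checksum offload or TSO is in use, and one BD
 * per page fragment.  For example, a TSO skb with 3 frags takes
 * 1 + 1 + 3 = 5 BDs (one more if the head has to be split), which is
 * what the nbd arithmetic in bnx2x_start_xmit() computes.
 */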
/* Called with netif_tx_lock.
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index = 0;
	dma_addr_t mapping;
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = smp_processor_id() % (bp->num_queues);

	fp = &bp->fp[fp_index];
	if (unlikely(bnx2x_tx_avail(fp) <
		     (skb_shinfo(skb)->nr_frags + 3))) {
		bp->slowpath->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}
	/* This is a bit ugly.  First we use one BD which we mark as start,
	 * then for TSO or xsum we have a parsing-info BD,
	 * and only then we have the rest of the TSO BDs.
	 * (don't forget to mark the last one as last,
	 * and to unmap only AFTER you write to the BD ...)
	 * I would like to thank DovH for this mess.
	 */

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first bd */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	tx_bd->general_data |= 1; /* header nbd */

	/* remember the first bd of the packet */
	tx_buf->first_bd = bd_prod;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		struct iphdr *iph = ip_hdr(skb);
		u8 len;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;

		/* turn on parsing and get a bd */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		len = ((u8 *)iph - (u8 *)skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (len |
				    ((skb->protocol == htons(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
		pbd->ip_hlen = ip_hdrlen(skb) / 2;
		pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
		if (iph->protocol == IPPROTO_TCP) {
			struct tcphdr *th = tcp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->tcp_flags = htonl(tcp_flag_word(th)) & 0xFFFF;
			pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
			pbd->tcp_pseudo_csum = swab16(th->check);

		} else if (iph->protocol == IPPROTO_UDP) {
			struct udphdr *uh = udp_hdr(skb);

			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_TCP_CSUM;
			pbd->total_hlen += cpu_to_le16(4);
			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = 5; /* 10 >> 1 */
			pbd->tcp_pseudo_csum = 0;
			/* HW bug: we need to subtract 10 bytes before the
			 * UDP header from the csum
			 */
			uh->check = (u16) ~csum_fold(csum_sub(uh->check,
				csum_partial(((u8 *)(uh)-10), 10, 0)));
		}
	}
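
	/* Note on the workaround above: cs_offset is in 16-bit words
	 * (5 words = 10 bytes), so the checksum engine apparently starts
	 * summing 10 bytes before the UDP header; csum_sub() backs those
	 * bytes out of uh->check in advance so that the value the hardware
	 * finally writes is the correct wire checksum.
	 */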
#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb->len, PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %u\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
	   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
	if (skb_shinfo(skb)->gso_size &&
	    (skb->len > (bp->dev->mtu + ETH_HLEN))) {
		int hlen = 2 * le16_to_cpu(pbd->total_hlen);

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (tx_bd->nbytes > cpu_to_le16(hlen)) {
			/* we split the first bd into header and data bds
			 * to ease the pain of our fellow microcode engineers;
			 * we use one mapping for both bds.
			 * So far this has only been observed to happen
			 * in Other Operating Systems(TM)
			 */

			/* first fix first bd */
			nbd++;
			tx_bd->nbd = cpu_to_le16(nbd);
			tx_bd->nbytes = cpu_to_le16(hlen);

			/* we only print this as an error
			 * because we don't think this will ever happen.
			 */
			BNX2X_ERR("TSO split header size is %d (%x:%x)"
				  "  nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
				  tx_bd->addr_lo, tx_bd->nbd);

			/* now get a new data bd
			 * (after the pbd) and fill it */
			bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
			tx_bd = &fp->tx_desc_ring[bd_prod];

			tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
			tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
			tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
			tx_bd->vlan = cpu_to_le16(pkt_prod);

			/* this marks the bd as one that has no
			 * individual mapping;
			 * the FW ignores this flag in a bd not marked start
			 */
			tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
			DP(NETIF_MSG_TX_QUEUED,
			   "TSO split data size is %d (%x:%x)\n",
			   tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
		}

		if (!pbd) {
			/* supposed to be unreached
			 * (and therefore not handled properly...)
			 */
			BNX2X_ERR("LSO with no PBD\n");
			BUG();
		}

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->ip_id = swab16(ip_hdr(skb)->id);
		pbd->tcp_pseudo_csum =
			swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
						  ip_hdr(skb)->daddr,
						  0, IPPROTO_TCP, 0));
		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
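
	/* The pseudo-header checksum above is deliberately computed with a
	 * zero length field: ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN tells
	 * the chip to factor in the real per-segment length itself as it
	 * slices the LSO payload into MTU-sized frames.
	 */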
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page,
				       frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED, "frag %d  bd @%p"
		   "  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the bd as the last bd */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);
	tx_buf->skb = skb;

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next bd
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, pbd->total_hlen);

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u  bd %d\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, fp_index, 0);

	fp->tx_bd_prod = bd_prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->slowpath->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
static struct net_device_stats *bnx2x_get_stats(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return &bp->net_stats;
}
/* Called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, 1);
}
/* Called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	int rc;
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	rc = bnx2x_nic_unload(bp, 1);
	if (rc) {
		BNX2X_ERR("bnx2x_nic_unload failed: %d\n", rc);
		return rc;
	}
	bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
/* Called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr(addr->sa_data))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev))
		bnx2x_set_mac_addr(bp);

	return 0;
}
/* Called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int err = -EAGAIN;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;
		/* fall through */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN) {
			err = bnx2x_mdio22_read(bp, data->reg_num & 0x1f,
						&mii_regval);
			data->val_out = mii_regval;
		}
		spin_unlock_bh(&bp->phy_lock);
		return err;
	}
	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		spin_lock_bh(&bp->phy_lock);
		if (bp->state == BNX2X_STATE_OPEN)
			err = bnx2x_mdio22_write(bp, data->reg_num & 0x1f,
						 data->val_in);
		spin_unlock_bh(&bp->phy_lock);
		return err;
	}

	return -EOPNOTSUPP;
}
/* Called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, 0);
		bnx2x_nic_load(bp, 0);
	}
	return 0;
}
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_rx_mode(dev);
}
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	if (!netif_running(bp->dev))
		return;

	bp->in_reset_task = 1;

	bnx2x_netif_stop(bp);

	bnx2x_nic_unload(bp, 0);
	bnx2x_nic_load(bp, 0);

	bp->in_reset_task = 0;
}
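
/* One-time per-device PCI setup shared by the probe path: enable the
 * device, map BAR0 (registers) and BAR2 (doorbells), pick a DMA mask,
 * and seed the default ring sizes, coalescing parameters and timer.
 */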
static int __devinit bnx2x_init_board(struct pci_dev *pdev,
				      struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->port = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		printk(KERN_ERR PFX "Cannot obtain PCI resources,"
		       " aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);

	bp->in_reset_task = 0;

	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	INIT_WORK(&bp->sp_task, bnx2x_sp_task);

	dev->base_addr = pci_resource_start(pdev, 0);
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	bnx2x_get_hwinfo(bp);

	if (CHIP_REV(bp) == CHIP_REV_FPGA) {
		printk(KERN_ERR PFX "FPGA detected. MCP disabled,"
		       " will only init first device\n");
		onefunc = 1;
		nomcp = 1;
	}

	if (nomcp) {
		printk(KERN_ERR PFX "MCP disabled, will only"
		       " init first device\n");
		onefunc = 1;
	}

	if (onefunc && bp->port) {
		printk(KERN_ERR PFX "Second device disabled, exiting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->tx_quick_cons_trip_int = 0xff;
	bp->tx_quick_cons_trip = 0xff;
	bp->tx_ticks_int = 50;
	bp->tx_ticks = 50;

	bp->rx_quick_cons_trip_int = 0xff;
	bp->rx_quick_cons_trip = 0xff;
	bp->rx_ticks_int = 25;
	bp->rx_ticks = 25;

	bp->stats_ticks = 1000000 & 0xffff00;

	bp->timer_interval = HZ;
	bp->current_interval = (poll ? poll : HZ);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
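
/* Decode the link width/speed fields of the PCICFG link control register.
 * Width is reported in lanes (e.g. a decoded value of 8 prints as
 * "PCI-E x8"); speed is encoded as 1 = 2.5GHz (Gen1) and 2 = 5GHz (Gen2),
 * as consumed by the probe printout below.
 */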
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}
/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	int port = PCI_FUNC(pdev->devfn);
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	if (port && onefunc) {
		printk(KERN_ERR PFX "second function disabled. exiting\n");
		free_netdev(dev);
		return 0;
	}

	rc = bnx2x_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->get_stats = bnx2x_get_stats;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		if (bp->doorbells)
			iounmap(bp->doorbells);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
	return 0;
}
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	flush_scheduled_work();
	/*tasklet_kill(&bp->sp_task);*/
	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return 0;

	rc = bnx2x_nic_unload(bp, 0);
	if (rc)
		return rc;

	netif_device_detach(dev);
	pci_save_state(pdev);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, 0);
	return rc;
}
static struct pci_driver bnx2x_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2x_pci_tbl,
	.probe		= bnx2x_init_one,
	.remove		= __devexit_p(bnx2x_remove_one),
	.suspend	= bnx2x_suspend,
	.resume		= bnx2x_resume,
};
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);