1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <asm/bitops.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #ifdef NETIF_F_HW_VLAN_TX
39 #include <linux/if_vlan.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/zlib.h>
55 #define DRV_MODULE_NAME "bnx2"
56 #define PFX DRV_MODULE_NAME ": "
57 #define DRV_MODULE_VERSION "1.5.8"
58 #define DRV_MODULE_RELDATE "April 24, 2007"
60 #define RUN_AT(x) (jiffies + (x))
62 /* Time in jiffies before concluding the transmitter is hung. */
63 #define TX_TIMEOUT (5*HZ)
65 static const char version[] __devinitdata =
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
68 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
69 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
70 MODULE_LICENSE("GPL");
71 MODULE_VERSION(DRV_MODULE_VERSION);
73 static int disable_msi = 0;
75 module_param(disable_msi, int, 0);
76 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90 /* indexed by board_t, above */
93 } board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
105 static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
127 static struct flash_spec flash_table[] =
130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
202 /* Atmel Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
214 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
216 static inline u32 bnx2_tx_avail(struct bnx2 *bp)
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
231 return (bp->tx_ring_size - diff);
235 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
239 spin_lock_bh(&bp->indirect_lock);
240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
247 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
249 spin_lock_bh(&bp->indirect_lock);
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
252 spin_unlock_bh(&bp->indirect_lock);
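/* Context memory writes: the 5709 goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * pair and polls for the write request bit to clear; older chips use the
 * simple CTX_DATA_ADR/CTX_DATA window.
 */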
256 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
259 spin_lock_bh(&bp->indirect_lock);
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
277 spin_unlock_bh(&bp->indirect_lock);
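/* Manual MDIO access to the PHY.  If the MAC is auto-polling the PHY,
 * auto-polling is turned off for the duration of the access and re-enabled
 * afterwards.
 */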
281 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
301 for (i = 0; i < 50; i++) {
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
358 for (i = 0; i < 50; i++) {
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
387 bnx2_disable_int(struct bnx2 *bp)
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
395 bnx2_enable_int(struct bnx2 *bp)
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
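	/* Force an immediate coalescing cycle so that any events that arrived
	 * while interrupts were masked generate an interrupt right away.
	 */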
404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
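/* Mask chip interrupts and wait for any handler already running to finish.
 * bp->intr_sem stays non-zero until bnx2_netif_start(), making the interrupt
 * handlers return early in the meantime.
 */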
408 bnx2_disable_int_sync(struct bnx2 *bp)
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
416 bnx2_netif_stop(struct bnx2 *bp)
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
427 bnx2_netif_start(struct bnx2 *bp)
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
439 bnx2_free_mem(struct bnx2 *bp)
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
451 if (bp->status_blk) {
452 pci_free_consistent(bp->pdev, bp->status_stats_size,
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
455 bp->stats_blk = NULL;
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
473 vfree(bp->rx_buf_ring);
474 bp->rx_buf_ring = NULL;
478 bnx2_alloc_mem(struct bnx2 *bp)
480 int i, status_blk_size;
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
484 if (bp->tx_buf_ring == NULL)
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
496 if (bp->rx_buf_ring == NULL)
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
522 memset(bp->status_blk, 0, bp->status_stats_size);
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
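		/* The 5709 keeps its context memory in the host: allocate
		 * 8KB (0x2000) worth of DMA-coherent pages for it.
		 */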
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
549 bnx2_report_fw_link(struct bnx2 *bp)
551 u32 fw_link_status = 0;
556 switch (bp->line_speed) {
558 if (bp->duplex == DUPLEX_HALF)
559 fw_link_status = BNX2_LINK_STATUS_10HALF;
561 fw_link_status = BNX2_LINK_STATUS_10FULL;
564 if (bp->duplex == DUPLEX_HALF)
565 fw_link_status = BNX2_LINK_STATUS_100HALF;
567 fw_link_status = BNX2_LINK_STATUS_100FULL;
570 if (bp->duplex == DUPLEX_HALF)
571 fw_link_status = BNX2_LINK_STATUS_1000HALF;
573 fw_link_status = BNX2_LINK_STATUS_1000FULL;
576 if (bp->duplex == DUPLEX_HALF)
577 fw_link_status = BNX2_LINK_STATUS_2500HALF;
579 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
586 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
588 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
589 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
591 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
592 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
593 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
595 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
601 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605 bnx2_report_link(struct bnx2 *bp)
608 netif_carrier_on(bp->dev);
609 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
611 printk("%d Mbps ", bp->line_speed);
613 if (bp->duplex == DUPLEX_FULL)
614 printk("full duplex");
616 printk("half duplex");
619 if (bp->flow_ctrl & FLOW_CTRL_RX) {
620 printk(", receive ");
621 if (bp->flow_ctrl & FLOW_CTRL_TX)
622 printk("& transmit ");
625 printk(", transmit ");
627 printk("flow control ON");
632 netif_carrier_off(bp->dev);
633 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
636 bnx2_report_fw_link(bp);
640 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
642 u32 local_adv, remote_adv;
645 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
646 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
648 if (bp->duplex == DUPLEX_FULL) {
649 bp->flow_ctrl = bp->req_flow_ctrl;
654 if (bp->duplex != DUPLEX_FULL) {
658 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
659 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
662 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
663 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
664 bp->flow_ctrl |= FLOW_CTRL_TX;
665 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
666 bp->flow_ctrl |= FLOW_CTRL_RX;
670 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
671 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
673 if (bp->phy_flags & PHY_SERDES_FLAG) {
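		/* 1000Base-X advertises pause with different bits than copper;
		 * map them onto the ADVERTISE_PAUSE_* encoding so the
		 * resolution logic below works for both.
		 */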
674 u32 new_local_adv = 0;
675 u32 new_remote_adv = 0;
677 if (local_adv & ADVERTISE_1000XPAUSE)
678 new_local_adv |= ADVERTISE_PAUSE_CAP;
679 if (local_adv & ADVERTISE_1000XPSE_ASYM)
680 new_local_adv |= ADVERTISE_PAUSE_ASYM;
681 if (remote_adv & ADVERTISE_1000XPAUSE)
682 new_remote_adv |= ADVERTISE_PAUSE_CAP;
683 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
684 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
686 local_adv = new_local_adv;
687 remote_adv = new_remote_adv;
690 /* See Table 28B-3 of 802.3ab-1999 spec. */
691 if (local_adv & ADVERTISE_PAUSE_CAP) {
692 if(local_adv & ADVERTISE_PAUSE_ASYM) {
693 if (remote_adv & ADVERTISE_PAUSE_CAP) {
694 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
696 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
697 bp->flow_ctrl = FLOW_CTRL_RX;
701 if (remote_adv & ADVERTISE_PAUSE_CAP) {
702 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
706 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
707 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
708 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
710 bp->flow_ctrl = FLOW_CTRL_TX;
716 bnx2_5709s_linkup(struct bnx2 *bp)
722 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
723 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
724 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
726 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
727 bp->line_speed = bp->req_line_speed;
728 bp->duplex = bp->req_duplex;
731 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
733 case MII_BNX2_GP_TOP_AN_SPEED_10:
734 bp->line_speed = SPEED_10;
736 case MII_BNX2_GP_TOP_AN_SPEED_100:
737 bp->line_speed = SPEED_100;
739 case MII_BNX2_GP_TOP_AN_SPEED_1G:
740 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
741 bp->line_speed = SPEED_1000;
743 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
744 bp->line_speed = SPEED_2500;
747 if (val & MII_BNX2_GP_TOP_AN_FD)
748 bp->duplex = DUPLEX_FULL;
750 bp->duplex = DUPLEX_HALF;
755 bnx2_5708s_linkup(struct bnx2 *bp)
760 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
761 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
762 case BCM5708S_1000X_STAT1_SPEED_10:
763 bp->line_speed = SPEED_10;
765 case BCM5708S_1000X_STAT1_SPEED_100:
766 bp->line_speed = SPEED_100;
768 case BCM5708S_1000X_STAT1_SPEED_1G:
769 bp->line_speed = SPEED_1000;
771 case BCM5708S_1000X_STAT1_SPEED_2G5:
772 bp->line_speed = SPEED_2500;
775 if (val & BCM5708S_1000X_STAT1_FD)
776 bp->duplex = DUPLEX_FULL;
778 bp->duplex = DUPLEX_HALF;
784 bnx2_5706s_linkup(struct bnx2 *bp)
786 u32 bmcr, local_adv, remote_adv, common;
789 bp->line_speed = SPEED_1000;
791 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
792 if (bmcr & BMCR_FULLDPLX) {
793 bp->duplex = DUPLEX_FULL;
796 bp->duplex = DUPLEX_HALF;
799 if (!(bmcr & BMCR_ANENABLE)) {
803 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
804 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
806 common = local_adv & remote_adv;
807 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
809 if (common & ADVERTISE_1000XFULL) {
810 bp->duplex = DUPLEX_FULL;
813 bp->duplex = DUPLEX_HALF;
821 bnx2_copper_linkup(struct bnx2 *bp)
825 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
826 if (bmcr & BMCR_ANENABLE) {
827 u32 local_adv, remote_adv, common;
829 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
830 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
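		/* The partner's 1000BASE-T abilities in MII_STAT1000 sit two
		 * bits above the corresponding advertisement bits in
		 * MII_CTRL1000, hence the shift before masking.
		 */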
832 common = local_adv & (remote_adv >> 2);
833 if (common & ADVERTISE_1000FULL) {
834 bp->line_speed = SPEED_1000;
835 bp->duplex = DUPLEX_FULL;
837 else if (common & ADVERTISE_1000HALF) {
838 bp->line_speed = SPEED_1000;
839 bp->duplex = DUPLEX_HALF;
842 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
843 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
845 common = local_adv & remote_adv;
846 if (common & ADVERTISE_100FULL) {
847 bp->line_speed = SPEED_100;
848 bp->duplex = DUPLEX_FULL;
850 else if (common & ADVERTISE_100HALF) {
851 bp->line_speed = SPEED_100;
852 bp->duplex = DUPLEX_HALF;
854 else if (common & ADVERTISE_10FULL) {
855 bp->line_speed = SPEED_10;
856 bp->duplex = DUPLEX_FULL;
858 else if (common & ADVERTISE_10HALF) {
859 bp->line_speed = SPEED_10;
860 bp->duplex = DUPLEX_HALF;
869 if (bmcr & BMCR_SPEED100) {
870 bp->line_speed = SPEED_100;
873 bp->line_speed = SPEED_10;
875 if (bmcr & BMCR_FULLDPLX) {
876 bp->duplex = DUPLEX_FULL;
879 bp->duplex = DUPLEX_HALF;
887 bnx2_set_mac_link(struct bnx2 *bp)
891 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
892 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
893 (bp->duplex == DUPLEX_HALF)) {
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
897 /* Configure the EMAC mode register. */
898 val = REG_RD(bp, BNX2_EMAC_MODE);
900 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
901 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
902 BNX2_EMAC_MODE_25G_MODE);
905 switch (bp->line_speed) {
907 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
908 val |= BNX2_EMAC_MODE_PORT_MII_10M;
913 val |= BNX2_EMAC_MODE_PORT_MII;
916 val |= BNX2_EMAC_MODE_25G_MODE;
919 val |= BNX2_EMAC_MODE_PORT_GMII;
924 val |= BNX2_EMAC_MODE_PORT_GMII;
927 /* Set the MAC to operate in the appropriate duplex mode. */
928 if (bp->duplex == DUPLEX_HALF)
929 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
930 REG_WR(bp, BNX2_EMAC_MODE, val);
932 /* Enable/disable rx PAUSE. */
933 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
935 if (bp->flow_ctrl & FLOW_CTRL_RX)
936 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
937 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
939 /* Enable/disable tx PAUSE. */
940 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
941 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
943 if (bp->flow_ctrl & FLOW_CTRL_TX)
944 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
945 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
947 /* Acknowledge the interrupt. */
948 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
954 bnx2_enable_bmsr1(struct bnx2 *bp)
956 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
957 (CHIP_NUM(bp) == CHIP_NUM_5709))
958 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
959 MII_BNX2_BLK_ADDR_GP_STATUS);
963 bnx2_disable_bmsr1(struct bnx2 *bp)
965 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
966 (CHIP_NUM(bp) == CHIP_NUM_5709))
967 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
968 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
972 bnx2_test_and_enable_2g5(struct bnx2 *bp)
977 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
980 if (bp->autoneg & AUTONEG_SPEED)
981 bp->advertising |= ADVERTISED_2500baseX_Full;
983 if (CHIP_NUM(bp) == CHIP_NUM_5709)
984 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
986 bnx2_read_phy(bp, bp->mii_up1, &up1);
987 if (!(up1 & BCM5708S_UP1_2G5)) {
988 up1 |= BCM5708S_UP1_2G5;
989 bnx2_write_phy(bp, bp->mii_up1, up1);
993 if (CHIP_NUM(bp) == CHIP_NUM_5709)
994 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
995 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1001 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1006 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1009 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1010 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1012 bnx2_read_phy(bp, bp->mii_up1, &up1);
1013 if (up1 & BCM5708S_UP1_2G5) {
1014 up1 &= ~BCM5708S_UP1_2G5;
1015 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1021 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1027 bnx2_enable_forced_2g5(struct bnx2 *bp)
1031 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1034 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1037 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1038 MII_BNX2_BLK_ADDR_SERDES_DIG);
1039 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1040 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1041 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1042 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1044 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1045 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1046 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1048 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050 bmcr |= BCM5708S_BMCR_FORCE_2500;
1053 if (bp->autoneg & AUTONEG_SPEED) {
1054 bmcr &= ~BMCR_ANENABLE;
1055 if (bp->req_duplex == DUPLEX_FULL)
1056 bmcr |= BMCR_FULLDPLX;
1058 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1062 bnx2_disable_forced_2g5(struct bnx2 *bp)
1066 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1069 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1072 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1073 MII_BNX2_BLK_ADDR_SERDES_DIG);
1074 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1075 val &= ~MII_BNX2_SD_MISC1_FORCE;
1076 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1078 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1079 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1080 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1082 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1087 if (bp->autoneg & AUTONEG_SPEED)
1088 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1089 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1093 bnx2_set_link(struct bnx2 *bp)
1098 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1103 link_up = bp->link_up;
1105 bnx2_enable_bmsr1(bp);
1106 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1107 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1108 bnx2_disable_bmsr1(bp);
1110 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1111 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1114 val = REG_RD(bp, BNX2_EMAC_STATUS);
1115 if (val & BNX2_EMAC_STATUS_LINK)
1116 bmsr |= BMSR_LSTATUS;
1118 bmsr &= ~BMSR_LSTATUS;
1121 if (bmsr & BMSR_LSTATUS) {
1124 if (bp->phy_flags & PHY_SERDES_FLAG) {
1125 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1126 bnx2_5706s_linkup(bp);
1127 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1128 bnx2_5708s_linkup(bp);
1129 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1130 bnx2_5709s_linkup(bp);
1133 bnx2_copper_linkup(bp);
1135 bnx2_resolve_flow_ctrl(bp);
1138 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1139 (bp->autoneg & AUTONEG_SPEED))
1140 bnx2_disable_forced_2g5(bp);
1142 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1146 if (bp->link_up != link_up) {
1147 bnx2_report_link(bp);
1150 bnx2_set_mac_link(bp);
1156 bnx2_reset_phy(struct bnx2 *bp)
1161 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1163 #define PHY_RESET_MAX_WAIT 100
1164 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1167 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1168 if (!(reg & BMCR_RESET)) {
1173 if (i == PHY_RESET_MAX_WAIT) {
1180 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1184 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1185 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1187 if (bp->phy_flags & PHY_SERDES_FLAG) {
1188 adv = ADVERTISE_1000XPAUSE;
1191 adv = ADVERTISE_PAUSE_CAP;
1194 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1195 if (bp->phy_flags & PHY_SERDES_FLAG) {
1196 adv = ADVERTISE_1000XPSE_ASYM;
1199 adv = ADVERTISE_PAUSE_ASYM;
1202 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1203 if (bp->phy_flags & PHY_SERDES_FLAG) {
1204 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1207 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1214 bnx2_setup_serdes_phy(struct bnx2 *bp)
1219 if (!(bp->autoneg & AUTONEG_SPEED)) {
1221 int force_link_down = 0;
1223 if (bp->req_line_speed == SPEED_2500) {
1224 if (!bnx2_test_and_enable_2g5(bp))
1225 force_link_down = 1;
1226 } else if (bp->req_line_speed == SPEED_1000) {
1227 if (bnx2_test_and_disable_2g5(bp))
1228 force_link_down = 1;
1230 bnx2_read_phy(bp, bp->mii_adv, &adv);
1231 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1233 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1234 new_bmcr = bmcr & ~BMCR_ANENABLE;
1235 new_bmcr |= BMCR_SPEED1000;
1237 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1238 if (bp->req_line_speed == SPEED_2500)
1239 bnx2_enable_forced_2g5(bp);
1240 else if (bp->req_line_speed == SPEED_1000) {
1241 bnx2_disable_forced_2g5(bp);
1242 new_bmcr &= ~0x2000;
1245 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1246 if (bp->req_line_speed == SPEED_2500)
1247 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1249 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1252 if (bp->req_duplex == DUPLEX_FULL) {
1253 adv |= ADVERTISE_1000XFULL;
1254 new_bmcr |= BMCR_FULLDPLX;
1257 adv |= ADVERTISE_1000XHALF;
1258 new_bmcr &= ~BMCR_FULLDPLX;
1260 if ((new_bmcr != bmcr) || (force_link_down)) {
1261 /* Force a link down visible on the other side */
1263 bnx2_write_phy(bp, bp->mii_adv, adv &
1264 ~(ADVERTISE_1000XFULL |
1265 ADVERTISE_1000XHALF));
1266 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1267 BMCR_ANRESTART | BMCR_ANENABLE);
1270 netif_carrier_off(bp->dev);
1271 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1272 bnx2_report_link(bp);
1274 bnx2_write_phy(bp, bp->mii_adv, adv);
1275 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1277 bnx2_resolve_flow_ctrl(bp);
1278 bnx2_set_mac_link(bp);
1283 bnx2_test_and_enable_2g5(bp);
1285 if (bp->advertising & ADVERTISED_1000baseT_Full)
1286 new_adv |= ADVERTISE_1000XFULL;
1288 new_adv |= bnx2_phy_get_pause_adv(bp);
1290 bnx2_read_phy(bp, bp->mii_adv, &adv);
1291 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1293 bp->serdes_an_pending = 0;
1294 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1295 /* Force a link down visible on the other side */
1297 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1298 spin_unlock_bh(&bp->phy_lock);
1300 spin_lock_bh(&bp->phy_lock);
1303 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1304 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1306 /* Speed up link-up time when the link partner
1307 * does not autonegotiate, which is very common
1308 * in blade servers. Some blade servers use
1309 * IPMI for keyboard input and it's important
1310 * to minimize link disruptions. Autoneg. involves
1311 * exchanging base pages plus 3 next pages and
1312 * normally completes in about 120 msec.
1314 bp->current_interval = SERDES_AN_TIMEOUT;
1315 bp->serdes_an_pending = 1;
1316 mod_timer(&bp->timer, jiffies + bp->current_interval);
1318 bnx2_resolve_flow_ctrl(bp);
1319 bnx2_set_mac_link(bp);
1325 #define ETHTOOL_ALL_FIBRE_SPEED \
1326 (ADVERTISED_1000baseT_Full)
1328 #define ETHTOOL_ALL_COPPER_SPEED \
1329 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1330 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1331 ADVERTISED_1000baseT_Full)
1333 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1334 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1336 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1339 bnx2_setup_copper_phy(struct bnx2 *bp)
1344 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1346 if (bp->autoneg & AUTONEG_SPEED) {
1347 u32 adv_reg, adv1000_reg;
1348 u32 new_adv_reg = 0;
1349 u32 new_adv1000_reg = 0;
1351 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
1352 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1353 ADVERTISE_PAUSE_ASYM);
1355 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1356 adv1000_reg &= PHY_ALL_1000_SPEED;
1358 if (bp->advertising & ADVERTISED_10baseT_Half)
1359 new_adv_reg |= ADVERTISE_10HALF;
1360 if (bp->advertising & ADVERTISED_10baseT_Full)
1361 new_adv_reg |= ADVERTISE_10FULL;
1362 if (bp->advertising & ADVERTISED_100baseT_Half)
1363 new_adv_reg |= ADVERTISE_100HALF;
1364 if (bp->advertising & ADVERTISED_100baseT_Full)
1365 new_adv_reg |= ADVERTISE_100FULL;
1366 if (bp->advertising & ADVERTISED_1000baseT_Full)
1367 new_adv1000_reg |= ADVERTISE_1000FULL;
1369 new_adv_reg |= ADVERTISE_CSMA;
1371 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1373 if ((adv1000_reg != new_adv1000_reg) ||
1374 (adv_reg != new_adv_reg) ||
1375 ((bmcr & BMCR_ANENABLE) == 0)) {
1377 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
1378 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1379 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
1382 else if (bp->link_up) {
1383 /* Flow ctrl may have changed from auto to forced */
1384 /* or vice-versa. */
1386 bnx2_resolve_flow_ctrl(bp);
1387 bnx2_set_mac_link(bp);
1393 if (bp->req_line_speed == SPEED_100) {
1394 new_bmcr |= BMCR_SPEED100;
1396 if (bp->req_duplex == DUPLEX_FULL) {
1397 new_bmcr |= BMCR_FULLDPLX;
1399 if (new_bmcr != bmcr) {
1402 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1403 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1405 if (bmsr & BMSR_LSTATUS) {
1406 /* Force link down */
1407 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1408 spin_unlock_bh(&bp->phy_lock);
1410 spin_lock_bh(&bp->phy_lock);
1412 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1413 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1416 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1418 /* Normally, the new speed is set up after the link has
1419 * gone down and come back up. In some cases, the link will
1420 * not go down, so we need to set up the new speed here.
1422 if (bmsr & BMSR_LSTATUS) {
1423 bp->line_speed = bp->req_line_speed;
1424 bp->duplex = bp->req_duplex;
1425 bnx2_resolve_flow_ctrl(bp);
1426 bnx2_set_mac_link(bp);
1429 bnx2_resolve_flow_ctrl(bp);
1430 bnx2_set_mac_link(bp);
1436 bnx2_setup_phy(struct bnx2 *bp)
1438 if (bp->loopback == MAC_LOOPBACK)
1441 if (bp->phy_flags & PHY_SERDES_FLAG) {
1442 return (bnx2_setup_serdes_phy(bp));
1445 return (bnx2_setup_copper_phy(bp));
1450 bnx2_init_5709s_phy(struct bnx2 *bp)
1454 bp->mii_bmcr = MII_BMCR + 0x10;
1455 bp->mii_bmsr = MII_BMSR + 0x10;
1456 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1457 bp->mii_adv = MII_ADVERTISE + 0x10;
1458 bp->mii_lpa = MII_LPA + 0x10;
1459 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1461 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1462 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
1464 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
1469 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
1470 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
1471 val |= MII_BNX2_SD_1000XCTL1_FIBER;
1472 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
1474 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1475 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
1476 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
1477 val |= BCM5708S_UP1_2G5;
1479 val &= ~BCM5708S_UP1_2G5;
1480 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
1482 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
1483 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
1484 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
1485 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
1487 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
1489 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
1490 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
1491 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
1493 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1499 bnx2_init_5708s_phy(struct bnx2 *bp)
1505 bp->mii_up1 = BCM5708S_UP1;
1507 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1508 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1509 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1511 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1512 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1513 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1515 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1516 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1517 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1519 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1520 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1521 val |= BCM5708S_UP1_2G5;
1522 bnx2_write_phy(bp, BCM5708S_UP1, val);
1525 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
1526 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1527 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
1528 /* increase tx signal amplitude */
1529 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1530 BCM5708S_BLK_ADDR_TX_MISC);
1531 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1532 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1533 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1534 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1537 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
1538 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1543 is_backplane = REG_RD_IND(bp, bp->shmem_base +
1544 BNX2_SHARED_HW_CFG_CONFIG);
1545 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1546 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1547 BCM5708S_BLK_ADDR_TX_MISC);
1548 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1549 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1550 BCM5708S_BLK_ADDR_DIG);
1557 bnx2_init_5706s_phy(struct bnx2 *bp)
1561 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1563 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1564 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
1566 if (bp->dev->mtu > 1500) {
1569 /* Set extended packet length bit */
1570 bnx2_write_phy(bp, 0x18, 0x7);
1571 bnx2_read_phy(bp, 0x18, &val);
1572 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1574 bnx2_write_phy(bp, 0x1c, 0x6c00);
1575 bnx2_read_phy(bp, 0x1c, &val);
1576 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1581 bnx2_write_phy(bp, 0x18, 0x7);
1582 bnx2_read_phy(bp, 0x18, &val);
1583 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1585 bnx2_write_phy(bp, 0x1c, 0x6c00);
1586 bnx2_read_phy(bp, 0x1c, &val);
1587 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1594 bnx2_init_copper_phy(struct bnx2 *bp)
1600 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1601 bnx2_write_phy(bp, 0x18, 0x0c00);
1602 bnx2_write_phy(bp, 0x17, 0x000a);
1603 bnx2_write_phy(bp, 0x15, 0x310b);
1604 bnx2_write_phy(bp, 0x17, 0x201f);
1605 bnx2_write_phy(bp, 0x15, 0x9506);
1606 bnx2_write_phy(bp, 0x17, 0x401f);
1607 bnx2_write_phy(bp, 0x15, 0x14e2);
1608 bnx2_write_phy(bp, 0x18, 0x0400);
1611 if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
1612 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
1613 MII_BNX2_DSP_EXPAND_REG | 0x8);
1614 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1616 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
1619 if (bp->dev->mtu > 1500) {
1620 /* Set extended packet length bit */
1621 bnx2_write_phy(bp, 0x18, 0x7);
1622 bnx2_read_phy(bp, 0x18, &val);
1623 bnx2_write_phy(bp, 0x18, val | 0x4000);
1625 bnx2_read_phy(bp, 0x10, &val);
1626 bnx2_write_phy(bp, 0x10, val | 0x1);
1629 bnx2_write_phy(bp, 0x18, 0x7);
1630 bnx2_read_phy(bp, 0x18, &val);
1631 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1633 bnx2_read_phy(bp, 0x10, &val);
1634 bnx2_write_phy(bp, 0x10, val & ~0x1);
1637 /* ethernet@wirespeed */
1638 bnx2_write_phy(bp, 0x18, 0x7007);
1639 bnx2_read_phy(bp, 0x18, &val);
1640 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
1646 bnx2_init_phy(struct bnx2 *bp)
1651 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1652 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1654 bp->mii_bmcr = MII_BMCR;
1655 bp->mii_bmsr = MII_BMSR;
1656 bp->mii_bmsr1 = MII_BMSR;
1657 bp->mii_adv = MII_ADVERTISE;
1658 bp->mii_lpa = MII_LPA;
1660 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1662 bnx2_read_phy(bp, MII_PHYSID1, &val);
1663 bp->phy_id = val << 16;
1664 bnx2_read_phy(bp, MII_PHYSID2, &val);
1665 bp->phy_id |= val & 0xffff;
1667 if (bp->phy_flags & PHY_SERDES_FLAG) {
1668 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1669 rc = bnx2_init_5706s_phy(bp);
1670 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1671 rc = bnx2_init_5708s_phy(bp);
1672 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1673 rc = bnx2_init_5709s_phy(bp);
1676 rc = bnx2_init_copper_phy(bp);
1685 bnx2_set_mac_loopback(struct bnx2 *bp)
1689 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1690 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1691 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1692 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1697 static int bnx2_test_link(struct bnx2 *);
1700 bnx2_set_phy_loopback(struct bnx2 *bp)
1705 spin_lock_bh(&bp->phy_lock);
1706 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
1708 spin_unlock_bh(&bp->phy_lock);
1712 for (i = 0; i < 10; i++) {
1713 if (bnx2_test_link(bp) == 0)
1718 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1719 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1720 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1721 BNX2_EMAC_MODE_25G_MODE);
1723 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1724 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1730 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
1736 msg_data |= bp->fw_wr_seq;
1738 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1740 /* wait for an acknowledgement. */
1741 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1744 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
1746 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1749 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1752 /* If we timed out, inform the firmware that this is the case. */
1753 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1755 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1758 msg_data &= ~BNX2_DRV_MSG_CODE;
1759 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1761 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
1766 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1773 bnx2_init_5709_context(struct bnx2 *bp)
1778 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
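	/* The field at bit 16 encodes the host page size as a power of two
	 * relative to 256 bytes (BCM_PAGE_BITS - 8).
	 */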
1779 val |= (BCM_PAGE_BITS - 8) << 16;
1780 REG_WR(bp, BNX2_CTX_COMMAND, val);
1781 for (i = 0; i < bp->ctx_pages; i++) {
1784 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
1785 (bp->ctx_blk_mapping[i] & 0xffffffff) |
1786 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
1787 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
1788 (u64) bp->ctx_blk_mapping[i] >> 32);
1789 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
1790 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
1791 for (j = 0; j < 10; j++) {
1793 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
1794 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
1798 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
1807 bnx2_init_context(struct bnx2 *bp)
1813 u32 vcid_addr, pcid_addr, offset;
1817 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1820 vcid_addr = GET_PCID_ADDR(vcid);
1822 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1827 pcid_addr = GET_PCID_ADDR(new_vcid);
1830 vcid_addr = GET_CID_ADDR(vcid);
1831 pcid_addr = vcid_addr;
1834 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1835 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1837 /* Zero out the context. */
1838 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1839 CTX_WR(bp, 0x00, offset, 0);
1842 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1843 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1848 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1854 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1855 if (good_mbuf == NULL) {
1856 printk(KERN_ERR PFX "Failed to allocate memory in "
1857 "bnx2_alloc_bad_rbuf\n");
1861 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1862 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1866 /* Allocate a bunch of mbufs and save the good ones in an array. */
1867 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1868 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1869 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1871 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1873 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1875 /* The addresses with Bit 9 set are bad memory blocks. */
1876 if (!(val & (1 << 9))) {
1877 good_mbuf[good_mbuf_cnt] = (u16) val;
1881 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1884 /* Free the good ones back to the mbuf pool thus discarding
1885 * all the bad ones. */
1886 while (good_mbuf_cnt) {
1889 val = good_mbuf[good_mbuf_cnt];
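		/* Rebuild the value written to BNX2_RBUF_FW_BUF_FREE: the
		 * buffer index in both the upper (bit 9 and up) and lower
		 * fields, with the low bit set.
		 */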
1890 val = (val << 9) | val | 1;
1892 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1899 bnx2_set_mac_addr(struct bnx2 *bp)
1902 u8 *mac_addr = bp->dev->dev_addr;
1904 val = (mac_addr[0] << 8) | mac_addr[1];
1906 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1908 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
1909 (mac_addr[4] << 8) | mac_addr[5];
1911 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1915 bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1917 struct sk_buff *skb;
1918 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1920 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
1921 unsigned long align;
1923 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1928 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
1929 skb_reserve(skb, BNX2_RX_ALIGN - align);
1931 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1932 PCI_DMA_FROMDEVICE);
1935 pci_unmap_addr_set(rx_buf, mapping, mapping);
1937 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1938 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1940 bp->rx_prod_bseq += bp->rx_buf_use_size;
1946 bnx2_phy_int(struct bnx2 *bp)
1948 u32 new_link_state, old_link_state;
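	/* A link change is signalled by the attention bit differing from its
	 * ack bit; the SET/CLEAR command written below copies the new state
	 * into the ack bits so the event is not reported again.
	 */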
1950 new_link_state = bp->status_blk->status_attn_bits &
1951 STATUS_ATTN_BITS_LINK_STATE;
1952 old_link_state = bp->status_blk->status_attn_bits_ack &
1953 STATUS_ATTN_BITS_LINK_STATE;
1954 if (new_link_state != old_link_state) {
1955 if (new_link_state) {
1956 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1957 STATUS_ATTN_BITS_LINK_STATE);
1960 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1961 STATUS_ATTN_BITS_LINK_STATE);
1968 bnx2_tx_int(struct bnx2 *bp)
1970 struct status_block *sblk = bp->status_blk;
1971 u16 hw_cons, sw_cons, sw_ring_cons;
1974 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
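	/* The last index in each ring page holds the next-page pointer BD and
	 * never carries a packet, so step over it when the hardware consumer
	 * index lands there.
	 */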
1975 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1978 sw_cons = bp->tx_cons;
1980 while (sw_cons != hw_cons) {
1981 struct sw_bd *tx_buf;
1982 struct sk_buff *skb;
1985 sw_ring_cons = TX_RING_IDX(sw_cons);
1987 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1990 /* partial BD completions possible with TSO packets */
1991 if (skb_is_gso(skb)) {
1992 u16 last_idx, last_ring_idx;
1994 last_idx = sw_cons +
1995 skb_shinfo(skb)->nr_frags + 1;
1996 last_ring_idx = sw_ring_cons +
1997 skb_shinfo(skb)->nr_frags + 1;
1998 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
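			/* Signed 16-bit arithmetic handles wraparound of the
			 * consumer index: a positive result means the last BD
			 * of this packet has not completed yet.
			 */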
2001 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2006 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
2007 skb_headlen(skb), PCI_DMA_TODEVICE);
2010 last = skb_shinfo(skb)->nr_frags;
2012 for (i = 0; i < last; i++) {
2013 sw_cons = NEXT_TX_BD(sw_cons);
2015 pci_unmap_page(bp->pdev,
2017 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
2019 skb_shinfo(skb)->frags[i].size,
2023 sw_cons = NEXT_TX_BD(sw_cons);
2025 tx_free_bd += last + 1;
2029 hw_cons = bp->hw_tx_cons =
2030 sblk->status_tx_quick_consumer_index0;
2032 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
2037 bp->tx_cons = sw_cons;
2038 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2039 * before checking for netif_queue_stopped(). Without the
2040 * memory barrier, there is a small possibility that bnx2_start_xmit()
2041 * will miss it and cause the queue to be stopped forever.
2045 if (unlikely(netif_queue_stopped(bp->dev)) &&
2046 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
2047 netif_tx_lock(bp->dev);
2048 if ((netif_queue_stopped(bp->dev)) &&
2049 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
2050 netif_wake_queue(bp->dev);
2051 netif_tx_unlock(bp->dev);
2056 bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
2059 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2060 struct rx_bd *cons_bd, *prod_bd;
2062 cons_rx_buf = &bp->rx_buf_ring[cons];
2063 prod_rx_buf = &bp->rx_buf_ring[prod];
2065 pci_dma_sync_single_for_device(bp->pdev,
2066 pci_unmap_addr(cons_rx_buf, mapping),
2067 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2069 bp->rx_prod_bseq += bp->rx_buf_use_size;
2071 prod_rx_buf->skb = skb;
2076 pci_unmap_addr_set(prod_rx_buf, mapping,
2077 pci_unmap_addr(cons_rx_buf, mapping));
2079 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2080 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2081 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2082 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2086 bnx2_rx_int(struct bnx2 *bp, int budget)
2088 struct status_block *sblk = bp->status_blk;
2089 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2090 struct l2_fhdr *rx_hdr;
2093 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
2094 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
2097 sw_cons = bp->rx_cons;
2098 sw_prod = bp->rx_prod;
2100 /* Memory barrier necessary as speculative reads of the rx
2101 * buffer can be ahead of the index in the status block
2104 while (sw_cons != hw_cons) {
2107 struct sw_bd *rx_buf;
2108 struct sk_buff *skb;
2109 dma_addr_t dma_addr;
2111 sw_ring_cons = RX_RING_IDX(sw_cons);
2112 sw_ring_prod = RX_RING_IDX(sw_prod);
2114 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
2119 dma_addr = pci_unmap_addr(rx_buf, mapping);
2121 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
2122 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2124 rx_hdr = (struct l2_fhdr *) skb->data;
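		/* l2_fhdr_pkt_len includes the 4-byte frame CRC; drop it. */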
2125 len = rx_hdr->l2_fhdr_pkt_len - 4;
2127 if ((status = rx_hdr->l2_fhdr_status) &
2128 (L2_FHDR_ERRORS_BAD_CRC |
2129 L2_FHDR_ERRORS_PHY_DECODE |
2130 L2_FHDR_ERRORS_ALIGNMENT |
2131 L2_FHDR_ERRORS_TOO_SHORT |
2132 L2_FHDR_ERRORS_GIANT_FRAME)) {
2137 /* Since we don't have a jumbo ring, copy small packets
2140 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
2141 struct sk_buff *new_skb;
2143 new_skb = netdev_alloc_skb(bp->dev, len + 2);
2144 if (new_skb == NULL)
2148 skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
2149 new_skb->data, len + 2);
2150 skb_reserve(new_skb, 2);
2151 skb_put(new_skb, len);
2153 bnx2_reuse_rx_skb(bp, skb,
2154 sw_ring_cons, sw_ring_prod);
2158 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
2159 pci_unmap_single(bp->pdev, dma_addr,
2160 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
2162 skb_reserve(skb, bp->rx_offset);
2167 bnx2_reuse_rx_skb(bp, skb,
2168 sw_ring_cons, sw_ring_prod);
2172 skb->protocol = eth_type_trans(skb, bp->dev);
2174 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
2175 (ntohs(skb->protocol) != 0x8100)) {
2182 skb->ip_summed = CHECKSUM_NONE;
2184 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
2185 L2_FHDR_STATUS_UDP_DATAGRAM))) {
2187 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
2188 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
2189 skb->ip_summed = CHECKSUM_UNNECESSARY;
2193 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
2194 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
2195 rx_hdr->l2_fhdr_vlan_tag);
2199 netif_receive_skb(skb);
2201 bp->dev->last_rx = jiffies;
2205 sw_cons = NEXT_RX_BD(sw_cons);
2206 sw_prod = NEXT_RX_BD(sw_prod);
2208 if ((rx_pkt == budget))
2211 /* Refresh hw_cons to see if there is new work */
2212 if (sw_cons == hw_cons) {
2213 hw_cons = bp->hw_rx_cons =
2214 sblk->status_rx_quick_consumer_index0;
2215 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
2220 bp->rx_cons = sw_cons;
2221 bp->rx_prod = sw_prod;
2223 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
2225 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
2233 /* MSI ISR - The only difference between this and the INTx ISR
2234 * is that the MSI interrupt is always serviced.
2237 bnx2_msi(int irq, void *dev_instance)
2239 struct net_device *dev = dev_instance;
2240 struct bnx2 *bp = netdev_priv(dev);
2242 prefetch(bp->status_blk);
2243 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2244 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2245 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2247 /* Return here if interrupt is disabled. */
2248 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2251 netif_rx_schedule(dev);
2257 bnx2_interrupt(int irq, void *dev_instance)
2259 struct net_device *dev = dev_instance;
2260 struct bnx2 *bp = netdev_priv(dev);
2262 /* When using INTx, it is possible for the interrupt to arrive
2263 * at the CPU before the status block posted prior to the
2264 * interrupt. Reading a register will flush the status block.
2265 * When using MSI, the MSI message will always complete after
2266 * the status block write.
2268 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
2269 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
2270 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
2273 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2274 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
2275 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
2277 /* Return here if interrupt is shared and is disabled. */
2278 if (unlikely(atomic_read(&bp->intr_sem) != 0))
2281 netif_rx_schedule(dev);
2287 bnx2_has_work(struct bnx2 *bp)
2289 struct status_block *sblk = bp->status_blk;
2291 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2292 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2295 if ((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) !=
2296 (sblk->status_attn_bits_ack & STATUS_ATTN_BITS_LINK_STATE))
2303 bnx2_poll(struct net_device *dev, int *budget)
2305 struct bnx2 *bp = netdev_priv(dev);
2307 if ((bp->status_blk->status_attn_bits &
2308 STATUS_ATTN_BITS_LINK_STATE) !=
2309 (bp->status_blk->status_attn_bits_ack &
2310 STATUS_ATTN_BITS_LINK_STATE)) {
2312 spin_lock(&bp->phy_lock);
2314 spin_unlock(&bp->phy_lock);
2316 /* This is needed to take care of transient status
2317 * during link changes.
2319 REG_WR(bp, BNX2_HC_COMMAND,
2320 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
2321 REG_RD(bp, BNX2_HC_COMMAND);
2324 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
2327 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
2328 int orig_budget = *budget;
2331 if (orig_budget > dev->quota)
2332 orig_budget = dev->quota;
2334 work_done = bnx2_rx_int(bp, orig_budget);
2335 *budget -= work_done;
2336 dev->quota -= work_done;
2339 bp->last_status_idx = bp->status_blk->status_idx;
2342 if (!bnx2_has_work(bp)) {
2343 netif_rx_complete(dev);
2344 if (likely(bp->flags & USING_MSI_FLAG)) {
2345 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2346 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2347 bp->last_status_idx);
2350 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2351 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2352 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2353 bp->last_status_idx);
2355 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2356 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2357 bp->last_status_idx);
2364 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
2365 * from set_multicast.
2368 bnx2_set_rx_mode(struct net_device *dev)
2370 struct bnx2 *bp = netdev_priv(dev);
2371 u32 rx_mode, sort_mode;
2374 spin_lock_bh(&bp->phy_lock);
2376 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2377 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2378 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2380 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
2381 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2383 if (!(bp->flags & ASF_ENABLE_FLAG))
2384 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
2386 if (dev->flags & IFF_PROMISC) {
2387 /* Promiscuous mode. */
2388 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
2389 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2390 BNX2_RPM_SORT_USER0_PROM_VLAN;
2392 else if (dev->flags & IFF_ALLMULTI) {
2393 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2394 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2397 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2400 /* Accept one or more multicast(s). */
2401 struct dev_mc_list *mclist;
2402 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2407 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
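		/* Hash each address into a 256-bit filter: the low byte of the
		 * CRC picks one of eight 32-bit registers (its top three bits)
		 * and a bit within that register (its low five bits).
		 */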
2409 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2410 i++, mclist = mclist->next) {
2412 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2414 regidx = (bit & 0xe0) >> 5;
2416 mc_filter[regidx] |= (1 << bit);
2419 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2420 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2424 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2427 if (rx_mode != bp->rx_mode) {
2428 bp->rx_mode = rx_mode;
2429 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2432 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2433 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2434 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2436 spin_unlock_bh(&bp->phy_lock);
2439 #define FW_BUF_SIZE 0x8000
2442 bnx2_gunzip_init(struct bnx2 *bp)
2444 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2447 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2450 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2451 if (bp->strm->workspace == NULL)
2461 vfree(bp->gunzip_buf);
2462 bp->gunzip_buf = NULL;
2465 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2466 "uncompression.\n", bp->dev->name);
2471 bnx2_gunzip_end(struct bnx2 *bp)
2473 kfree(bp->strm->workspace);
2478 if (bp->gunzip_buf) {
2479 vfree(bp->gunzip_buf);
2480 bp->gunzip_buf = NULL;
2485 bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2489 /* check gzip header */
2490 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
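	/* Skip the optional NUL-terminated original file name if FNAME is
	 * set, then hand the raw deflate stream to zlib (negative windowBits).
	 */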
2496 if (zbuf[3] & FNAME)
2497 while ((zbuf[n++] != 0) && (n < len));
2499 bp->strm->next_in = zbuf + n;
2500 bp->strm->avail_in = len - n;
2501 bp->strm->next_out = bp->gunzip_buf;
2502 bp->strm->avail_out = FW_BUF_SIZE;
2504 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2508 rc = zlib_inflate(bp->strm, Z_FINISH);
2510 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2511 *outbuf = bp->gunzip_buf;
2513 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2514 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2515 bp->dev->name, bp->strm->msg);
2517 zlib_inflateEnd(bp->strm);
2519 if (rc == Z_STREAM_END)
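/* Load an RV2P firmware image: each 8-byte instruction pair is written
 * through the INSTR_HIGH/INSTR_LOW registers and committed with an
 * ADDR_CMD write to the selected processor, which is then reset. */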
2526 load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2533 for (i = 0; i < rv2p_code_len; i += 8) {
2534 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
2536 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
2539 if (rv2p_proc == RV2P_PROC1) {
2540 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2541 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2544 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2545 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2549 /* Reset the processor, un-stall is done later. */
2550 if (rv2p_proc == RV2P_PROC1) {
2551 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2554 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
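/* Load firmware into one of the on-chip RISC processors: halt the CPU,
 * copy the text, data, sbss, bss and read-only sections into its
 * scratchpad through indirect register writes, set the program counter
 * to the image start address and release the halt to start execution. */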
2559 load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2566 val = REG_RD_IND(bp, cpu_reg->mode);
2567 val |= cpu_reg->mode_value_halt;
2568 REG_WR_IND(bp, cpu_reg->mode, val);
2569 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2571 /* Load the Text area. */
2572 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
2577 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2587 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2588 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
2592 /* Load the Data area. */
2593 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2597 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2598 REG_WR_IND(bp, offset, fw->data[j]);
2602 /* Load the SBSS area. */
2603 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2607 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2608 REG_WR_IND(bp, offset, fw->sbss[j]);
2612 /* Load the BSS area. */
2613 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2617 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2618 REG_WR_IND(bp, offset, fw->bss[j]);
2622 /* Load the Read-Only area. */
2623 offset = cpu_reg->spad_base +
2624 (fw->rodata_addr - cpu_reg->mips_view_base);
2628 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2629 REG_WR_IND(bp, offset, fw->rodata[j]);
2633 /* Clear the pre-fetch instruction. */
2634 REG_WR_IND(bp, cpu_reg->inst, 0);
2635 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2637 /* Start the CPU. */
2638 val = REG_RD_IND(bp, cpu_reg->mode);
2639 val &= ~cpu_reg->mode_value_halt;
2640 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2641 REG_WR_IND(bp, cpu_reg->mode, val);
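/* Decompress and load firmware for both RV2P engines and the RXP, TXP,
 * TPAT, COM and CP processors, selecting the 5709 or 5706/5708 images
 * based on CHIP_NUM. */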
2647 bnx2_init_cpus(struct bnx2 *bp)
2649 struct cpu_reg cpu_reg;
2655 if ((rc = bnx2_gunzip_init(bp)) != 0)
2658 /* Initialize the RV2P processor. */
2659 rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
2664 load_rv2p_fw(bp, text, text_len, RV2P_PROC1);
2666 rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
2671 load_rv2p_fw(bp, text, text_len, RV2P_PROC2);
2673 /* Initialize the RX Processor. */
2674 cpu_reg.mode = BNX2_RXP_CPU_MODE;
2675 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
2676 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
2677 cpu_reg.state = BNX2_RXP_CPU_STATE;
2678 cpu_reg.state_value_clear = 0xffffff;
2679 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
2680 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
2681 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
2682 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
2683 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
2684 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
2685 cpu_reg.mips_view_base = 0x8000000;
2687 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2688 fw = &bnx2_rxp_fw_09;
2690 fw = &bnx2_rxp_fw_06;
2692 rc = load_cpu_fw(bp, &cpu_reg, fw);
2696 /* Initialize the TX Processor. */
2697 cpu_reg.mode = BNX2_TXP_CPU_MODE;
2698 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
2699 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
2700 cpu_reg.state = BNX2_TXP_CPU_STATE;
2701 cpu_reg.state_value_clear = 0xffffff;
2702 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
2703 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
2704 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
2705 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
2706 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
2707 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
2708 cpu_reg.mips_view_base = 0x8000000;
2710 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2711 fw = &bnx2_txp_fw_09;
2713 fw = &bnx2_txp_fw_06;
2715 rc = load_cpu_fw(bp, &cpu_reg, fw);
2719 /* Initialize the TX Patch-up Processor. */
2720 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
2721 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
2722 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
2723 cpu_reg.state = BNX2_TPAT_CPU_STATE;
2724 cpu_reg.state_value_clear = 0xffffff;
2725 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
2726 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
2727 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
2728 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
2729 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
2730 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
2731 cpu_reg.mips_view_base = 0x8000000;
2733 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2734 fw = &bnx2_tpat_fw_09;
2736 fw = &bnx2_tpat_fw_06;
2738 rc = load_cpu_fw(bp, &cpu_reg, fw);
2742 /* Initialize the Completion Processor. */
2743 cpu_reg.mode = BNX2_COM_CPU_MODE;
2744 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
2745 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
2746 cpu_reg.state = BNX2_COM_CPU_STATE;
2747 cpu_reg.state_value_clear = 0xffffff;
2748 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
2749 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
2750 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
2751 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
2752 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
2753 cpu_reg.spad_base = BNX2_COM_SCRATCH;
2754 cpu_reg.mips_view_base = 0x8000000;
2756 if (CHIP_NUM(bp) == CHIP_NUM_5709)
2757 fw = &bnx2_com_fw_09;
2759 fw = &bnx2_com_fw_06;
2761 rc = load_cpu_fw(bp, &cpu_reg, fw);
2765 /* Initialize the Command Processor. */
2766 cpu_reg.mode = BNX2_CP_CPU_MODE;
2767 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
2768 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
2769 cpu_reg.state = BNX2_CP_CPU_STATE;
2770 cpu_reg.state_value_clear = 0xffffff;
2771 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
2772 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
2773 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
2774 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
2775 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
2776 cpu_reg.spad_base = BNX2_CP_SCRATCH;
2777 cpu_reg.mips_view_base = 0x8000000;
2779 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
2780 fw = &bnx2_cp_fw_09;
2782 rc = load_cpu_fw(bp, &cpu_reg, fw);
2787 bnx2_gunzip_end(bp);
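/* Transition the device between D0 and D3hot.  When suspending with
 * Wake-on-LAN enabled, the MAC is reprogrammed to receive magic packets
 * and the firmware is notified before PME is armed. */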
2792 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
2796 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
2802 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2803 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
2804 PCI_PM_CTRL_PME_STATUS);
2806 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
2807 /* delay required during transition out of D3hot */
2810 val = REG_RD(bp, BNX2_EMAC_MODE);
2811 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
2812 val &= ~BNX2_EMAC_MODE_MPKT;
2813 REG_WR(bp, BNX2_EMAC_MODE, val);
2815 val = REG_RD(bp, BNX2_RPM_CONFIG);
2816 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2817 REG_WR(bp, BNX2_RPM_CONFIG, val);
2828 autoneg = bp->autoneg;
2829 advertising = bp->advertising;
2831 bp->autoneg = AUTONEG_SPEED;
2832 bp->advertising = ADVERTISED_10baseT_Half |
2833 ADVERTISED_10baseT_Full |
2834 ADVERTISED_100baseT_Half |
2835 ADVERTISED_100baseT_Full |
2838 bnx2_setup_copper_phy(bp);
2840 bp->autoneg = autoneg;
2841 bp->advertising = advertising;
2843 bnx2_set_mac_addr(bp);
2845 val = REG_RD(bp, BNX2_EMAC_MODE);
2847 /* Enable port mode. */
2848 val &= ~BNX2_EMAC_MODE_PORT;
2849 val |= BNX2_EMAC_MODE_PORT_MII |
2850 BNX2_EMAC_MODE_MPKT_RCVD |
2851 BNX2_EMAC_MODE_ACPI_RCVD |
2852 BNX2_EMAC_MODE_MPKT;
2854 REG_WR(bp, BNX2_EMAC_MODE, val);
2856 /* receive all multicast */
2857 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2858 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2861 REG_WR(bp, BNX2_EMAC_RX_MODE,
2862 BNX2_EMAC_RX_MODE_SORT_MODE);
2864 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
2865 BNX2_RPM_SORT_USER0_MC_EN;
2866 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2867 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
2868 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
2869 BNX2_RPM_SORT_USER0_ENA);
2871 /* Need to enable EMAC and RPM for WOL. */
2872 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2873 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
2874 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
2875 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
2877 val = REG_RD(bp, BNX2_RPM_CONFIG);
2878 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
2879 REG_WR(bp, BNX2_RPM_CONFIG, val);
2881 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
2884 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
2887 if (!(bp->flags & NO_WOL_FLAG))
2888 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);
2890 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
2891 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
2892 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
2901 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
2903 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
2906 /* No more memory access after this point until
2907 * device is brought back to D0.
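/* NVRAM access helpers.  The flash interface is shared with the
 * firmware, so the driver must win the software arbitration (ARB2)
 * before touching it and release it when done; each poll loop is
 * bounded by NVRAM_TIMEOUT_COUNT. */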
2919 bnx2_acquire_nvram_lock(struct bnx2 *bp)
2924 /* Request access to the flash interface. */
2925 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2926 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2927 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2928 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2934 if (j >= NVRAM_TIMEOUT_COUNT)
2941 bnx2_release_nvram_lock(struct bnx2 *bp)
2946 /* Relinquish nvram interface. */
2947 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2949 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2950 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2951 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2957 if (j >= NVRAM_TIMEOUT_COUNT)
2965 bnx2_enable_nvram_write(struct bnx2 *bp)
2969 val = REG_RD(bp, BNX2_MISC_CFG);
2970 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2972 if (!bp->flash_info->buffered) {
2975 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2976 REG_WR(bp, BNX2_NVM_COMMAND,
2977 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2979 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2982 val = REG_RD(bp, BNX2_NVM_COMMAND);
2983 if (val & BNX2_NVM_COMMAND_DONE)
2987 if (j >= NVRAM_TIMEOUT_COUNT)
2994 bnx2_disable_nvram_write(struct bnx2 *bp)
2998 val = REG_RD(bp, BNX2_MISC_CFG);
2999 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3004 bnx2_enable_nvram_access(struct bnx2 *bp)
3008 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3009 /* Enable both bits, even on read. */
3010 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3011 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3015 bnx2_disable_nvram_access(struct bnx2 *bp)
3019 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3020 /* Disable both bits, even after read. */
3021 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
3022 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3023 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3027 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3032 if (bp->flash_info->buffered)
3033 /* Buffered flash, no erase needed */
3036 /* Build an erase command */
3037 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3038 BNX2_NVM_COMMAND_DOIT;
3040 /* Need to clear DONE bit separately. */
3041 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3044 /* Address of the NVRAM sector to erase. */
3044 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3046 /* Issue an erase command. */
3047 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3049 /* Wait for completion. */
3050 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3055 val = REG_RD(bp, BNX2_NVM_COMMAND);
3056 if (val & BNX2_NVM_COMMAND_DONE)
3060 if (j >= NVRAM_TIMEOUT_COUNT)
3067 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3072 /* Build the command word. */
3073 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3075 /* Calculate the physical offset for a buffered flash. */
3076 if (bp->flash_info->buffered) {
3077 offset = ((offset / bp->flash_info->page_size) <<
3078 bp->flash_info->page_bits) +
3079 (offset % bp->flash_info->page_size);
3082 /* Need to clear DONE bit separately. */
3083 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3085 /* Address of the NVRAM to read from. */
3086 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3088 /* Issue a read command. */
3089 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3091 /* Wait for completion. */
3092 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3097 val = REG_RD(bp, BNX2_NVM_COMMAND);
3098 if (val & BNX2_NVM_COMMAND_DONE) {
3099 val = REG_RD(bp, BNX2_NVM_READ);
3101 val = be32_to_cpu(val);
3102 memcpy(ret_val, &val, 4);
3106 if (j >= NVRAM_TIMEOUT_COUNT)
3114 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3119 /* Build the command word. */
3120 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3122 /* Calculate the physical offset for a buffered flash. */
3123 if (bp->flash_info->buffered) {
3124 offset = ((offset / bp->flash_info->page_size) <<
3125 bp->flash_info->page_bits) +
3126 (offset % bp->flash_info->page_size);
3129 /* Need to clear DONE bit separately. */
3130 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3132 memcpy(&val32, val, 4);
3133 val32 = cpu_to_be32(val32);
3135 /* Write the data. */
3136 REG_WR(bp, BNX2_NVM_WRITE, val32);
3138 /* Address of the NVRAM to write to. */
3139 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3141 /* Issue the write command. */
3142 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3144 /* Wait for completion. */
3145 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3148 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3151 if (j >= NVRAM_TIMEOUT_COUNT)
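/* Identify the attached flash/EEPROM by matching the NVM_CFG1 strapping
 * against flash_table, reprogram the flash interface registers for that
 * part if needed, and record the flash size. */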
3158 bnx2_init_nvram(struct bnx2 *bp)
3161 int j, entry_count, rc;
3162 struct flash_spec *flash;
3164 /* Determine the selected interface. */
3165 val = REG_RD(bp, BNX2_NVM_CFG1);
3167 entry_count = sizeof(flash_table) / sizeof(struct flash_spec);
3170 if (val & 0x40000000) {
3172 /* Flash interface has been reconfigured */
3173 for (j = 0, flash = &flash_table[0]; j < entry_count;
3175 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3176 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
3177 bp->flash_info = flash;
3184 /* Not yet reconfigured */
3186 if (val & (1 << 23))
3187 mask = FLASH_BACKUP_STRAP_MASK;
3189 mask = FLASH_STRAP_MASK;
3191 for (j = 0, flash = &flash_table[0]; j < entry_count;
3194 if ((val & mask) == (flash->strapping & mask)) {
3195 bp->flash_info = flash;
3197 /* Request access to the flash interface. */
3198 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3201 /* Enable access to flash interface */
3202 bnx2_enable_nvram_access(bp);
3204 /* Reconfigure the flash interface */
3205 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
3206 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
3207 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
3208 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
3210 /* Disable access to flash interface */
3211 bnx2_disable_nvram_access(bp);
3212 bnx2_release_nvram_lock(bp);
3217 } /* if (val & 0x40000000) */
3219 if (j == entry_count) {
3220 bp->flash_info = NULL;
3221 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
3225 val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
3226 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
3228 bp->flash_size = val;
3230 bp->flash_size = bp->flash_info->total_size;
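/* Read an arbitrary byte range from NVRAM.  The hardware transfers
 * whole dwords, so unaligned leading and trailing bytes are handled by
 * reading the surrounding dword and copying out only the bytes asked
 * for. */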
3236 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3240 u32 cmd_flags, offset32, len32, extra;
3245 /* Request access to the flash interface. */
3246 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3249 /* Enable access to flash interface */
3250 bnx2_enable_nvram_access(bp);
3263 pre_len = 4 - (offset & 3);
3265 if (pre_len >= len32) {
3267 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3268 BNX2_NVM_COMMAND_LAST;
3271 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3274 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3279 memcpy(ret_buf, buf + (offset & 3), pre_len);
3286 extra = 4 - (len32 & 3);
3287 len32 = (len32 + 4) & ~3;
3294 cmd_flags = BNX2_NVM_COMMAND_LAST;
3296 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3297 BNX2_NVM_COMMAND_LAST;
3299 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3301 memcpy(ret_buf, buf, 4 - extra);
3303 else if (len32 > 0) {
3306 /* Read the first word. */
3310 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3312 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3314 /* Advance to the next dword. */
3319 while (len32 > 4 && rc == 0) {
3320 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3322 /* Advance to the next dword. */
3331 cmd_flags = BNX2_NVM_COMMAND_LAST;
3332 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3334 memcpy(ret_buf, buf, 4 - extra);
3337 /* Disable access to flash interface */
3338 bnx2_disable_nvram_access(bp);
3340 bnx2_release_nvram_lock(bp);
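/* Write an arbitrary byte range to NVRAM.  Unaligned edges are merged
 * with the existing contents, and for non-buffered flash each affected
 * page is read into a temporary buffer, erased and rewritten in full. */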
3346 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3349 u32 written, offset32, len32;
3350 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
3352 int align_start, align_end;
3357 align_start = align_end = 0;
3359 if ((align_start = (offset32 & 3))) {
3361 len32 += align_start;
3364 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3369 align_end = 4 - (len32 & 3);
3371 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
3375 if (align_start || align_end) {
3376 align_buf = kmalloc(len32, GFP_KERNEL);
3377 if (align_buf == NULL)
3380 memcpy(align_buf, start, 4);
3383 memcpy(align_buf + len32 - 4, end, 4);
3385 memcpy(align_buf + align_start, data_buf, buf_size);
3389 if (bp->flash_info->buffered == 0) {
3390 flash_buffer = kmalloc(264, GFP_KERNEL);
3391 if (flash_buffer == NULL) {
3393 goto nvram_write_end;
3398 while ((written < len32) && (rc == 0)) {
3399 u32 page_start, page_end, data_start, data_end;
3400 u32 addr, cmd_flags;
3403 /* Find the page_start addr */
3404 page_start = offset32 + written;
3405 page_start -= (page_start % bp->flash_info->page_size);
3406 /* Find the page_end addr */
3407 page_end = page_start + bp->flash_info->page_size;
3408 /* Find the data_start addr */
3409 data_start = (written == 0) ? offset32 : page_start;
3410 /* Find the data_end addr */
3411 data_end = (page_end > offset32 + len32) ?
3412 (offset32 + len32) : page_end;
3414 /* Request access to the flash interface. */
3415 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3416 goto nvram_write_end;
3418 /* Enable access to flash interface */
3419 bnx2_enable_nvram_access(bp);
3421 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3422 if (bp->flash_info->buffered == 0) {
3425 /* Read the whole page into the buffer
3426 * (non-buffered flash only) */
3427 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3428 if (j == (bp->flash_info->page_size - 4)) {
3429 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3431 rc = bnx2_nvram_read_dword(bp,
3437 goto nvram_write_end;
3443 /* Enable writes to flash interface (unlock write-protect) */
3444 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3445 goto nvram_write_end;
3447 /* Loop to write back the buffer data from page_start to
3450 if (bp->flash_info->buffered == 0) {
3451 /* Erase the page */
3452 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3453 goto nvram_write_end;
3455 /* Re-enable writes to do the actual write */
3456 bnx2_enable_nvram_write(bp);
3458 for (addr = page_start; addr < data_start;
3459 addr += 4, i += 4) {
3461 rc = bnx2_nvram_write_dword(bp, addr,
3462 &flash_buffer[i], cmd_flags);
3465 goto nvram_write_end;
3471 /* Loop to write the new data from data_start to data_end */
3472 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
3473 if ((addr == page_end - 4) ||
3474 ((bp->flash_info->buffered) &&
3475 (addr == data_end - 4))) {
3477 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3479 rc = bnx2_nvram_write_dword(bp, addr, buf,
3483 goto nvram_write_end;
3489 /* Loop to write back the buffer data from data_end
3491 if (bp->flash_info->buffered == 0) {
3492 for (addr = data_end; addr < page_end;
3493 addr += 4, i += 4) {
3495 if (addr == page_end-4) {
3496 cmd_flags = BNX2_NVM_COMMAND_LAST;
3498 rc = bnx2_nvram_write_dword(bp, addr,
3499 &flash_buffer[i], cmd_flags);
3502 goto nvram_write_end;
3508 /* Disable writes to flash interface (lock write-protect) */
3509 bnx2_disable_nvram_write(bp);
3511 /* Disable access to flash interface */
3512 bnx2_disable_nvram_access(bp);
3513 bnx2_release_nvram_lock(bp);
3515 /* Increment written */
3516 written += data_end - data_start;
3520 kfree(flash_buffer);
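/* Reset the chip core: quiesce DMA, synchronize with the bootcode,
 * deposit the driver reset signature, issue the reset (via MISC_COMMAND
 * on the 5709, via PCICFG_MISC_CONFIG otherwise), then verify that the
 * reset completed and that byte swapping is configured correctly. */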
3526 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3531 /* Wait for the current PCI transaction to complete before
3532 * issuing a reset. */
3533 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3534 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3535 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3536 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3537 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3538 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3541 /* Wait for the firmware to tell us it is ok to issue a reset. */
3542 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3544 /* Deposit a driver reset signature so the firmware knows that
3545 * this is a soft reset. */
3546 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
3547 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3549 /* Do a dummy read to force the chip to complete all outstanding
3550 * transactions before we issue the reset. */
3551 val = REG_RD(bp, BNX2_MISC_ID);
3553 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3554 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3555 REG_RD(bp, BNX2_MISC_COMMAND);
3558 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3559 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3561 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
3564 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3565 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3566 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3569 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3571 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3572 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3573 current->state = TASK_UNINTERRUPTIBLE;
3574 schedule_timeout(HZ / 50);
3577 /* Reset takes approximately 30 usec */
3578 for (i = 0; i < 10; i++) {
3579 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3580 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3581 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3586 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3587 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3588 printk(KERN_ERR PFX "Chip reset did not complete\n");
3593 /* Make sure byte swapping is properly configured. */
3594 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3595 if (val != 0x01020304) {
3596 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3600 /* Wait for the firmware to finish its initialization. */
3601 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3605 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3606 /* Adjust the voltage regulator two steps lower. The default
3607 * value of this register is 0x0000000e. */
3608 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3610 /* Remove bad rbuf memory from the free pool. */
3611 rc = bnx2_alloc_bad_rbuf(bp);
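/* Program the chip after reset: DMA and context setup, processor
 * firmware load, host coalescing parameters, status and statistics
 * block addresses, MTU and receive filtering, then enable all blocks. */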
3618 bnx2_init_chip(struct bnx2 *bp)
3623 /* Make sure the interrupt is not active. */
3624 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3626 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
3627 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
3629 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
3631 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
3632 DMA_READ_CHANS << 12 |
3633 DMA_WRITE_CHANS << 16;
3635 val |= (0x2 << 20) | (1 << 11);
3637 if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
3640 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
3641 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
3642 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
3644 REG_WR(bp, BNX2_DMA_CONFIG, val);
3646 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3647 val = REG_RD(bp, BNX2_TDMA_CONFIG);
3648 val |= BNX2_TDMA_CONFIG_ONE_DMA;
3649 REG_WR(bp, BNX2_TDMA_CONFIG, val);
3652 if (bp->flags & PCIX_FLAG) {
3655 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3657 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
3658 val16 & ~PCI_X_CMD_ERO);
3661 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3662 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
3663 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
3664 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
3666 /* Initialize context mapping and zero out the quick contexts. The
3667 * context block must have already been enabled. */
3668 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3669 bnx2_init_5709_context(bp);
3671 bnx2_init_context(bp);
3673 if ((rc = bnx2_init_cpus(bp)) != 0)
3676 bnx2_init_nvram(bp);
3678 bnx2_set_mac_addr(bp);
3680 val = REG_RD(bp, BNX2_MQ_CONFIG);
3681 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
3682 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
3683 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
3684 val |= BNX2_MQ_CONFIG_HALT_DIS;
3686 REG_WR(bp, BNX2_MQ_CONFIG, val);
3688 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
3689 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
3690 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
3692 val = (BCM_PAGE_BITS - 8) << 24;
3693 REG_WR(bp, BNX2_RV2P_CONFIG, val);
3695 /* Configure page size. */
3696 val = REG_RD(bp, BNX2_TBDR_CONFIG);
3697 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
3698 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
3699 REG_WR(bp, BNX2_TBDR_CONFIG, val);
3701 val = bp->mac_addr[0] +
3702 (bp->mac_addr[1] << 8) +
3703 (bp->mac_addr[2] << 16) +
3705 (bp->mac_addr[4] << 8) +
3706 (bp->mac_addr[5] << 16);
3707 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
3709 /* Program the MTU. Also include 4 bytes for CRC32. */
3710 val = bp->dev->mtu + ETH_HLEN + 4;
3711 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
3712 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
3713 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
3715 bp->last_status_idx = 0;
3716 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
3718 /* Set up how to generate a link change interrupt. */
3719 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
3721 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
3722 (u64) bp->status_blk_mapping & 0xffffffff);
3723 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
3725 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
3726 (u64) bp->stats_blk_mapping & 0xffffffff);
3727 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
3728 (u64) bp->stats_blk_mapping >> 32);
3730 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
3731 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
3733 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
3734 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
3736 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
3737 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
3739 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
3741 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
3743 REG_WR(bp, BNX2_HC_COM_TICKS,
3744 (bp->com_ticks_int << 16) | bp->com_ticks);
3746 REG_WR(bp, BNX2_HC_CMD_TICKS,
3747 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
3749 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
3750 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
3752 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
3753 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
3755 REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
3756 BNX2_HC_CONFIG_TX_TMR_MODE |
3757 BNX2_HC_CONFIG_COLLECT_STATS);
3760 /* Clear internal stats counters. */
3761 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
3763 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);
3765 if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
3766 BNX2_PORT_FEATURE_ASF_ENABLED)
3767 bp->flags |= ASF_ENABLE_FLAG;
3769 /* Initialize the receive filter. */
3770 bnx2_set_rx_mode(bp->dev);
3772 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
3775 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
3776 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
3780 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
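/* Set up the L2 transmit context for the given CID; the 5709 uses a
 * different set of context offsets (the _XI variants) than the
 * 5706/5708. */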
3786 bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
3788 u32 val, offset0, offset1, offset2, offset3;
3790 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3791 offset0 = BNX2_L2CTX_TYPE_XI;
3792 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
3793 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
3794 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
3796 offset0 = BNX2_L2CTX_TYPE;
3797 offset1 = BNX2_L2CTX_CMD_TYPE;
3798 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
3799 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
3801 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
3802 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
3804 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
3805 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
3807 val = (u64) bp->tx_desc_mapping >> 32;
3808 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
3810 val = (u64) bp->tx_desc_mapping & 0xffffffff;
3811 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
3815 bnx2_init_tx_ring(struct bnx2 *bp)
3820 bp->tx_wake_thresh = bp->tx_ring_size / 2;
3822 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
3824 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
3825 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
3830 bp->tx_prod_bseq = 0;
3833 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
3834 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
3836 bnx2_init_tx_context(bp, cid);
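/* Initialize the RX buffer descriptor rings: size the buffers from the
 * MTU, chain the ring pages together, program the ring context and
 * pre-allocate receive skbs up to rx_ring_size. */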
3840 bnx2_init_rx_ring(struct bnx2 *bp)
3844 u16 prod, ring_prod;
3847 /* 8 for CRC and VLAN */
3848 bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
3850 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
3852 ring_prod = prod = bp->rx_prod = 0;
3855 bp->rx_prod_bseq = 0;
3857 for (i = 0; i < bp->rx_max_ring; i++) {
3860 rxbd = &bp->rx_desc_ring[i][0];
3861 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
3862 rxbd->rx_bd_len = bp->rx_buf_use_size;
3863 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
3865 if (i == (bp->rx_max_ring - 1))
3869 rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
3870 rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
3874 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
3875 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
3877 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);
3879 val = (u64) bp->rx_desc_mapping[0] >> 32;
3880 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);
3882 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
3883 CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);
3885 for (i = 0; i < bp->rx_ring_size; i++) {
3886 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
3889 prod = NEXT_RX_BD(prod);
3890 ring_prod = RX_RING_IDX(prod);
3894 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
3896 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
3900 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3904 bp->rx_ring_size = size;
3906 while (size > MAX_RX_DESC_CNT) {
3907 size -= MAX_RX_DESC_CNT;
3910 /* round to next power of 2 */
3912 while ((max & num_rings) == 0)
3915 if (num_rings != max)
3918 bp->rx_max_ring = max;
3919 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3923 bnx2_free_tx_skbs(struct bnx2 *bp)
3927 if (bp->tx_buf_ring == NULL)
3930 for (i = 0; i < TX_DESC_CNT; ) {
3931 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3932 struct sk_buff *skb = tx_buf->skb;
3940 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3941 skb_headlen(skb), PCI_DMA_TODEVICE);
3945 last = skb_shinfo(skb)->nr_frags;
3946 for (j = 0; j < last; j++) {
3947 tx_buf = &bp->tx_buf_ring[i + j + 1];
3948 pci_unmap_page(bp->pdev,
3949 pci_unmap_addr(tx_buf, mapping),
3950 skb_shinfo(skb)->frags[j].size,
3960 bnx2_free_rx_skbs(struct bnx2 *bp)
3964 if (bp->rx_buf_ring == NULL)
3967 for (i = 0; i < bp->rx_max_ring_idx; i++) {
3968 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3969 struct sk_buff *skb = rx_buf->skb;
3974 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3975 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3984 bnx2_free_skbs(struct bnx2 *bp)
3986 bnx2_free_tx_skbs(bp);
3987 bnx2_free_rx_skbs(bp);
3991 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3995 rc = bnx2_reset_chip(bp, reset_code);
4000 if ((rc = bnx2_init_chip(bp)) != 0)
4003 bnx2_init_tx_ring(bp);
4004 bnx2_init_rx_ring(bp);
4009 bnx2_init_nic(struct bnx2 *bp)
4013 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4016 spin_lock_bh(&bp->phy_lock);
4018 spin_unlock_bh(&bp->phy_lock);
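/* Ethtool self-test: register test.  Each entry in reg_tbl is written
 * with 0 and 0xffffffff; the read-write mask must take the writes and
 * the read-only mask must keep its original value.  Entries marked
 * BNX2_FL_NOT_5709 are skipped on the 5709. */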
4024 bnx2_test_registers(struct bnx2 *bp)
4028 static const struct {
4031 #define BNX2_FL_NOT_5709 1
4035 { 0x006c, 0, 0x00000000, 0x0000003f },
4036 { 0x0090, 0, 0xffffffff, 0x00000000 },
4037 { 0x0094, 0, 0x00000000, 0x00000000 },
4039 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4040 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4041 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4042 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4043 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4044 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4045 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4046 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4047 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4049 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4050 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4051 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4052 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4053 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4054 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4056 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4057 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4058 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
4060 { 0x1000, 0, 0x00000000, 0x00000001 },
4061 { 0x1004, 0, 0x00000000, 0x000f0001 },
4063 { 0x1408, 0, 0x01c00800, 0x00000000 },
4064 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4065 { 0x14a8, 0, 0x00000000, 0x000001ff },
4066 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
4067 { 0x14b0, 0, 0x00000002, 0x00000001 },
4068 { 0x14b8, 0, 0x00000000, 0x00000000 },
4069 { 0x14c0, 0, 0x00000000, 0x00000009 },
4070 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4071 { 0x14cc, 0, 0x00000000, 0x00000001 },
4072 { 0x14d0, 0, 0xffffffff, 0x00000000 },
4074 { 0x1800, 0, 0x00000000, 0x00000001 },
4075 { 0x1804, 0, 0x00000000, 0x00000003 },
4077 { 0x2800, 0, 0x00000000, 0x00000001 },
4078 { 0x2804, 0, 0x00000000, 0x00003f01 },
4079 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4080 { 0x2810, 0, 0xffff0000, 0x00000000 },
4081 { 0x2814, 0, 0xffff0000, 0x00000000 },
4082 { 0x2818, 0, 0xffff0000, 0x00000000 },
4083 { 0x281c, 0, 0xffff0000, 0x00000000 },
4084 { 0x2834, 0, 0xffffffff, 0x00000000 },
4085 { 0x2840, 0, 0x00000000, 0xffffffff },
4086 { 0x2844, 0, 0x00000000, 0xffffffff },
4087 { 0x2848, 0, 0xffffffff, 0x00000000 },
4088 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4090 { 0x2c00, 0, 0x00000000, 0x00000011 },
4091 { 0x2c04, 0, 0x00000000, 0x00030007 },
4093 { 0x3c00, 0, 0x00000000, 0x00000001 },
4094 { 0x3c04, 0, 0x00000000, 0x00070000 },
4095 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4096 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4097 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4098 { 0x3c14, 0, 0x00000000, 0xffffffff },
4099 { 0x3c18, 0, 0x00000000, 0xffffffff },
4100 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4101 { 0x3c20, 0, 0xffffff00, 0x00000000 },
4103 { 0x5004, 0, 0x00000000, 0x0000007f },
4104 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
4106 { 0x5c00, 0, 0x00000000, 0x00000001 },
4107 { 0x5c04, 0, 0x00000000, 0x0003000f },
4108 { 0x5c08, 0, 0x00000003, 0x00000000 },
4109 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4110 { 0x5c10, 0, 0x00000000, 0xffffffff },
4111 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4112 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4113 { 0x5c88, 0, 0x00000000, 0x00077373 },
4114 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4116 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4117 { 0x680c, 0, 0xffffffff, 0x00000000 },
4118 { 0x6810, 0, 0xffffffff, 0x00000000 },
4119 { 0x6814, 0, 0xffffffff, 0x00000000 },
4120 { 0x6818, 0, 0xffffffff, 0x00000000 },
4121 { 0x681c, 0, 0xffffffff, 0x00000000 },
4122 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4123 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4124 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4125 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4126 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4127 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4128 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4129 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4130 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4131 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4132 { 0x684c, 0, 0xffffffff, 0x00000000 },
4133 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4134 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4135 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4136 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4137 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4138 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4140 { 0xffff, 0, 0x00000000, 0x00000000 },
4145 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4148 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4149 u32 offset, rw_mask, ro_mask, save_val, val;
4150 u16 flags = reg_tbl[i].flags;
4152 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4155 offset = (u32) reg_tbl[i].offset;
4156 rw_mask = reg_tbl[i].rw_mask;
4157 ro_mask = reg_tbl[i].ro_mask;
4159 save_val = readl(bp->regview + offset);
4161 writel(0, bp->regview + offset);
4163 val = readl(bp->regview + offset);
4164 if ((val & rw_mask) != 0) {
4168 if ((val & ro_mask) != (save_val & ro_mask)) {
4172 writel(0xffffffff, bp->regview + offset);
4174 val = readl(bp->regview + offset);
4175 if ((val & rw_mask) != rw_mask) {
4179 if ((val & ro_mask) != (save_val & ro_mask)) {
4183 writel(save_val, bp->regview + offset);
4187 writel(save_val, bp->regview + offset);
4195 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4197 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
4198 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4201 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4204 for (offset = 0; offset < size; offset += 4) {
4206 REG_WR_IND(bp, start + offset, test_pattern[i]);
4208 if (REG_RD_IND(bp, start + offset) !=
4218 bnx2_test_memory(struct bnx2 *bp)
4222 static struct mem_entry {
4225 } mem_tbl_5706[] = {
4226 { 0x60000, 0x4000 },
4227 { 0xa0000, 0x3000 },
4228 { 0xe0000, 0x4000 },
4229 { 0x120000, 0x4000 },
4230 { 0x1a0000, 0x4000 },
4231 { 0x160000, 0x4000 },
4235 { 0x60000, 0x4000 },
4236 { 0xa0000, 0x3000 },
4237 { 0xe0000, 0x4000 },
4238 { 0x120000, 0x4000 },
4239 { 0x1a0000, 0x4000 },
4242 struct mem_entry *mem_tbl;
4244 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4245 mem_tbl = mem_tbl_5709;
4247 mem_tbl = mem_tbl_5706;
4249 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4250 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4251 mem_tbl[i].len)) != 0) {
4259 #define BNX2_MAC_LOOPBACK 0
4260 #define BNX2_PHY_LOOPBACK 1
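/* Self-test helper: put the MAC or PHY in loopback, transmit a single
 * test frame through the normal TX ring and verify that it comes back
 * on the RX ring intact (no frame errors, correct length and payload). */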
4263 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
4265 unsigned int pkt_size, num_pkts, i;
4266 struct sk_buff *skb, *rx_skb;
4267 unsigned char *packet;
4268 u16 rx_start_idx, rx_idx;
4271 struct sw_bd *rx_buf;
4272 struct l2_fhdr *rx_hdr;
4275 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4276 bp->loopback = MAC_LOOPBACK;
4277 bnx2_set_mac_loopback(bp);
4279 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
4280 bp->loopback = PHY_LOOPBACK;
4281 bnx2_set_phy_loopback(bp);
4287 skb = netdev_alloc_skb(bp->dev, pkt_size);
4290 packet = skb_put(skb, pkt_size);
4291 memcpy(packet, bp->dev->dev_addr, 6);
4292 memset(packet + 6, 0x0, 8);
4293 for (i = 14; i < pkt_size; i++)
4294 packet[i] = (unsigned char) (i & 0xff);
4296 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4299 REG_WR(bp, BNX2_HC_COMMAND,
4300 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4302 REG_RD(bp, BNX2_HC_COMMAND);
4305 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4309 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
4311 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4312 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4313 txbd->tx_bd_mss_nbytes = pkt_size;
4314 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4317 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4318 bp->tx_prod_bseq += pkt_size;
4320 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4321 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4325 REG_WR(bp, BNX2_HC_COMMAND,
4326 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4328 REG_RD(bp, BNX2_HC_COMMAND);
4332 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
4335 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
4336 goto loopback_test_done;
4339 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4340 if (rx_idx != rx_start_idx + num_pkts) {
4341 goto loopback_test_done;
4344 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4345 rx_skb = rx_buf->skb;
4347 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4348 skb_reserve(rx_skb, bp->rx_offset);
4350 pci_dma_sync_single_for_cpu(bp->pdev,
4351 pci_unmap_addr(rx_buf, mapping),
4352 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4354 if (rx_hdr->l2_fhdr_status &
4355 (L2_FHDR_ERRORS_BAD_CRC |
4356 L2_FHDR_ERRORS_PHY_DECODE |
4357 L2_FHDR_ERRORS_ALIGNMENT |
4358 L2_FHDR_ERRORS_TOO_SHORT |
4359 L2_FHDR_ERRORS_GIANT_FRAME)) {
4361 goto loopback_test_done;
4364 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4365 goto loopback_test_done;
4368 for (i = 14; i < pkt_size; i++) {
4369 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4370 goto loopback_test_done;
4381 #define BNX2_MAC_LOOPBACK_FAILED 1
4382 #define BNX2_PHY_LOOPBACK_FAILED 2
4383 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4384 BNX2_PHY_LOOPBACK_FAILED)
4387 bnx2_test_loopback(struct bnx2 *bp)
4391 if (!netif_running(bp->dev))
4392 return BNX2_LOOPBACK_FAILED;
4394 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4395 spin_lock_bh(&bp->phy_lock);
4397 spin_unlock_bh(&bp->phy_lock);
4398 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4399 rc |= BNX2_MAC_LOOPBACK_FAILED;
4400 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4401 rc |= BNX2_PHY_LOOPBACK_FAILED;
4405 #define NVRAM_SIZE 0x200
4406 #define CRC32_RESIDUAL 0xdebb20e3
4409 bnx2_test_nvram(struct bnx2 *bp)
4411 u32 buf[NVRAM_SIZE / 4];
4412 u8 *data = (u8 *) buf;
4416 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4417 goto test_nvram_done;
4419 magic = be32_to_cpu(buf[0]);
4420 if (magic != 0x669955aa) {
4422 goto test_nvram_done;
4425 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4426 goto test_nvram_done;
4428 csum = ether_crc_le(0x100, data);
4429 if (csum != CRC32_RESIDUAL) {
4431 goto test_nvram_done;
4434 csum = ether_crc_le(0x100, data + 0x100);
4435 if (csum != CRC32_RESIDUAL) {
4444 bnx2_test_link(struct bnx2 *bp)
4448 spin_lock_bh(&bp->phy_lock);
4449 bnx2_enable_bmsr1(bp);
4450 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4451 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4452 bnx2_disable_bmsr1(bp);
4453 spin_unlock_bh(&bp->phy_lock);
4455 if (bmsr & BMSR_LSTATUS) {
4462 bnx2_test_intr(struct bnx2 *bp)
4467 if (!netif_running(bp->dev))
4470 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4472 /* This register is not touched during run-time. */
4473 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
4474 REG_RD(bp, BNX2_HC_COMMAND);
4476 for (i = 0; i < 10; i++) {
4477 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4483 msleep_interruptible(10);
4492 bnx2_5706_serdes_timer(struct bnx2 *bp)
4494 spin_lock(&bp->phy_lock);
4495 if (bp->serdes_an_pending)
4496 bp->serdes_an_pending--;
4497 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4500 bp->current_interval = bp->timer_interval;
4502 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4504 if (bmcr & BMCR_ANENABLE) {
4507 bnx2_write_phy(bp, 0x1c, 0x7c00);
4508 bnx2_read_phy(bp, 0x1c, &phy1);
4510 bnx2_write_phy(bp, 0x17, 0x0f01);
4511 bnx2_read_phy(bp, 0x15, &phy2);
4512 bnx2_write_phy(bp, 0x17, 0x0f01);
4513 bnx2_read_phy(bp, 0x15, &phy2);
4515 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4516 !(phy2 & 0x20)) { /* no CONFIG */
4518 bmcr &= ~BMCR_ANENABLE;
4519 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
4520 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4521 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4525 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4526 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4529 bnx2_write_phy(bp, 0x17, 0x0f01);
4530 bnx2_read_phy(bp, 0x15, &phy2);
4534 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4535 bmcr |= BMCR_ANENABLE;
4536 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
4538 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4541 bp->current_interval = bp->timer_interval;
4543 spin_unlock(&bp->phy_lock);
4547 bnx2_5708_serdes_timer(struct bnx2 *bp)
4549 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4550 bp->serdes_an_pending = 0;
4554 spin_lock(&bp->phy_lock);
4555 if (bp->serdes_an_pending)
4556 bp->serdes_an_pending--;
4557 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4560 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
4561 if (bmcr & BMCR_ANENABLE) {
4562 bnx2_enable_forced_2g5(bp);
4563 bp->current_interval = SERDES_FORCED_TIMEOUT;
4565 bnx2_disable_forced_2g5(bp);
4566 bp->serdes_an_pending = 2;
4567 bp->current_interval = bp->timer_interval;
4571 bp->current_interval = bp->timer_interval;
4573 spin_unlock(&bp->phy_lock);
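/* Periodic driver timer: send the keep-alive pulse to the bootcode,
 * pick up the firmware RX drop counter and run the SerDes link
 * workarounds for the 5706/5708. */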
4577 bnx2_timer(unsigned long data)
4579 struct bnx2 *bp = (struct bnx2 *) data;
4582 if (!netif_running(bp->dev))
4585 if (atomic_read(&bp->intr_sem) != 0)
4586 goto bnx2_restart_timer;
4588 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4589 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
4591 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
4593 if (bp->phy_flags & PHY_SERDES_FLAG) {
4594 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4595 bnx2_5706_serdes_timer(bp);
4597 bnx2_5708_serdes_timer(bp);
4601 mod_timer(&bp->timer, jiffies + bp->current_interval);
4604 /* Called with rtnl_lock */
4606 bnx2_open(struct net_device *dev)
4608 struct bnx2 *bp = netdev_priv(dev);
4611 netif_carrier_off(dev);
4613 bnx2_set_power_state(bp, PCI_D0);
4614 bnx2_disable_int(bp);
4616 rc = bnx2_alloc_mem(bp);
4620 if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
4621 (CHIP_ID(bp) != CHIP_ID_5706_A1) &&
4624 if (pci_enable_msi(bp->pdev) == 0) {
4625 bp->flags |= USING_MSI_FLAG;
4626 rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
4630 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4631 IRQF_SHARED, dev->name, dev);
4635 rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
4643 rc = bnx2_init_nic(bp);
4646 free_irq(bp->pdev->irq, dev);
4647 if (bp->flags & USING_MSI_FLAG) {
4648 pci_disable_msi(bp->pdev);
4649 bp->flags &= ~USING_MSI_FLAG;
4656 mod_timer(&bp->timer, jiffies + bp->current_interval);
4658 atomic_set(&bp->intr_sem, 0);
4660 bnx2_enable_int(bp);
4662 if (bp->flags & USING_MSI_FLAG) {
4663 /* Test MSI to make sure it is working.
4664 * If the MSI test fails, go back to INTx mode.
4666 if (bnx2_test_intr(bp) != 0) {
4667 printk(KERN_WARNING PFX "%s: No interrupt was generated"
4668 " using MSI, switching to INTx mode. Please"
4669 " report this failure to the PCI maintainer"
4670 " and include system chipset information.\n",
4673 bnx2_disable_int(bp);
4674 free_irq(bp->pdev->irq, dev);
4675 pci_disable_msi(bp->pdev);
4676 bp->flags &= ~USING_MSI_FLAG;
4678 rc = bnx2_init_nic(bp);
4681 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4682 IRQF_SHARED, dev->name, dev);
4687 del_timer_sync(&bp->timer);
4690 bnx2_enable_int(bp);
4693 if (bp->flags & USING_MSI_FLAG) {
4694 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
4697 netif_start_queue(dev);
4703 bnx2_reset_task(struct work_struct *work)
4705 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
4707 if (!netif_running(bp->dev))
4710 bp->in_reset_task = 1;
4711 bnx2_netif_stop(bp);
4715 atomic_set(&bp->intr_sem, 1);
4716 bnx2_netif_start(bp);
4717 bp->in_reset_task = 0;
4721 bnx2_tx_timeout(struct net_device *dev)
4723 struct bnx2 *bp = netdev_priv(dev);
4725 /* This allows the netif to be shut down gracefully before resetting */
4726 schedule_work(&bp->reset_task);
4730 /* Called with rtnl_lock */
4732 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4734 struct bnx2 *bp = netdev_priv(dev);
4736 bnx2_netif_stop(bp);
4739 bnx2_set_rx_mode(dev);
4741 bnx2_netif_start(bp);
4744 /* Called with rtnl_lock */
4746 bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4748 struct bnx2 *bp = netdev_priv(dev);
4750 bnx2_netif_stop(bp);
4751 vlan_group_set_device(bp->vlgrp, vid, NULL);
4752 bnx2_set_rx_mode(dev);
4754 bnx2_netif_start(bp);
4758 /* Called with netif_tx_lock.
4759 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4760 * netif_wake_queue().
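/* Main transmit routine: map the skb, build the chain of TX buffer
 * descriptors (including checksum, VLAN and LSO flags), then ring the
 * doorbell by writing the new producer index and byte sequence to the
 * TX mailbox. */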
4763 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4765 struct bnx2 *bp = netdev_priv(dev);
4768 struct sw_bd *tx_buf;
4769 u32 len, vlan_tag_flags, last_frag, mss;
4770 u16 prod, ring_prod;
4773 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
4774 netif_stop_queue(dev);
4775 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4778 return NETDEV_TX_BUSY;
4780 len = skb_headlen(skb);
4782 ring_prod = TX_RING_IDX(prod);
4785 if (skb->ip_summed == CHECKSUM_PARTIAL) {
4786 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4789 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4791 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4793 if ((mss = skb_shinfo(skb)->gso_size) &&
4794 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4795 u32 tcp_opt_len, ip_tcp_len;
4798 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4800 tcp_opt_len = tcp_optlen(skb);
4802 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
4803 u32 tcp_off = skb_transport_offset(skb) -
4804 sizeof(struct ipv6hdr) - ETH_HLEN;
4806 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
4807 TX_BD_FLAGS_SW_FLAGS;
4808 if (likely(tcp_off == 0))
4809 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
4812 vlan_tag_flags |= ((tcp_off & 0x3) <<
4813 TX_BD_FLAGS_TCP6_OFF0_SHL) |
4814 ((tcp_off & 0x10) <<
4815 TX_BD_FLAGS_TCP6_OFF4_SHL);
4816 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
4819 if (skb_header_cloned(skb) &&
4820 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4822 return NETDEV_TX_OK;
4825 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
4829 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
4830 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
4834 if (tcp_opt_len || (iph->ihl > 5)) {
4835 vlan_tag_flags |= ((iph->ihl - 5) +
4836 (tcp_opt_len >> 2)) << 8;
4842 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
4844 tx_buf = &bp->tx_buf_ring[ring_prod];
4846 pci_unmap_addr_set(tx_buf, mapping, mapping);
4848 txbd = &bp->tx_desc_ring[ring_prod];
4850 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4851 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4852 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4853 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4855 last_frag = skb_shinfo(skb)->nr_frags;
4857 for (i = 0; i < last_frag; i++) {
4858 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4860 prod = NEXT_TX_BD(prod);
4861 ring_prod = TX_RING_IDX(prod);
4862 txbd = &bp->tx_desc_ring[ring_prod];
4865 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4866 len, PCI_DMA_TODEVICE);
4867 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4870 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4871 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4872 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4873 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4876 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4878 prod = NEXT_TX_BD(prod);
4879 bp->tx_prod_bseq += skb->len;
4881 REG_WR16(bp, bp->tx_bidx_addr, prod);
4882 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
4887 dev->trans_start = jiffies;
4889 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
4890 netif_stop_queue(dev);
4891 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
4892 netif_wake_queue(dev);
4895 return NETDEV_TX_OK;
4898 /* Called with rtnl_lock */
4900 bnx2_close(struct net_device *dev)
4902 struct bnx2 *bp = netdev_priv(dev);
4905 /* Calling flush_scheduled_work() may deadlock because
4906 * linkwatch_event() may be on the workqueue and it will try to get
4907 * the rtnl_lock which we are holding.
4909 while (bp->in_reset_task)
4912 bnx2_netif_stop(bp);
4913 del_timer_sync(&bp->timer);
4914 if (bp->flags & NO_WOL_FLAG)
4915 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
4917 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
4919 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
4920 bnx2_reset_chip(bp, reset_code);
4921 free_irq(bp->pdev->irq, dev);
4922 if (bp->flags & USING_MSI_FLAG) {
4923 pci_disable_msi(bp->pdev);
4924 bp->flags &= ~USING_MSI_FLAG;
4929 netif_carrier_off(bp->dev);
4930 bnx2_set_power_state(bp, PCI_D3hot);
4934 #define GET_NET_STATS64(ctr) \
4935 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
4936 (unsigned long) (ctr##_lo)
4938 #define GET_NET_STATS32(ctr) \
4941 #if (BITS_PER_LONG == 64)
4942 #define GET_NET_STATS GET_NET_STATS64
4944 #define GET_NET_STATS GET_NET_STATS32
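/* Fill net_device_stats from the hardware statistics block, combining
 * the hi/lo halves of the 64-bit counters with GET_NET_STATS and
 * deriving the aggregate rx/tx error counts. */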
4947 static struct net_device_stats *
4948 bnx2_get_stats(struct net_device *dev)
4950 struct bnx2 *bp = netdev_priv(dev);
4951 struct statistics_block *stats_blk = bp->stats_blk;
4952 struct net_device_stats *net_stats = &bp->net_stats;
4954 if (bp->stats_blk == NULL) {
4957 net_stats->rx_packets =
4958 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
4959 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
4960 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
4962 net_stats->tx_packets =
4963 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
4964 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
4965 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
4967 net_stats->rx_bytes =
4968 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
4970 net_stats->tx_bytes =
4971 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
4973 net_stats->multicast =
4974 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
4976 net_stats->collisions =
4977 (unsigned long) stats_blk->stat_EtherStatsCollisions;
4979 net_stats->rx_length_errors =
4980 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
4981 stats_blk->stat_EtherStatsOverrsizePkts);
4983 net_stats->rx_over_errors =
4984 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
4986 net_stats->rx_frame_errors =
4987 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
4989 net_stats->rx_crc_errors =
4990 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
4992 net_stats->rx_errors = net_stats->rx_length_errors +
4993 net_stats->rx_over_errors + net_stats->rx_frame_errors +
4994 net_stats->rx_crc_errors;
4996 net_stats->tx_aborted_errors =
4997 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
4998 stats_blk->stat_Dot3StatsLateCollisions);
5000 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5001 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5002 net_stats->tx_carrier_errors = 0;
5004 net_stats->tx_carrier_errors =
5006 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5009 net_stats->tx_errors =
5011 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5013 net_stats->tx_aborted_errors +
5014 net_stats->tx_carrier_errors;
5016 net_stats->rx_missed_errors =
5017 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5018 stats_blk->stat_FwRxDrop);
5023 /* All ethtool functions called with rtnl_lock */
5026 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5028 struct bnx2 *bp = netdev_priv(dev);
5030 cmd->supported = SUPPORTED_Autoneg;
5031 if (bp->phy_flags & PHY_SERDES_FLAG) {
5032 cmd->supported |= SUPPORTED_1000baseT_Full |
5034 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5035 cmd->supported |= SUPPORTED_2500baseX_Full;
5037 cmd->port = PORT_FIBRE;
5040 cmd->supported |= SUPPORTED_10baseT_Half |
5041 SUPPORTED_10baseT_Full |
5042 SUPPORTED_100baseT_Half |
5043 SUPPORTED_100baseT_Full |
5044 SUPPORTED_1000baseT_Full |
5047 cmd->port = PORT_TP;
5050 cmd->advertising = bp->advertising;
5052 if (bp->autoneg & AUTONEG_SPEED) {
5053 cmd->autoneg = AUTONEG_ENABLE;
5056 cmd->autoneg = AUTONEG_DISABLE;
5059 if (netif_carrier_ok(dev)) {
5060 cmd->speed = bp->line_speed;
5061 cmd->duplex = bp->duplex;
5068 cmd->transceiver = XCVR_INTERNAL;
5069 cmd->phy_address = bp->phy_addr;
5075 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5077 struct bnx2 *bp = netdev_priv(dev);
5078 u8 autoneg = bp->autoneg;
5079 u8 req_duplex = bp->req_duplex;
5080 u16 req_line_speed = bp->req_line_speed;
5081 u32 advertising = bp->advertising;
5083 if (cmd->autoneg == AUTONEG_ENABLE) {
5084 autoneg |= AUTONEG_SPEED;
5086 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
5088 /* allow advertising 1 speed */
5089 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5090 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5091 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5092 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5094 if (bp->phy_flags & PHY_SERDES_FLAG)
5097 advertising = cmd->advertising;
5099 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
5100 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5102 } else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
5103 advertising = cmd->advertising;
5105 else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
5106 return -EINVAL;
5107 }
5108 else {
5109 if (bp->phy_flags & PHY_SERDES_FLAG) {
5110 advertising = ETHTOOL_ALL_FIBRE_SPEED;
5111 }
5112 else {
5113 advertising = ETHTOOL_ALL_COPPER_SPEED;
5114 }
5115 }
5116 advertising |= ADVERTISED_Autoneg;
5117 }
5118 else {
5119 if (bp->phy_flags & PHY_SERDES_FLAG) {
5120 if ((cmd->speed != SPEED_1000 &&
5121 cmd->speed != SPEED_2500) ||
5122 (cmd->duplex != DUPLEX_FULL))
5123 return -EINVAL;
5125 if (cmd->speed == SPEED_2500 &&
5126 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
5127 return -EINVAL;
5128 }
5129 else if (cmd->speed == SPEED_1000) {
5130 return -EINVAL;
5131 }
5132 autoneg &= ~AUTONEG_SPEED;
5133 req_line_speed = cmd->speed;
5134 req_duplex = cmd->duplex;
5135 advertising = 0;
5136 }
5138 bp->autoneg = autoneg;
5139 bp->advertising = advertising;
5140 bp->req_line_speed = req_line_speed;
5141 bp->req_duplex = req_duplex;
5143 spin_lock_bh(&bp->phy_lock);
5147 spin_unlock_bh(&bp->phy_lock);
5153 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5155 struct bnx2 *bp = netdev_priv(dev);
5157 strcpy(info->driver, DRV_MODULE_NAME);
5158 strcpy(info->version, DRV_MODULE_VERSION);
5159 strcpy(info->bus_info, pci_name(bp->pdev));
5160 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5161 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5162 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
5163 info->fw_version[1] = info->fw_version[3] = '.';
5164 info->fw_version[5] = 0;
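/* Register dump support: the dump buffer is a fixed 32 KB window.
 * reg_boundaries[] holds pairs of offsets delimiting the readable register
 * ranges; bnx2_get_regs() reads each range with REG_RD() and leaves the
 * unreadable gaps zero-filled from the initial memset().
 */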
5167 #define BNX2_REGDUMP_LEN (32 * 1024)
5170 bnx2_get_regs_len(struct net_device *dev)
5172 return BNX2_REGDUMP_LEN;
5176 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5178 u32 *p = _p, i, offset;
5179 u8 *orig_p = _p;
5180 struct bnx2 *bp = netdev_priv(dev);
5181 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5182 0x0800, 0x0880, 0x0c00, 0x0c10,
5183 0x0c30, 0x0d08, 0x1000, 0x101c,
5184 0x1040, 0x1048, 0x1080, 0x10a4,
5185 0x1400, 0x1490, 0x1498, 0x14f0,
5186 0x1500, 0x155c, 0x1580, 0x15dc,
5187 0x1600, 0x1658, 0x1680, 0x16d8,
5188 0x1800, 0x1820, 0x1840, 0x1854,
5189 0x1880, 0x1894, 0x1900, 0x1984,
5190 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5191 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5192 0x2000, 0x2030, 0x23c0, 0x2400,
5193 0x2800, 0x2820, 0x2830, 0x2850,
5194 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5195 0x3c00, 0x3c94, 0x4000, 0x4010,
5196 0x4080, 0x4090, 0x43c0, 0x4458,
5197 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5198 0x4fc0, 0x5010, 0x53c0, 0x5444,
5199 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5200 0x5fc0, 0x6000, 0x6400, 0x6428,
5201 0x6800, 0x6848, 0x684c, 0x6860,
5202 0x6888, 0x6910, 0x8000 };
5206 memset(p, 0, BNX2_REGDUMP_LEN);
5208 if (!netif_running(bp->dev))
5209 return;
5211 i = 0;
5212 offset = reg_boundaries[0];
5214 while (offset < BNX2_REGDUMP_LEN) {
5215 *p++ = REG_RD(bp, offset);
5216 offset += 4;
5217 if (offset == reg_boundaries[i + 1]) {
5218 offset = reg_boundaries[i + 2];
5219 p = (u32 *) (orig_p + offset);
5220 i += 2;
5221 }
5222 }
5226 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5228 struct bnx2 *bp = netdev_priv(dev);
5230 if (bp->flags & NO_WOL_FLAG) {
5231 wol->supported = 0;
5232 wol->wolopts = 0;
5233 }
5234 else {
5235 wol->supported = WAKE_MAGIC;
5236 if (bp->wol)
5237 wol->wolopts = WAKE_MAGIC;
5238 else
5239 wol->wolopts = 0;
5240 }
5241 memset(&wol->sopass, 0, sizeof(wol->sopass));
5245 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5247 struct bnx2 *bp = netdev_priv(dev);
5249 if (wol->wolopts & ~WAKE_MAGIC)
5250 return -EINVAL;
5252 if (wol->wolopts & WAKE_MAGIC) {
5253 if (bp->flags & NO_WOL_FLAG)
5254 return -EINVAL;
5256 bp->wol = 1;
5257 }
5258 else {
5259 bp->wol = 0;
5260 }
5265 bnx2_nway_reset(struct net_device *dev)
5267 struct bnx2 *bp = netdev_priv(dev);
5268 u32 bmcr;
5270 if (!(bp->autoneg & AUTONEG_SPEED)) {
5271 return -EINVAL;
5272 }
5274 spin_lock_bh(&bp->phy_lock);
5276 /* Force a link down visible on the other side */
5277 if (bp->phy_flags & PHY_SERDES_FLAG) {
5278 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
5279 spin_unlock_bh(&bp->phy_lock);
5281 msleep(20);
5283 spin_lock_bh(&bp->phy_lock);
5285 bp->current_interval = SERDES_AN_TIMEOUT;
5286 bp->serdes_an_pending = 1;
5287 mod_timer(&bp->timer, jiffies + bp->current_interval);
5288 }
5290 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5291 bmcr &= ~BMCR_LOOPBACK;
5292 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
5294 spin_unlock_bh(&bp->phy_lock);
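/* The EEPROM ethtool hooks expose the NVRAM behind the NetXtreme II:
 * the reported length comes from bp->flash_size, filled in when the flash
 * part is identified, and reads/writes go through the bnx2_nvram_read()
 * and bnx2_nvram_write() helpers.
 */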
5300 bnx2_get_eeprom_len(struct net_device *dev)
5302 struct bnx2 *bp = netdev_priv(dev);
5304 if (bp->flash_info == NULL)
5307 return (int) bp->flash_size;
5311 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5314 struct bnx2 *bp = netdev_priv(dev);
5317 /* parameters already validated in ethtool_get_eeprom */
5319 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5325 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5328 struct bnx2 *bp = netdev_priv(dev);
5331 /* parameters already validated in ethtool_set_eeprom */
5333 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5339 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5341 struct bnx2 *bp = netdev_priv(dev);
5343 memset(coal, 0, sizeof(struct ethtool_coalesce));
5345 coal->rx_coalesce_usecs = bp->rx_ticks;
5346 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5347 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5348 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5350 coal->tx_coalesce_usecs = bp->tx_ticks;
5351 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5352 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5353 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5355 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5361 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5363 struct bnx2 *bp = netdev_priv(dev);
5365 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5366 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5368 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
5369 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5371 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5372 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5374 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5375 if (bp->rx_quick_cons_trip_int > 0xff)
5376 bp->rx_quick_cons_trip_int = 0xff;
5378 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5379 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5381 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5382 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5384 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5385 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5387 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5388 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5389 0xff;
5391 bp->stats_ticks = coal->stats_block_coalesce_usecs;
5392 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5393 bp->stats_ticks &= 0xffff00;
5395 if (netif_running(bp->dev)) {
5396 bnx2_netif_stop(bp);
5397 bnx2_init_nic(bp);
5398 bnx2_netif_start(bp);
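/* Ring size limits: the RX ring is capped at MAX_TOTAL_RX_DESC_CNT and the
 * TX ring at MAX_TX_DESC_CNT; the TX ring must also be larger than
 * MAX_SKB_FRAGS so a maximally fragmented skb always fits.
 */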
5405 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5407 struct bnx2 *bp = netdev_priv(dev);
5409 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
5410 ering->rx_mini_max_pending = 0;
5411 ering->rx_jumbo_max_pending = 0;
5413 ering->rx_pending = bp->rx_ring_size;
5414 ering->rx_mini_pending = 0;
5415 ering->rx_jumbo_pending = 0;
5417 ering->tx_max_pending = MAX_TX_DESC_CNT;
5418 ering->tx_pending = bp->tx_ring_size;
5422 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5424 struct bnx2 *bp = netdev_priv(dev);
5426 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
5427 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5428 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5432 if (netif_running(bp->dev)) {
5433 bnx2_netif_stop(bp);
5434 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5439 bnx2_set_rx_ring_size(bp, ering->rx_pending);
5440 bp->tx_ring_size = ering->tx_pending;
5442 if (netif_running(bp->dev)) {
5445 rc = bnx2_alloc_mem(bp);
5449 bnx2_netif_start(bp);
5456 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5458 struct bnx2 *bp = netdev_priv(dev);
5460 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5461 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5462 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5466 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5468 struct bnx2 *bp = netdev_priv(dev);
5470 bp->req_flow_ctrl = 0;
5471 if (epause->rx_pause)
5472 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5473 if (epause->tx_pause)
5474 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5476 if (epause->autoneg) {
5477 bp->autoneg |= AUTONEG_FLOW_CTRL;
5480 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5483 spin_lock_bh(&bp->phy_lock);
5487 spin_unlock_bh(&bp->phy_lock);
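/* Offload toggles: the RX checksum hooks track the driver's software
 * preference, TX checksum selects the HW-checksum helper on the 5709, and
 * the TSO handler adds or removes NETIF_F_TSO/NETIF_F_TSO_ECN (plus
 * NETIF_F_TSO6 on the 5709) in dev->features.
 */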
5493 bnx2_get_rx_csum(struct net_device *dev)
5495 struct bnx2 *bp = netdev_priv(dev);
5501 bnx2_set_rx_csum(struct net_device *dev, u32 data)
5503 struct bnx2 *bp = netdev_priv(dev);
5510 bnx2_set_tso(struct net_device *dev, u32 data)
5512 struct bnx2 *bp = netdev_priv(dev);
5514 if (data) {
5515 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5516 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5517 dev->features |= NETIF_F_TSO6;
5518 } else
5519 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5520 NETIF_F_TSO_ECN);
5524 #define BNX2_NUM_STATS 46
5527 char string[ETH_GSTRING_LEN];
5528 } bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5530 { "rx_error_bytes" },
5532 { "tx_error_bytes" },
5533 { "rx_ucast_packets" },
5534 { "rx_mcast_packets" },
5535 { "rx_bcast_packets" },
5536 { "tx_ucast_packets" },
5537 { "tx_mcast_packets" },
5538 { "tx_bcast_packets" },
5539 { "tx_mac_errors" },
5540 { "tx_carrier_errors" },
5541 { "rx_crc_errors" },
5542 { "rx_align_errors" },
5543 { "tx_single_collisions" },
5544 { "tx_multi_collisions" },
5546 { "tx_excess_collisions" },
5547 { "tx_late_collisions" },
5548 { "tx_total_collisions" },
5551 { "rx_undersize_packets" },
5552 { "rx_oversize_packets" },
5553 { "rx_64_byte_packets" },
5554 { "rx_65_to_127_byte_packets" },
5555 { "rx_128_to_255_byte_packets" },
5556 { "rx_256_to_511_byte_packets" },
5557 { "rx_512_to_1023_byte_packets" },
5558 { "rx_1024_to_1522_byte_packets" },
5559 { "rx_1523_to_9022_byte_packets" },
5560 { "tx_64_byte_packets" },
5561 { "tx_65_to_127_byte_packets" },
5562 { "tx_128_to_255_byte_packets" },
5563 { "tx_256_to_511_byte_packets" },
5564 { "tx_512_to_1023_byte_packets" },
5565 { "tx_1024_to_1522_byte_packets" },
5566 { "tx_1523_to_9022_byte_packets" },
5567 { "rx_xon_frames" },
5568 { "rx_xoff_frames" },
5569 { "tx_xon_frames" },
5570 { "tx_xoff_frames" },
5571 { "rx_mac_ctrl_frames" },
5572 { "rx_filtered_packets" },
5574 { "rx_fw_discards" },
5577 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5579 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
5580 STATS_OFFSET32(stat_IfHCInOctets_hi),
5581 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5582 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5583 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5584 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5585 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5586 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5587 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5588 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5589 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5590 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
5591 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5592 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5593 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5594 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5595 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5596 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5597 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5598 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5599 STATS_OFFSET32(stat_EtherStatsCollisions),
5600 STATS_OFFSET32(stat_EtherStatsFragments),
5601 STATS_OFFSET32(stat_EtherStatsJabbers),
5602 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5603 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5604 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5605 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5606 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5607 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5608 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5609 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5610 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5611 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5612 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5613 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5614 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5615 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5616 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5617 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5618 STATS_OFFSET32(stat_XonPauseFramesReceived),
5619 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5620 STATS_OFFSET32(stat_OutXonSent),
5621 STATS_OFFSET32(stat_OutXoffSent),
5622 STATS_OFFSET32(stat_MacControlFramesReceived),
5623 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5624 STATS_OFFSET32(stat_IfInMBUFDiscards),
5625 STATS_OFFSET32(stat_FwRxDrop),
5628 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5629 * skipped because of errata.
5631 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
5632 8,0,8,8,8,8,8,8,8,8,
5633 4,0,4,4,4,4,4,4,4,4,
5634 4,4,4,4,4,4,4,4,4,4,
5635 4,4,4,4,4,4,4,4,4,4,
5639 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
5640 8,0,8,8,8,8,8,8,8,8,
5641 4,4,4,4,4,4,4,4,4,4,
5642 4,4,4,4,4,4,4,4,4,4,
5643 4,4,4,4,4,4,4,4,4,4,
5647 #define BNX2_NUM_TESTS 6
5650 char string[ETH_GSTRING_LEN];
5651 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
5652 { "register_test (offline)" },
5653 { "memory_test (offline)" },
5654 { "loopback_test (offline)" },
5655 { "nvram_test (online)" },
5656 { "interrupt_test (online)" },
5657 { "link_test (online)" },
5661 bnx2_self_test_count(struct net_device *dev)
5663 return BNX2_NUM_TESTS;
5667 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
5669 struct bnx2 *bp = netdev_priv(dev);
5671 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
5672 if (etest->flags & ETH_TEST_FL_OFFLINE) {
5675 bnx2_netif_stop(bp);
5676 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
5679 if (bnx2_test_registers(bp) != 0) {
5680 buf[0] = 1;
5681 etest->flags |= ETH_TEST_FL_FAILED;
5682 }
5683 if (bnx2_test_memory(bp) != 0) {
5684 buf[1] = 1;
5685 etest->flags |= ETH_TEST_FL_FAILED;
5686 }
5687 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
5688 etest->flags |= ETH_TEST_FL_FAILED;
5690 if (!netif_running(bp->dev)) {
5691 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5695 bnx2_netif_start(bp);
5698 /* wait for link up */
5699 for (i = 0; i < 7; i++) {
5702 msleep_interruptible(1000);
5706 if (bnx2_test_nvram(bp) != 0) {
5707 buf[3] = 1;
5708 etest->flags |= ETH_TEST_FL_FAILED;
5709 }
5710 if (bnx2_test_intr(bp) != 0) {
5711 buf[4] = 1;
5712 etest->flags |= ETH_TEST_FL_FAILED;
5713 }
5715 if (bnx2_test_link(bp) != 0) {
5716 buf[5] = 1;
5717 etest->flags |= ETH_TEST_FL_FAILED;
5718 }
5723 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5725 switch (stringset) {
5727 memcpy(buf, bnx2_stats_str_arr,
5728 sizeof(bnx2_stats_str_arr));
5731 memcpy(buf, bnx2_tests_str_arr,
5732 sizeof(bnx2_tests_str_arr));
5738 bnx2_get_stats_count(struct net_device *dev)
5740 return BNX2_NUM_STATS;
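/* bnx2_get_ethtool_stats() reads counters straight out of the DMA'd
 * statistics block: a length of 0 skips the counter (5706/5708 A0 errata),
 * 4 copies a single 32-bit word, and 8 combines two consecutive words into
 * one 64-bit value.
 */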
5744 bnx2_get_ethtool_stats(struct net_device *dev,
5745 struct ethtool_stats *stats, u64 *buf)
5747 struct bnx2 *bp = netdev_priv(dev);
5749 u32 *hw_stats = (u32 *) bp->stats_blk;
5750 u8 *stats_len_arr = NULL;
5752 if (hw_stats == NULL) {
5753 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
5757 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
5758 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
5759 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
5760 (CHIP_ID(bp) == CHIP_ID_5708_A0))
5761 stats_len_arr = bnx2_5706_stats_len_arr;
5763 stats_len_arr = bnx2_5708_stats_len_arr;
5765 for (i = 0; i < BNX2_NUM_STATS; i++) {
5766 if (stats_len_arr[i] == 0) {
5767 /* skip this counter */
5768 buf[i] = 0;
5769 continue;
5770 }
5771 if (stats_len_arr[i] == 4) {
5772 /* 4-byte counter */
5773 buf[i] = (u64)
5774 *(hw_stats + bnx2_stats_offset_arr[i]);
5775 continue;
5776 }
5777 /* 8-byte counter */
5778 buf[i] = (((u64) *(hw_stats +
5779 bnx2_stats_offset_arr[i])) << 32) +
5780 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
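/* bnx2_phys_id() implements "ethtool -p": it overrides the MAC LED control
 * to blink the port LED for the requested number of seconds (500 ms per
 * half-cycle) and then restores the saved BNX2_MISC_CFG value.
 */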
5785 bnx2_phys_id(struct net_device *dev, u32 data)
5787 struct bnx2 *bp = netdev_priv(dev);
5794 save = REG_RD(bp, BNX2_MISC_CFG);
5795 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
5797 for (i = 0; i < (data * 2); i++) {
5799 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
5802 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
5803 BNX2_EMAC_LED_1000MB_OVERRIDE |
5804 BNX2_EMAC_LED_100MB_OVERRIDE |
5805 BNX2_EMAC_LED_10MB_OVERRIDE |
5806 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
5807 BNX2_EMAC_LED_TRAFFIC);
5809 msleep_interruptible(500);
5810 if (signal_pending(current))
5813 REG_WR(bp, BNX2_EMAC_LED, 0);
5814 REG_WR(bp, BNX2_MISC_CFG, save);
5819 bnx2_set_tx_csum(struct net_device *dev, u32 data)
5821 struct bnx2 *bp = netdev_priv(dev);
5823 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5824 return (ethtool_op_set_tx_hw_csum(dev, data));
5826 return (ethtool_op_set_tx_csum(dev, data));
5829 static const struct ethtool_ops bnx2_ethtool_ops = {
5830 .get_settings = bnx2_get_settings,
5831 .set_settings = bnx2_set_settings,
5832 .get_drvinfo = bnx2_get_drvinfo,
5833 .get_regs_len = bnx2_get_regs_len,
5834 .get_regs = bnx2_get_regs,
5835 .get_wol = bnx2_get_wol,
5836 .set_wol = bnx2_set_wol,
5837 .nway_reset = bnx2_nway_reset,
5838 .get_link = ethtool_op_get_link,
5839 .get_eeprom_len = bnx2_get_eeprom_len,
5840 .get_eeprom = bnx2_get_eeprom,
5841 .set_eeprom = bnx2_set_eeprom,
5842 .get_coalesce = bnx2_get_coalesce,
5843 .set_coalesce = bnx2_set_coalesce,
5844 .get_ringparam = bnx2_get_ringparam,
5845 .set_ringparam = bnx2_set_ringparam,
5846 .get_pauseparam = bnx2_get_pauseparam,
5847 .set_pauseparam = bnx2_set_pauseparam,
5848 .get_rx_csum = bnx2_get_rx_csum,
5849 .set_rx_csum = bnx2_set_rx_csum,
5850 .get_tx_csum = ethtool_op_get_tx_csum,
5851 .set_tx_csum = bnx2_set_tx_csum,
5852 .get_sg = ethtool_op_get_sg,
5853 .set_sg = ethtool_op_set_sg,
5854 .get_tso = ethtool_op_get_tso,
5855 .set_tso = bnx2_set_tso,
5856 .self_test_count = bnx2_self_test_count,
5857 .self_test = bnx2_self_test,
5858 .get_strings = bnx2_get_strings,
5859 .phys_id = bnx2_phys_id,
5860 .get_stats_count = bnx2_get_stats_count,
5861 .get_ethtool_stats = bnx2_get_ethtool_stats,
5862 .get_perm_addr = ethtool_op_get_perm_addr,
5865 /* Called with rtnl_lock */
5867 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5869 struct mii_ioctl_data *data = if_mii(ifr);
5870 struct bnx2 *bp = netdev_priv(dev);
5875 data->phy_id = bp->phy_addr;
5881 if (!netif_running(dev))
5884 spin_lock_bh(&bp->phy_lock);
5885 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
5886 spin_unlock_bh(&bp->phy_lock);
5888 data->val_out = mii_regval;
5894 if (!capable(CAP_NET_ADMIN))
5897 if (!netif_running(dev))
5900 spin_lock_bh(&bp->phy_lock);
5901 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
5902 spin_unlock_bh(&bp->phy_lock);
5913 /* Called with rtnl_lock */
5915 bnx2_change_mac_addr(struct net_device *dev, void *p)
5917 struct sockaddr *addr = p;
5918 struct bnx2 *bp = netdev_priv(dev);
5920 if (!is_valid_ether_addr(addr->sa_data))
5921 return -EINVAL;
5923 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5924 if (netif_running(dev))
5925 bnx2_set_mac_addr(bp);
5930 /* Called with rtnl_lock */
5932 bnx2_change_mtu(struct net_device *dev, int new_mtu)
5934 struct bnx2 *bp = netdev_priv(dev);
5936 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5937 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5938 return -EINVAL;
5940 dev->mtu = new_mtu;
5941 if (netif_running(dev)) {
5942 bnx2_netif_stop(bp);
5946 bnx2_netif_start(bp);
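/* Netpoll support: poll_bnx2() services the interrupt handler synchronously
 * with the device IRQ disabled, so netconsole and similar netpoll users can
 * make progress without interrupt delivery.
 */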
5951 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5953 poll_bnx2(struct net_device *dev)
5955 struct bnx2 *bp = netdev_priv(dev);
5957 disable_irq(bp->pdev->irq);
5958 bnx2_interrupt(bp->pdev->irq, dev);
5959 enable_irq(bp->pdev->irq);
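/* bnx2_get_5709_media() decides whether a 5709 port is copper or SerDes by
 * examining the dual-media bond id and, for dual-media bonds, the strap
 * value (or its software override) selected for this PCI function.
 */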
5963 static void __devinit
5964 bnx2_get_5709_media(struct bnx2 *bp)
5966 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
5967 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
5970 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
5972 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
5973 bp->phy_flags |= PHY_SERDES_FLAG;
5977 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
5978 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
5980 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
5982 if (PCI_FUNC(bp->pdev->devfn) == 0) {
5987 bp->phy_flags |= PHY_SERDES_FLAG;
5995 bp->phy_flags |= PHY_SERDES_FLAG;
6001 static int __devinit
6002 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6005 unsigned long mem_len;
6008 u64 dma_mask, persist_dma_mask;
6010 SET_MODULE_OWNER(dev);
6011 SET_NETDEV_DEV(dev, &pdev->dev);
6012 bp = netdev_priv(dev);
6017 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6018 rc = pci_enable_device(pdev);
6020 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
6024 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6026 "Cannot find PCI device base address, aborting.\n");
6028 goto err_out_disable;
6031 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6033 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
6034 goto err_out_disable;
6037 pci_set_master(pdev);
6039 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6040 if (bp->pm_cap == 0) {
6042 "Cannot find power management capability, aborting.\n");
6044 goto err_out_release;
6050 spin_lock_init(&bp->phy_lock);
6051 spin_lock_init(&bp->indirect_lock);
6052 INIT_WORK(&bp->reset_task, bnx2_reset_task);
6054 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
6055 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
6056 dev->mem_end = dev->mem_start + mem_len;
6057 dev->irq = pdev->irq;
6059 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6062 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
6064 goto err_out_release;
6067 /* Configure byte swap and enable write to the reg_window registers.
6068 * Rely on CPU to do target byte swapping on big endian systems
6069 * The chip's target access swapping will not swap all accesses
6071 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6072 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6073 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6075 bnx2_set_power_state(bp, PCI_D0);
6077 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6079 if (CHIP_NUM(bp) != CHIP_NUM_5709) {
6080 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6081 if (bp->pcix_cap == 0) {
6083 "Cannot find PCIX capability, aborting.\n");
6089 /* 5708 cannot support DMA addresses > 40-bit. */
6090 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6091 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6093 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6095 /* Configure DMA attributes. */
6096 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6097 dev->features |= NETIF_F_HIGHDMA;
6098 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6101 "pci_set_consistent_dma_mask failed, aborting.\n");
6104 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6105 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6109 /* Get bus information. */
6110 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6111 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6114 bp->flags |= PCIX_FLAG;
6116 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6118 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6120 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6121 bp->bus_speed_mhz = 133;
6124 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6125 bp->bus_speed_mhz = 100;
6128 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6129 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6130 bp->bus_speed_mhz = 66;
6133 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6134 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6135 bp->bus_speed_mhz = 50;
6138 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6139 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6140 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6141 bp->bus_speed_mhz = 33;
6146 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6147 bp->bus_speed_mhz = 66;
6149 bp->bus_speed_mhz = 33;
6152 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6153 bp->flags |= PCI_32BIT_FLAG;
6155 /* 5706A0 may falsely detect SERR and PERR. */
6156 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6157 reg = REG_RD(bp, PCI_COMMAND);
6158 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6159 REG_WR(bp, PCI_COMMAND, reg);
6161 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6162 !(bp->flags & PCIX_FLAG)) {
6165 "5706 A1 can only be used in a PCIX bus, aborting.\n");
6169 bnx2_init_nvram(bp);
6171 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6173 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
6174 BNX2_SHM_HDR_SIGNATURE_SIG) {
6175 u32 off = PCI_FUNC(pdev->devfn) << 2;
6177 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6179 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6181 /* Get the permanent MAC address. First we need to make sure the
6182 * firmware is actually running.
6184 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
6186 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6187 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
6188 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
6193 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6195 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
6196 bp->mac_addr[0] = (u8) (reg >> 8);
6197 bp->mac_addr[1] = (u8) reg;
6199 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
6200 bp->mac_addr[2] = (u8) (reg >> 24);
6201 bp->mac_addr[3] = (u8) (reg >> 16);
6202 bp->mac_addr[4] = (u8) (reg >> 8);
6203 bp->mac_addr[5] = (u8) reg;
6205 bp->tx_ring_size = MAX_TX_DESC_CNT;
6206 bnx2_set_rx_ring_size(bp, 255);
6210 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6212 bp->tx_quick_cons_trip_int = 20;
6213 bp->tx_quick_cons_trip = 20;
6214 bp->tx_ticks_int = 80;
6217 bp->rx_quick_cons_trip_int = 6;
6218 bp->rx_quick_cons_trip = 6;
6219 bp->rx_ticks_int = 18;
6222 bp->stats_ticks = 1000000 & 0xffff00;
6224 bp->timer_interval = HZ;
6225 bp->current_interval = HZ;
6229 /* Disable WOL support if we are running on a SERDES chip. */
6230 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6231 bnx2_get_5709_media(bp);
6232 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
6233 bp->phy_flags |= PHY_SERDES_FLAG;
6235 if (bp->phy_flags & PHY_SERDES_FLAG) {
6236 bp->flags |= NO_WOL_FLAG;
6237 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
6239 reg = REG_RD_IND(bp, bp->shmem_base +
6240 BNX2_SHARED_HW_CFG_CONFIG);
6241 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6242 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6244 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6245 CHIP_NUM(bp) == CHIP_NUM_5708)
6246 bp->phy_flags |= PHY_CRC_FIX_FLAG;
6247 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6248 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
6250 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6251 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6252 (CHIP_ID(bp) == CHIP_ID_5708_B1))
6253 bp->flags |= NO_WOL_FLAG;
6255 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6256 bp->tx_quick_cons_trip_int =
6257 bp->tx_quick_cons_trip;
6258 bp->tx_ticks_int = bp->tx_ticks;
6259 bp->rx_quick_cons_trip_int =
6260 bp->rx_quick_cons_trip;
6261 bp->rx_ticks_int = bp->rx_ticks;
6262 bp->comp_prod_trip_int = bp->comp_prod_trip;
6263 bp->com_ticks_int = bp->com_ticks;
6264 bp->cmd_ticks_int = bp->cmd_ticks;
6267 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6269 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6270 * with byte enables disabled on the unused 32-bit word. This is legal
6271 * but causes problems on the AMD 8132 which will eventually stop
6272 * responding after a while.
6274 * AMD believes this incompatibility is unique to the 5706, and
6275 * prefers to locally disable MSI rather than globally disabling it.
6277 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6278 struct pci_dev *amd_8132 = NULL;
6280 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6281 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6285 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6286 if (rev >= 0x10 && rev <= 0x13) {
6288 pci_dev_put(amd_8132);
6294 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
6295 bp->req_line_speed = 0;
6296 if (bp->phy_flags & PHY_SERDES_FLAG) {
6297 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
6299 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
6300 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
6301 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
6303 bp->req_line_speed = bp->line_speed = SPEED_1000;
6304 bp->req_duplex = DUPLEX_FULL;
6308 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
6311 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6313 init_timer(&bp->timer);
6314 bp->timer.expires = RUN_AT(bp->timer_interval);
6315 bp->timer.data = (unsigned long) bp;
6316 bp->timer.function = bnx2_timer;
6322 iounmap(bp->regview);
6327 pci_release_regions(pdev);
6330 pci_disable_device(pdev);
6331 pci_set_drvdata(pdev, NULL);
6337 static int __devinit
6338 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6340 static int version_printed = 0;
6341 struct net_device *dev = NULL;
6345 if (version_printed++ == 0)
6346 printk(KERN_INFO "%s", version);
6348 /* dev zeroed in init_etherdev */
6349 dev = alloc_etherdev(sizeof(*bp));
6354 rc = bnx2_init_board(pdev, dev);
6360 dev->open = bnx2_open;
6361 dev->hard_start_xmit = bnx2_start_xmit;
6362 dev->stop = bnx2_close;
6363 dev->get_stats = bnx2_get_stats;
6364 dev->set_multicast_list = bnx2_set_rx_mode;
6365 dev->do_ioctl = bnx2_ioctl;
6366 dev->set_mac_address = bnx2_change_mac_addr;
6367 dev->change_mtu = bnx2_change_mtu;
6368 dev->tx_timeout = bnx2_tx_timeout;
6369 dev->watchdog_timeo = TX_TIMEOUT;
6371 dev->vlan_rx_register = bnx2_vlan_rx_register;
6372 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
6374 dev->poll = bnx2_poll;
6375 dev->ethtool_ops = &bnx2_ethtool_ops;
6378 bp = netdev_priv(dev);
6380 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6381 dev->poll_controller = poll_bnx2;
6384 pci_set_drvdata(pdev, dev);
6386 memcpy(dev->dev_addr, bp->mac_addr, 6);
6387 memcpy(dev->perm_addr, bp->mac_addr, 6);
6388 bp->name = board_info[ent->driver_data].name;
6390 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6391 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
6393 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
6395 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6397 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
6398 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6399 dev->features |= NETIF_F_TSO6;
6401 if ((rc = register_netdev(dev))) {
6402 dev_err(&pdev->dev, "Cannot register net device\n");
6404 iounmap(bp->regview);
6405 pci_release_regions(pdev);
6406 pci_disable_device(pdev);
6407 pci_set_drvdata(pdev, NULL);
6412 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
6416 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6417 ((CHIP_ID(bp) & 0x0ff0) >> 4),
6418 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
6419 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
6424 printk("node addr ");
6425 for (i = 0; i < 6; i++)
6426 printk("%2.2x", dev->dev_addr[i]);
6432 static void __devexit
6433 bnx2_remove_one(struct pci_dev *pdev)
6435 struct net_device *dev = pci_get_drvdata(pdev);
6436 struct bnx2 *bp = netdev_priv(dev);
6438 flush_scheduled_work();
6440 unregister_netdev(dev);
6443 iounmap(bp->regview);
6446 pci_release_regions(pdev);
6447 pci_disable_device(pdev);
6448 pci_set_drvdata(pdev, NULL);
6452 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
6454 struct net_device *dev = pci_get_drvdata(pdev);
6455 struct bnx2 *bp = netdev_priv(dev);
6458 if (!netif_running(dev))
6459 return 0;
6461 flush_scheduled_work();
6462 bnx2_netif_stop(bp);
6463 netif_device_detach(dev);
6464 del_timer_sync(&bp->timer);
6465 if (bp->flags & NO_WOL_FLAG)
6466 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
6467 else if (bp->wol)
6468 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6469 else
6470 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6471 bnx2_reset_chip(bp, reset_code);
6473 pci_save_state(pdev);
6474 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
6479 bnx2_resume(struct pci_dev *pdev)
6481 struct net_device *dev = pci_get_drvdata(pdev);
6482 struct bnx2 *bp = netdev_priv(dev);
6484 if (!netif_running(dev))
6485 return 0;
6487 pci_restore_state(pdev);
6488 bnx2_set_power_state(bp, PCI_D0);
6489 netif_device_attach(dev);
6490 bnx2_init_nic(bp);
6491 bnx2_netif_start(bp);
6495 static struct pci_driver bnx2_pci_driver = {
6496 .name = DRV_MODULE_NAME,
6497 .id_table = bnx2_pci_tbl,
6498 .probe = bnx2_init_one,
6499 .remove = __devexit_p(bnx2_remove_one),
6500 .suspend = bnx2_suspend,
6501 .resume = bnx2_resume,
6504 static int __init bnx2_init(void)
6506 return pci_register_driver(&bnx2_pci_driver);
6509 static void __exit bnx2_cleanup(void)
6511 pci_unregister_driver(&bnx2_pci_driver);
6514 module_init(bnx2_init);
6515 module_exit(bnx2_cleanup);
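/* In addition to the automatic 5706/AMD 8132 workaround above, MSI can be
 * disabled globally at module load time with the disable_msi parameter,
 * e.g. "modprobe bnx2 disable_msi=1".
 */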