[BNX2]: Update version to 1.6.9.
[linux-block.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76 54
110d0ef9 55#define FW_BUF_SIZE 0x10000
b3448b0b 56
b6016b76
MC
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
a0d142c6
MC
59#define DRV_MODULE_VERSION "1.7.0"
60#define DRV_MODULE_RELDATE "December 11, 2007"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
/* Board types supported by this driver.  The values index board_info[]
 * below and are carried in the driver_data field of bnx2_pci_tbl[].
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
} board_t;
91
/* indexed by board_t, above */
static const struct {
	char *name;	/* marketing name printed at probe time */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	};
106
/* PCI ID table.  The HP OEM entries (matched by subsystem vendor/device)
 * must come before the corresponding wildcard Broadcom entries so the
 * more specific match wins.  driver_data is a board_t index.
 */
static struct pci_device_id bnx2_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ 0, }
};
128
/* NVRAM (flash/EEPROM) descriptors for pre-5709 chips.  The part in use
 * is selected at runtime by matching chip strapping against the first
 * field of each entry; the remaining fields give the command words,
 * flags, and geometry used by the NVRAM access routines.
 */
static struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
217
e30372c9
MC
/* 5709 parts do not use the strap-matched flash_table above; they have
 * a single fixed buffered-flash layout described here.
 */
static struct flash_spec flash_5709 = {
	.flags = BNX2_NV_BUFFERED,
	.page_bits = BCM5709_FLASH_PAGE_BITS,
	.page_size = BCM5709_FLASH_PAGE_SIZE,
	.addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
	.name = "5709 Buffered flash (256kB)",
};
226
b6016b76
MC
227MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
228
e89bbf10
MC
229static inline u32 bnx2_tx_avail(struct bnx2 *bp)
230{
2f8af120 231 u32 diff;
e89bbf10 232
2f8af120 233 smp_mb();
faac9c4b
MC
234
235 /* The ring uses 256 indices for 255 entries, one of them
236 * needs to be skipped.
237 */
238 diff = bp->tx_prod - bp->tx_cons;
239 if (unlikely(diff >= TX_DESC_CNT)) {
240 diff &= 0xffff;
241 if (diff == TX_DESC_CNT)
242 diff = MAX_TX_DESC_CNT;
243 }
e89bbf10
MC
244 return (bp->tx_ring_size - diff);
245}
246
b6016b76
MC
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes use of the shared ADDRESS/WINDOW register
 * pair; taken _bh because these helpers are also used in softirq
 * context.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
258
/* Write @val to a device register indirectly through the PCI config
 * window.  See bnx2_reg_rd_ind() for the locking rationale.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
267
268static void
269bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
270{
271 offset += cid_addr;
1b8227c4 272 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
273 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
274 int i;
275
276 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
277 REG_WR(bp, BNX2_CTX_CTX_CTRL,
278 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
279 for (i = 0; i < 5; i++) {
280 u32 val;
281 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
282 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
283 break;
284 udelay(5);
285 }
286 } else {
287 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
288 REG_WR(bp, BNX2_CTX_DATA, val);
289 }
1b8227c4 290 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
291}
292
/* Read PHY register @reg over MDIO into *@val.
 *
 * If the MAC is auto-polling the PHY, auto-poll is temporarily turned
 * off around the manual MDIO transaction and restored afterwards (the
 * dummy REG_RD flushes the posted write before the 40us settle delay).
 * Returns 0 on success or -EBUSY if the transaction never completed
 * (*val is then set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the read: PHY address, register, command bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 * 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore auto-polling if it was enabled. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
349
/* Write @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-poll is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or
 * -EBUSY if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the write: PHY address, register, data, command bits. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to 50 * 10us for START_BUSY to clear. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore auto-polling if it was enabled. */
	if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
398
/* Mask chip interrupts.  The read-back flushes the posted write so the
 * mask is guaranteed to have taken effect on return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
406
/* Unmask chip interrupts and ack up to last_status_idx, then force an
 * immediate coalescing pass so any events that arrived while masked
 * generate a new interrupt.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	/* First write acks the current index with interrupts still
	 * masked; the second unmasks.
	 */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
	       BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
	       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);

	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
419
/* Mask chip interrupts and wait for any in-flight handler to finish.
 * intr_sem is bumped first so the ISR sees the disable in progress;
 * bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	synchronize_irq(bp->pdev->irq);
}
427
/* Quiesce the interface: disable interrupts synchronously, then stop
 * NAPI polling and the TX queue.  Counterpart of bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		napi_disable(&bp->napi);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
438
/* Resume the interface stopped by bnx2_netif_stop().  Only the call
 * that balances the last outstanding bnx2_disable_int_sync() (intr_sem
 * reaching zero) actually re-enables TX, NAPI, and interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_wake_queue(bp->dev);
			napi_enable(&bp->napi);
			bnx2_enable_int(bp);
		}
	}
}
450
451static void
452bnx2_free_mem(struct bnx2 *bp)
453{
13daffa2
MC
454 int i;
455
59b47d8a
MC
456 for (i = 0; i < bp->ctx_pages; i++) {
457 if (bp->ctx_blk[i]) {
458 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
459 bp->ctx_blk[i],
460 bp->ctx_blk_mapping[i]);
461 bp->ctx_blk[i] = NULL;
462 }
463 }
b6016b76 464 if (bp->status_blk) {
0f31f994 465 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
466 bp->status_blk, bp->status_blk_mapping);
467 bp->status_blk = NULL;
0f31f994 468 bp->stats_blk = NULL;
b6016b76
MC
469 }
470 if (bp->tx_desc_ring) {
e343d55c 471 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
b6016b76
MC
472 bp->tx_desc_ring, bp->tx_desc_mapping);
473 bp->tx_desc_ring = NULL;
474 }
b4558ea9
JJ
475 kfree(bp->tx_buf_ring);
476 bp->tx_buf_ring = NULL;
13daffa2
MC
477 for (i = 0; i < bp->rx_max_ring; i++) {
478 if (bp->rx_desc_ring[i])
e343d55c 479 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
13daffa2
MC
480 bp->rx_desc_ring[i],
481 bp->rx_desc_mapping[i]);
482 bp->rx_desc_ring[i] = NULL;
483 }
484 vfree(bp->rx_buf_ring);
b4558ea9 485 bp->rx_buf_ring = NULL;
47bf4246
MC
486 for (i = 0; i < bp->rx_max_pg_ring; i++) {
487 if (bp->rx_pg_desc_ring[i])
488 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
489 bp->rx_pg_desc_ring[i],
490 bp->rx_pg_desc_mapping[i]);
491 bp->rx_pg_desc_ring[i] = NULL;
492 }
493 if (bp->rx_pg_ring)
494 vfree(bp->rx_pg_ring);
495 bp->rx_pg_ring = NULL;
b6016b76
MC
496}
497
/* Allocate all host and DMA memory used by the driver: TX ring and its
 * shadow buffer array, RX (and optional RX page) rings, the combined
 * status/statistics block, and on 5709 the context memory pages.
 * Returns 0 on success or -ENOMEM; on failure everything allocated so
 * far is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size;

	bp->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
	if (bp->tx_buf_ring == NULL)
		return -ENOMEM;

	bp->tx_desc_ring = pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
						&bp->tx_desc_mapping);
	if (bp->tx_desc_ring == NULL)
		goto alloc_mem_err;

	/* Shadow arrays can be large (rx_max_ring pages' worth), so use
	 * vmalloc rather than kmalloc.
	 */
	bp->rx_buf_ring = vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
	if (bp->rx_buf_ring == NULL)
		goto alloc_mem_err;

	memset(bp->rx_buf_ring, 0, SW_RXBD_RING_SIZE * bp->rx_max_ring);

	for (i = 0; i < bp->rx_max_ring; i++) {
		bp->rx_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_desc_mapping[i]);
		if (bp->rx_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	if (bp->rx_pg_ring_size) {
		bp->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
					 bp->rx_max_pg_ring);
		if (bp->rx_pg_ring == NULL)
			goto alloc_mem_err;

		memset(bp->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
		       bp->rx_max_pg_ring);
	}

	for (i = 0; i < bp->rx_max_pg_ring; i++) {
		bp->rx_pg_desc_ring[i] =
			pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
					     &bp->rx_pg_desc_mapping[i]);
		if (bp->rx_pg_desc_ring[i] == NULL)
			goto alloc_mem_err;

	}

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					      &bp->status_blk_mapping);
	if (bp->status_blk == NULL)
		goto alloc_mem_err;

	memset(bp->status_blk, 0, bp->status_stats_size);

	/* Statistics block sits right after the cache-aligned status
	 * block inside the same DMA allocation.
	 */
	bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
				  status_blk_size);

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of context memory, split into
		 * BCM_PAGE_SIZE chunks (at least one page).
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}
	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
581
e3648b3d
MC
/* Report the current link state (speed/duplex/autoneg result) to the
 * bootcode through the BNX2_LINK_STATUS shared-memory word.  Skipped
 * when the link is managed by a remote PHY.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is latched; read twice to get the
			 * current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
}
640
9b1084b8
MC
641static char *
642bnx2_xceiver_str(struct bnx2 *bp)
643{
644 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
645 ((bp->phy_flags & PHY_SERDES_FLAG) ? "Remote Copper" :
646 "Copper"));
647}
648
b6016b76
MC
/* Log the link state to the console, update the carrier flag, and
 * forward the state to the bootcode.  The "Link is Up" line is built
 * from several continuation printk()s.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	/* Keep the bootcode's view of the link in sync. */
	bnx2_report_fw_link(bp);
}
685
/* Determine the pause (flow control) configuration to use and store it
 * in bp->flow_ctrl.  If autoneg of speed+pause is not fully enabled,
 * the requested setting is used directly (full duplex only).  On 5708
 * SerDes the resolved result is read from the PHY status register;
 * otherwise it is resolved from the local and partner advertisements
 * per IEEE 802.3ab Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only meaningful at full duplex. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000Base-X pause bits into the MII pause bit
	 * positions so the resolution logic below works for both.
	 */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
761
27a005b8
MC
/* Link-up handler for 5709 SerDes: read the negotiated speed/duplex
 * from the GP_STATUS block of the PHY (restoring the COMBO_IEEEB0
 * block afterwards).  Forced-speed configurations bypass the PHY
 * status and use the requested values.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
800
/* Link-up handler for 5708 SerDes: decode speed and duplex from the
 * 1000X_STAT1 PHY status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
829
/* Link-up handler for 5706 SerDes: speed is always 1000 Mbps; duplex
 * comes from BMCR when forced, or from the common subset of local and
 * partner 1000Base-X advertisements when autonegotiated.  Always
 * returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: the BMCR duplex above is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
866
/* Link-up handler for copper PHYs.  When autonegotiating, resolve
 * speed/duplex from the highest capability common to both ends
 * (1000 Mbps first via CTRL1000/STAT1000, then 100/10 via ADV/LPA);
 * when forced, decode them straight from BMCR.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 LP bits are shifted 2 relative to CTRL1000. */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability: treat as down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
932
/* Program the EMAC to match the resolved link parameters: inter-packet
 * gap, port mode (MII/GMII/2.5G), duplex, and RX/TX pause enables.
 * Finishes by acking the link-change interrupt.  Always returns 0.
 */
static int
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default TX lengths; 1000HD needs a larger value. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	return 0;
}
999
27a005b8
MC
1000static void
1001bnx2_enable_bmsr1(struct bnx2 *bp)
1002{
1003 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1004 (CHIP_NUM(bp) == CHIP_NUM_5709))
1005 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1006 MII_BNX2_BLK_ADDR_GP_STATUS);
1007}
1008
1009static void
1010bnx2_disable_bmsr1(struct bnx2 *bp)
1011{
1012 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
1013 (CHIP_NUM(bp) == CHIP_NUM_5709))
1014 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1015 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1016}
1017
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register (selecting the
 * OVER1G block on 5709) and add it to the autoneg advertisement mask.
 * Returns 1 if 2.5G was already enabled (or unsupported), 0 if this
 * call enabled it — callers use the 0 return to know a restart of
 * autoneg is needed.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1046
/* Disable 2.5G advertisement in the PHY's UP1 register (selecting the
 * OVER1G block on 5709).  Returns 1 if this call disabled it, 0 if it
 * was already disabled or the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1072
1073static void
1074bnx2_enable_forced_2g5(struct bnx2 *bp)
1075{
1076 u32 bmcr;
1077
1078 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1079 return;
1080
27a005b8
MC
1081 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1082 u32 val;
1083
1084 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1085 MII_BNX2_BLK_ADDR_SERDES_DIG);
1086 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1087 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1088 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1089 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1090
1091 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1092 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1093 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1094
1095 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1096 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1097 bmcr |= BCM5708S_BMCR_FORCE_2500;
1098 }
1099
1100 if (bp->autoneg & AUTONEG_SPEED) {
1101 bmcr &= ~BMCR_ANENABLE;
1102 if (bp->req_duplex == DUPLEX_FULL)
1103 bmcr |= BMCR_FULLDPLX;
1104 }
1105 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1106}
1107
1108static void
1109bnx2_disable_forced_2g5(struct bnx2 *bp)
1110{
1111 u32 bmcr;
1112
1113 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1114 return;
1115
27a005b8
MC
1116 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1117 u32 val;
1118
1119 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1120 MII_BNX2_BLK_ADDR_SERDES_DIG);
1121 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1122 val &= ~MII_BNX2_SD_MISC1_FORCE;
1123 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1124
1125 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1126 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1127 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1128
1129 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1130 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1131 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1132 }
1133
1134 if (bp->autoneg & AUTONEG_SPEED)
1135 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1136 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1137}
1138
b6016b76
MC
/* Re-evaluate link state from the PHY and program the MAC accordingly.
 * Updates bp->link_up and reports transitions via bnx2_report_link().
 * Called with phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware-managed (remote) PHY: link is driven by fw events. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: per MII convention the link-status bit is
	 * latched-low, so the first read may report a stale failure.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* On 5706 SerDes, trust the EMAC link status over the PHY's. */
	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific handler. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G setting so autoneg
		 * can start fresh on the next link attempt.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1204
1205static int
1206bnx2_reset_phy(struct bnx2 *bp)
1207{
1208 int i;
1209 u32 reg;
1210
ca58c3af 1211 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1212
1213#define PHY_RESET_MAX_WAIT 100
1214 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1215 udelay(10);
1216
ca58c3af 1217 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1218 if (!(reg & BMCR_RESET)) {
1219 udelay(20);
1220 break;
1221 }
1222 }
1223 if (i == PHY_RESET_MAX_WAIT) {
1224 return -EBUSY;
1225 }
1226 return 0;
1227}
1228
1229static u32
1230bnx2_phy_get_pause_adv(struct bnx2 *bp)
1231{
1232 u32 adv = 0;
1233
1234 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1235 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1236
1237 if (bp->phy_flags & PHY_SERDES_FLAG) {
1238 adv = ADVERTISE_1000XPAUSE;
1239 }
1240 else {
1241 adv = ADVERTISE_PAUSE_CAP;
1242 }
1243 }
1244 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1245 if (bp->phy_flags & PHY_SERDES_FLAG) {
1246 adv = ADVERTISE_1000XPSE_ASYM;
1247 }
1248 else {
1249 adv = ADVERTISE_PAUSE_ASYM;
1250 }
1251 }
1252 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1253 if (bp->phy_flags & PHY_SERDES_FLAG) {
1254 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1255 }
1256 else {
1257 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1258 }
1259 }
1260 return adv;
1261}
1262
0d8a6571
MC
1263static int bnx2_fw_sync(struct bnx2 *, u32, int);
1264
b6016b76 1265static int
0d8a6571
MC
1266bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1267{
1268 u32 speed_arg = 0, pause_adv;
1269
1270 pause_adv = bnx2_phy_get_pause_adv(bp);
1271
1272 if (bp->autoneg & AUTONEG_SPEED) {
1273 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1274 if (bp->advertising & ADVERTISED_10baseT_Half)
1275 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1276 if (bp->advertising & ADVERTISED_10baseT_Full)
1277 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1278 if (bp->advertising & ADVERTISED_100baseT_Half)
1279 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1280 if (bp->advertising & ADVERTISED_100baseT_Full)
1281 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1282 if (bp->advertising & ADVERTISED_1000baseT_Full)
1283 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1284 if (bp->advertising & ADVERTISED_2500baseX_Full)
1285 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1286 } else {
1287 if (bp->req_line_speed == SPEED_2500)
1288 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1289 else if (bp->req_line_speed == SPEED_1000)
1290 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1291 else if (bp->req_line_speed == SPEED_100) {
1292 if (bp->req_duplex == DUPLEX_FULL)
1293 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1294 else
1295 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1296 } else if (bp->req_line_speed == SPEED_10) {
1297 if (bp->req_duplex == DUPLEX_FULL)
1298 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1299 else
1300 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1301 }
1302 }
1303
1304 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1305 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1306 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1307 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1308
1309 if (port == PORT_TP)
1310 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1311 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1312
1313 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1314
1315 spin_unlock_bh(&bp->phy_lock);
1316 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1317 spin_lock_bh(&bp->phy_lock);
1318
1319 return 0;
1320}
1321
/* Program the SerDes PHY according to the requested link settings.
 * Forced-speed mode disables autoneg and may force the link down so
 * the partner notices the change; autoneg mode restarts negotiation
 * only when the advertisement actually changed.  Called with phy_lock
 * held; the lock is dropped around sleeps.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHY: hand the request to the firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling the 2.5G capability bit requires a link
		 * bounce for the partner to pick it up.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* 0x2000: presumably a 5709-specific
				 * forced-speed BMCR bit — TODO confirm
				 * against the 5709 register manual.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may sleep; drop the phy_lock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1436
/* Advertisement mask helpers.  ETHTOOL_ALL_FIBRE_SPEED expands
 * differently depending on the device's 2.5G capability, so it must
 * only be used where a "bp" (struct bnx2 *) is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register bit groups (copper). */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1451
0d8a6571
MC
/* Derive the driver's default link settings (autoneg/advertising or
 * forced speed/duplex) from the firmware's shared-memory link word for
 * remote-PHY devices.  The word is read from the copper or serdes
 * location depending on the current phy_port.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
	else
		link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate firmware speed bits into
		 * ethtool advertisement flags.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced speed: later checks win, so the highest
		 * speed present in the word takes precedence.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1498
deaf391b
MC
/* Establish default link settings: delegate to the firmware word for
 * remote PHYs; otherwise default to full autoneg, except when the
 * port hardware config forces 1G on a SerDes port.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return bnx2_set_default_remote_link(bp);

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* NVRAM port config may force a fixed 1G link. */
		reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1522
df149d70
MC
1523static void
1524bnx2_send_heart_beat(struct bnx2 *bp)
1525{
1526 u32 msg;
1527 u32 addr;
1528
1529 spin_lock(&bp->indirect_lock);
1530 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1531 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1532 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1533 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1534 spin_unlock(&bp->indirect_lock);
1535}
1536
0d8a6571
MC
/* Handle a link-status event from the firmware-managed remote PHY:
 * decode speed/duplex/flow-control from the shared-memory status word,
 * update driver state, and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	/* Answer the firmware's heartbeat request, then strip the bit
	 * so it does not perturb the status decoding below.
	 */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* HALF cases set duplex then fall through to the
		 * matching speed assignment.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control not fully autonegotiated: honor
			 * the requested setting on full duplex.
			 */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may have switched the media type. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1615
1616static int
1617bnx2_set_remote_link(struct bnx2 *bp)
1618{
1619 u32 evt_code;
1620
1621 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1622 switch (evt_code) {
1623 case BNX2_FW_EVT_CODE_LINK_EVENT:
1624 bnx2_remote_phy_event(bp);
1625 break;
1626 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1627 default:
df149d70 1628 bnx2_send_heart_beat(bp);
0d8a6571
MC
1629 break;
1630 }
1631 return 0;
1632}
1633
b6016b76
MC
/* Program a copper PHY according to the requested link settings.
 * Autoneg mode restarts negotiation only when the advertisement
 * changed; forced mode may bounce the link so the new speed takes.
 * Called with phy_lock held (dropped around the msleep).  Returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Mask current advertisement down to the bits we
		 * manage, for comparison against the new values.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only when something changed or
		 * autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced-speed path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read twice: BMSR link status is latched-low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1730
1731static int
0d8a6571 1732bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1733{
1734 if (bp->loopback == MAC_LOOPBACK)
1735 return 0;
1736
1737 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1738 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1739 }
1740 else {
1741 return (bnx2_setup_copper_phy(bp));
1742 }
1743}
1744
27a005b8
MC
/* One-time init of the 5709 SerDes PHY.  The 5709 exposes the standard
 * MII registers at offset +0x10 within a block-addressed register
 * space, so the mii_* shadow offsets are redirected first, then the
 * PHY is reset and the block registers are configured.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* Standard MII registers live at +0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Fixed fiber mode, no auto media detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable/disable 2.5G per device capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address on the default IEEE-B0 block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1793
/* One-time init of the 5708 SerDes PHY: reset, fiber mode, parallel
 * detection, optional 2.5G capability, plus board-specific TX signal
 * tuning from NVRAM.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* NVRAM may carry a board-specific TX control value; apply it
	 * only on backplane designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1851
/* One-time init of the 5706 SerDes PHY, including MTU-dependent
 * extended-packet-length setup via raw shadow registers.  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		/* 0x300 in MISC_GP_HW_CTL0 — purpose not documented
		 * here; presumably a 5706-specific hardware tweak.
		 */
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		/* Registers 0x18/0x1c are vendor shadow registers;
		 * bit meanings are not spelled out in this file.
		 */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length setting. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1888
/* One-time init of a copper PHY: reset, optional CRC-fix and
 * early-DAC workarounds, MTU-dependent extended packet length, and
 * ethernet@wirespeed enablement.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Vendor-documented register write sequence for a CRC errata
	 * workaround (raw shadow-register accesses).
	 */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC (bit 8 of a DSP expansion register). */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1939
1940
/* Top-level PHY initialization: set default MII register offsets,
 * enable link attention, read the PHY ID, run the chip-specific init,
 * then apply the current link configuration.  Remote-PHY devices skip
 * local PHY access entirely.  Returns 0 or a negative errno from the
 * init/setup helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default (IEEE) MII offsets; the 5709s init overrides these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	/* Compose the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1984
1985static int
1986bnx2_set_mac_loopback(struct bnx2 *bp)
1987{
1988 u32 mac_mode;
1989
1990 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1991 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1992 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1993 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1994 bp->link_up = 1;
1995 return 0;
1996}
1997
bc5a0690
MC
1998static int bnx2_test_link(struct bnx2 *);
1999
/* Put the PHY into loopback at 1G full duplex, wait up to ~1 s for
 * the link to settle, then configure the EMAC for GMII with loopback
 * bits cleared.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; proceed anyway after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2029
/* Send a command to the bootcode via the driver mailbox and wait for
 * an acknowledgement.  msg_data is tagged with an incrementing
 * sequence number.  A WAIT0 message returns success immediately after
 * the poll loop; otherwise a missing ack reports -EBUSY (and informs
 * the firmware of the timeout) and a bad status reports -EIO.  When
 * "silent" is set, the timeout is not logged.  Sleeps; must not be
 * called in atomic context.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		/* Firmware acks by echoing our sequence number. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2072
59b47d8a
MC
/* Initialize the 5709's host-memory-backed context: start the context
 * memory init, wait for it to complete, then program the host page
 * table with each context block's DMA address.  Returns 0 on success
 * or -EBUSY if the hardware does not respond in time.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the self-clearing MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Low/high halves of the 64-bit DMA address, then a
		 * write request for page-table entry i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2115
b6016b76
MC
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  5706 A0 needs some virtual CIDs remapped to
 * alternate physical CIDs.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			/* A0 errata: remap CIDs with bit 3 set. */
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each CID context spans multiple physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, vcid_addr, offset, 0);
		}
	}
}
2158
/* Quarantine defective RX buffer memory blocks.
 *
 * Asks the chip's firmware to hand out all free mbuf clusters, records
 * the good ones (bit 9 clear in the returned value), then frees only
 * the good ones back to the pool.  The bad clusters stay allocated and
 * are therefore never used for real traffic.
 *
 * Returns 0 on success, -ENOMEM if the temporary tracking array cannot
 * be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is the tracking capacity for good clusters —
	 * NOTE(review): presumably sized to the chip's mbuf pool;
	 * confirm against the RBUF block documentation.
	 */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encode the free command word from the cluster value —
		 * NOTE(review): layout inferred from usage; verify against
		 * the BNX2_RBUF_FW_BUF_FREE register description.
		 */
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2209
2210static void
6aa20a22 2211bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2212{
2213 u32 val;
2214 u8 *mac_addr = bp->dev->dev_addr;
2215
2216 val = (mac_addr[0] << 8) | mac_addr[1];
2217
2218 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2219
6aa20a22 2220 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2221 (mac_addr[4] << 8) | mac_addr[5];
2222
2223 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2224}
2225
47bf4246
MC
2226static inline int
2227bnx2_alloc_rx_page(struct bnx2 *bp, u16 index)
2228{
2229 dma_addr_t mapping;
2230 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2231 struct rx_bd *rxbd =
2232 &bp->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2233 struct page *page = alloc_page(GFP_ATOMIC);
2234
2235 if (!page)
2236 return -ENOMEM;
2237 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2238 PCI_DMA_FROMDEVICE);
2239 rx_pg->page = page;
2240 pci_unmap_addr_set(rx_pg, mapping, mapping);
2241 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2242 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2243 return 0;
2244}
2245
2246static void
2247bnx2_free_rx_page(struct bnx2 *bp, u16 index)
2248{
2249 struct sw_pg *rx_pg = &bp->rx_pg_ring[index];
2250 struct page *page = rx_pg->page;
2251
2252 if (!page)
2253 return;
2254
2255 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2256 PCI_DMA_FROMDEVICE);
2257
2258 __free_page(page);
2259 rx_pg->page = NULL;
2260}
2261
b6016b76
MC
2262static inline int
2263bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
2264{
2265 struct sk_buff *skb;
2266 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
2267 dma_addr_t mapping;
13daffa2 2268 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
2269 unsigned long align;
2270
932f3772 2271 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
b6016b76
MC
2272 if (skb == NULL) {
2273 return -ENOMEM;
2274 }
2275
59b47d8a
MC
2276 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2277 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 2278
b6016b76
MC
2279 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2280 PCI_DMA_FROMDEVICE);
2281
2282 rx_buf->skb = skb;
2283 pci_unmap_addr_set(rx_buf, mapping, mapping);
2284
2285 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2286 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2287
2288 bp->rx_prod_bseq += bp->rx_buf_use_size;
2289
2290 return 0;
2291}
2292
da3e4fbe
MC
2293static int
2294bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2295{
da3e4fbe 2296 struct status_block *sblk = bp->status_blk;
b6016b76 2297 u32 new_link_state, old_link_state;
da3e4fbe 2298 int is_set = 1;
b6016b76 2299
da3e4fbe
MC
2300 new_link_state = sblk->status_attn_bits & event;
2301 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2302 if (new_link_state != old_link_state) {
da3e4fbe
MC
2303 if (new_link_state)
2304 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2305 else
2306 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2307 } else
2308 is_set = 0;
2309
2310 return is_set;
2311}
2312
/* Service PHY attention events from the status block.
 *
 * A LINK_STATE change is handled by bnx2_set_link() under phy_lock;
 * a TIMER_ABORT event is forwarded to bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2325
/* Reclaim completed transmit descriptors.
 *
 * Walks the tx ring from the software consumer index to the hardware
 * consumer index published in the status block, unmapping and freeing
 * each completed skb, then wakes the tx queue if enough descriptors
 * were freed.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The hardware index skips the ring's last (link) descriptor. */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if the last BD of this packet has not
			 * completed yet (signed distance check handles
			 * index wrap).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap each fragment's descriptor. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Refresh the hardware index to pick up new completions. */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake: re-test under netif_tx_lock so we don't
	 * race with bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2413
/* Recycle 'count' page-ring entries from the consumer back to the
 * producer side without allocating new pages.
 *
 * If 'skb' is non-NULL, its last page fragment is detached, re-mapped,
 * and reinstated into the consumer slot before the skb is freed —
 * this undoes a partially assembled jumbo frame.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	dma_addr_t mapping;
	int i;
	u16 hw_prod = bp->rx_pg_prod, prod;
	u16 cons = bp->rx_pg_cons;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &bp->rx_pg_ring[prod];
		cons_rx_pg = &bp->rx_pg_ring[cons];
		cons_bd = &bp->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &bp->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (i == 0 && skb) {
			struct page *page;
			struct skb_shared_info *shinfo;

			/* Pull the last fragment back out of the skb and
			 * return it to the consumer slot.
			 */
			shinfo = skb_shinfo(skb);
			shinfo->nr_frags--;
			page = shinfo->frags[shinfo->nr_frags].page;
			shinfo->frags[shinfo->nr_frags].page = NULL;
			mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
					       PCI_DMA_FROMDEVICE);
			cons_rx_pg->page = page;
			pci_unmap_addr_set(cons_rx_pg, mapping, mapping);
			dev_kfree_skb(skb);
		}
		if (prod != cons) {
			/* Move page, mapping and descriptor address from
			 * the consumer slot to the producer slot.
			 */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
					pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	bp->rx_pg_prod = hw_prod;
	bp->rx_pg_cons = cons;
}
2462
/* Recycle an rx buffer: hand the skb (and, when cons != prod, its DMA
 * mapping and descriptor address) from the consumer slot back to the
 * producer slot so the hardware can reuse it.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the header area (which the CPU may have touched) back to
	 * the device before reposting the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and descriptor are already correct. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2492
/* Finish assembling a received packet into 'skb'.
 *
 * 'len' is the packet length (without the 4-byte CRC), 'hdr_len' is
 * non-zero when the frame was split across the page ring, 'ring_idx'
 * packs the consumer index in the high 16 bits and the producer index
 * in the low 16 bits.
 *
 * Replaces the consumed rx buffer with a fresh one; on allocation
 * failure the old buffer (and any consumed pages) are recycled and the
 * error is returned so the caller drops the packet.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct sk_buff *skb, unsigned int len,
	    unsigned int hdr_len, dma_addr_t dma_addr, u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Also recycle the page-ring entries this split
			 * frame would have consumed (raw_len includes the
			 * 4-byte CRC).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, bp->rx_offset);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear frame: all data is already in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = bp->rx_pg_cons;
		u16 pg_prod = bp->rx_pg_prod;

		/* Attach the remainder (including CRC) as page frags;
		 * the CRC is trimmed from the final fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain: trim them from
				 * whatever was attached last and recycle
				 * the unused pages.
				 */
				unsigned int tail = 4 - frag_len;

				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, NULL, pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &bp->rx_pg_ring[pg_cons];

			pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping),
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			/* Last fragment: drop the 4-byte CRC. */
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				bp->rx_pg_cons = pg_cons;
				bp->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, skb, pages - i);
				return err;
			}

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		bp->rx_pg_prod = pg_prod;
		bp->rx_pg_cons = pg_cons;
	}
	return 0;
}
2581
c09c2627
MC
2582static inline u16
2583bnx2_get_hw_rx_cons(struct bnx2 *bp)
2584{
2585 u16 cons = bp->status_blk->status_rx_quick_consumer_index0;
2586
2587 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2588 cons++;
2589 return cons;
2590}
2591
/* Receive fast path: process up to 'budget' completed rx descriptors.
 *
 * Small frames (<= rx_copy_thresh) are copied into a fresh skb and the
 * original buffer is recycled; larger frames are handed off to
 * bnx2_rx_skb() for buffer replacement and (for jumbo/split frames)
 * page-fragment assembly.  Finally the producer indices are written
 * back to the chip's mailbox registers.
 *
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bp);
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only the frame header area is synced here; the full
		 * buffer is unmapped later if the skb is kept.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr status header to the frame. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			/* Errored frame: recycle the buffer and move on. */
			bnx2_reuse_rx_skb(bp, skb, sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Split frame: the header length is carried in the
			 * ip_xsum field of the l2_fhdr.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Strip the 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, skb, len, hdr_len, dma_addr,
			   (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless VLAN-tagged (0x8100). */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bp);
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX,
			 bp->rx_pg_prod);

	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2736
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2759
/* One-shot MSI ISR: the hardware masks the interrupt automatically, so
 * unlike bnx2_msi() no INT_ACK_CMD masking write is needed before
 * scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev, &bp->napi);

	return IRQ_HANDLED;
}
2776
/* Shared INTx ISR: verify the interrupt is ours, mask and deassert it,
 * then schedule NAPI polling.
 *
 * Returns IRQ_NONE when the interrupt belongs to another device on the
 * shared line, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if NAPI was actually scheduled;
	 * otherwise a poll is already in flight and owns the index.
	 */
	if (netif_rx_schedule_prep(dev, &bp->napi)) {
		bp->last_status_idx = sblk->status_idx;
		__netif_rx_schedule(dev, &bp->napi);
	}

	return IRQ_HANDLED;
}
2815
/* Attention-bit events serviced by bnx2_phy_int(): link-state changes
 * and timer-abort (remote PHY) notifications.
 */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2818
f4e418f7
MC
2819static inline int
2820bnx2_has_work(struct bnx2 *bp)
2821{
2822 struct status_block *sblk = bp->status_blk;
2823
c09c2627 2824 if ((bnx2_get_hw_rx_cons(bp) != bp->rx_cons) ||
f4e418f7
MC
2825 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2826 return 1;
2827
da3e4fbe
MC
2828 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2829 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2830 return 1;
2831
2832 return 0;
2833}
2834
/* One NAPI work pass: service attention events, reclaim tx
 * completions, and process rx packets up to the remaining budget.
 *
 * Returns the cumulative number of rx packets processed (work_done
 * plus this pass's contribution).
 */
static int bnx2_poll_work(struct bnx2 *bp, int work_done, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bnx2_get_hw_rx_cons(bp) != bp->rx_cons)
		work_done += bnx2_rx_int(bp, budget - work_done);

	return work_done;
}
2862
/* NAPI poll handler: loop over bnx2_poll_work() until either the
 * budget is exhausted or no work remains, then re-enable interrupts.
 *
 * Returns the number of rx packets processed.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2 *bp = container_of(napi, struct bnx2, napi);
	int work_done = 0;
	struct status_block *sblk = bp->status_blk;

	while (1) {
		work_done = bnx2_poll_work(bp, work_done, budget);

		if (unlikely(work_done >= budget))
			break;

		/* bp->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bp->last_status_idx = sblk->status_idx;
		rmb();
		if (likely(!bnx2_has_work(bp))) {
			netif_rx_complete(bp->dev, napi);
			/* MSI: a single unmasking ack write suffices. */
			if (likely(bp->flags & USING_MSI_FLAG)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bp->last_status_idx);
				break;
			}
			/* INTx: first update the index with the interrupt
			 * still masked, then unmask with a second write —
			 * NOTE(review): presumably avoids a spurious INTx
			 * while the index is updated; confirm against the
			 * chip's INT_ACK_CMD documentation.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bp->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			break;
		}
	}

	return work_done;
}
2903
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC receive mode (promiscuous / VLAN-tag stripping)
 * and the RPM sort-user registers for broadcast, all-multicast, or a
 * hashed multicast filter built from the device's mc_list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only when no vlan group is registered and ASF
	 * management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: saturate the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address via CRC32: low byte selects one of
		 * 256 filter bits spread over the 8 hash registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2978
/* Load firmware into one of the two RV2P (receive) processors.
 *
 * Each 8-byte instruction is written via the INSTR_HIGH/INSTR_LOW
 * register pair and committed with an address/command write for the
 * selected processor.  The processor is left in reset; it is
 * un-stalled later in initialization.
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
3011
/* Load a firmware image into one of the chip's internal MIPS CPUs.
 *
 * Halts the CPU, writes the text (decompressed from gzip when
 * fw->gz_text is set), data, zeroed sbss/bss, and rodata sections into
 * the CPU's scratchpad window, sets the program counter to the entry
 * point, and restarts the CPU.
 *
 * Returns 0 on success or a negative zlib error code if decompression
 * of the text section fails.
 */
static int
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;
	int rc;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->gz_text) {
		int j;

		/* Decompress into the caller-provided fw->text buffer. */
		rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
				       fw->gz_text_len);
		if (rc < 0)
			return rc;

		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss_len) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the BSS area (zero-filled). */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss_len) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, 0);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);

	return 0;
}
3093
fba9fe91 3094static int
b6016b76
MC
3095bnx2_init_cpus(struct bnx2 *bp)
3096{
3097 struct cpu_reg cpu_reg;
af3ee519 3098 struct fw_info *fw;
110d0ef9
MC
3099 int rc, rv2p_len;
3100 void *text, *rv2p;
b6016b76
MC
3101
3102 /* Initialize the RV2P processor. */
b3448b0b
DV
3103 text = vmalloc(FW_BUF_SIZE);
3104 if (!text)
3105 return -ENOMEM;
110d0ef9
MC
3106 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3107 rv2p = bnx2_xi_rv2p_proc1;
3108 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3109 } else {
3110 rv2p = bnx2_rv2p_proc1;
3111 rv2p_len = sizeof(bnx2_rv2p_proc1);
3112 }
3113 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3114 if (rc < 0)
fba9fe91 3115 goto init_cpu_err;
ea1f8d5c 3116
b3448b0b 3117 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 3118
110d0ef9
MC
3119 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3120 rv2p = bnx2_xi_rv2p_proc2;
3121 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3122 } else {
3123 rv2p = bnx2_rv2p_proc2;
3124 rv2p_len = sizeof(bnx2_rv2p_proc2);
3125 }
3126 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3127 if (rc < 0)
fba9fe91 3128 goto init_cpu_err;
ea1f8d5c 3129
b3448b0b 3130 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
3131
3132 /* Initialize the RX Processor. */
3133 cpu_reg.mode = BNX2_RXP_CPU_MODE;
3134 cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
3135 cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
3136 cpu_reg.state = BNX2_RXP_CPU_STATE;
3137 cpu_reg.state_value_clear = 0xffffff;
3138 cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
3139 cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
3140 cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
3141 cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
3142 cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
3143 cpu_reg.spad_base = BNX2_RXP_SCRATCH;
3144 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3145
d43584c8
MC
3146 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3147 fw = &bnx2_rxp_fw_09;
3148 else
3149 fw = &bnx2_rxp_fw_06;
fba9fe91 3150
ea1f8d5c 3151 fw->text = text;
af3ee519 3152 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3153 if (rc)
3154 goto init_cpu_err;
3155
b6016b76
MC
3156 /* Initialize the TX Processor. */
3157 cpu_reg.mode = BNX2_TXP_CPU_MODE;
3158 cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
3159 cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
3160 cpu_reg.state = BNX2_TXP_CPU_STATE;
3161 cpu_reg.state_value_clear = 0xffffff;
3162 cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
3163 cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
3164 cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
3165 cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
3166 cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
3167 cpu_reg.spad_base = BNX2_TXP_SCRATCH;
3168 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3169
d43584c8
MC
3170 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3171 fw = &bnx2_txp_fw_09;
3172 else
3173 fw = &bnx2_txp_fw_06;
fba9fe91 3174
ea1f8d5c 3175 fw->text = text;
af3ee519 3176 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3177 if (rc)
3178 goto init_cpu_err;
3179
b6016b76
MC
3180 /* Initialize the TX Patch-up Processor. */
3181 cpu_reg.mode = BNX2_TPAT_CPU_MODE;
3182 cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
3183 cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
3184 cpu_reg.state = BNX2_TPAT_CPU_STATE;
3185 cpu_reg.state_value_clear = 0xffffff;
3186 cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
3187 cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
3188 cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
3189 cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
3190 cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
3191 cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
3192 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3193
d43584c8
MC
3194 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3195 fw = &bnx2_tpat_fw_09;
3196 else
3197 fw = &bnx2_tpat_fw_06;
fba9fe91 3198
ea1f8d5c 3199 fw->text = text;
af3ee519 3200 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3201 if (rc)
3202 goto init_cpu_err;
3203
b6016b76
MC
3204 /* Initialize the Completion Processor. */
3205 cpu_reg.mode = BNX2_COM_CPU_MODE;
3206 cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
3207 cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
3208 cpu_reg.state = BNX2_COM_CPU_STATE;
3209 cpu_reg.state_value_clear = 0xffffff;
3210 cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
3211 cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
3212 cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
3213 cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
3214 cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
3215 cpu_reg.spad_base = BNX2_COM_SCRATCH;
3216 cpu_reg.mips_view_base = 0x8000000;
6aa20a22 3217
d43584c8
MC
3218 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3219 fw = &bnx2_com_fw_09;
3220 else
3221 fw = &bnx2_com_fw_06;
fba9fe91 3222
ea1f8d5c 3223 fw->text = text;
af3ee519 3224 rc = load_cpu_fw(bp, &cpu_reg, fw);
fba9fe91
MC
3225 if (rc)
3226 goto init_cpu_err;
3227
d43584c8
MC
3228 /* Initialize the Command Processor. */
3229 cpu_reg.mode = BNX2_CP_CPU_MODE;
3230 cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
3231 cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
3232 cpu_reg.state = BNX2_CP_CPU_STATE;
3233 cpu_reg.state_value_clear = 0xffffff;
3234 cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
3235 cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
3236 cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
3237 cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
3238 cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
3239 cpu_reg.spad_base = BNX2_CP_SCRATCH;
3240 cpu_reg.mips_view_base = 0x8000000;
b6016b76 3241
110d0ef9 3242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d43584c8 3243 fw = &bnx2_cp_fw_09;
110d0ef9
MC
3244 else
3245 fw = &bnx2_cp_fw_06;
3246
3247 fw->text = text;
3248 rc = load_cpu_fw(bp, &cpu_reg, fw);
b6016b76 3249
fba9fe91 3250init_cpu_err:
ea1f8d5c 3251 vfree(text);
fba9fe91 3252 return rc;
b6016b76
MC
3253}
3254
/* Transition the device between PCI power states.
 *
 * PCI_D0: restore full power, clear PME status, and undo any
 * magic-packet/ACPI receive configuration armed for Wake-on-LAN.
 * PCI_D3hot: if WoL is enabled, reconfigure the MAC to receive magic
 * packets (forcing 10/100 autoneg on copper so the PHY links at low
 * power), notify firmware of the suspend, then program the PM control
 * register to enter D3hot.
 *
 * Returns 0 on success, -EINVAL for any other requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set power state to D0 and clear the PME status bit
		 * (write-one-to-clear). */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Acknowledge any received magic/ACPI packets and disable
		 * further magic-packet mode. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; they are temporarily
			 * overridden below and restored afterwards. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper, force 10/100 autoneg so the link can
			 * be maintained at low power for WoL. */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Program sort-user0 to accept broadcast/multicast,
			 * toggling it off first before enabling. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Notify firmware of the suspend unless WoL is known to be
		 * unusable on this board. */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WoL is armed. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;	/* D3hot */
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3391
3392static int
3393bnx2_acquire_nvram_lock(struct bnx2 *bp)
3394{
3395 u32 val;
3396 int j;
3397
3398 /* Request access to the flash interface. */
3399 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3400 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3401 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3402 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3403 break;
3404
3405 udelay(5);
3406 }
3407
3408 if (j >= NVRAM_TIMEOUT_COUNT)
3409 return -EBUSY;
3410
3411 return 0;
3412}
3413
3414static int
3415bnx2_release_nvram_lock(struct bnx2 *bp)
3416{
3417 int j;
3418 u32 val;
3419
3420 /* Relinquish nvram interface. */
3421 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3422
3423 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3424 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3425 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3426 break;
3427
3428 udelay(5);
3429 }
3430
3431 if (j >= NVRAM_TIMEOUT_COUNT)
3432 return -EBUSY;
3433
3434 return 0;
3435}
3436
3437
3438static int
3439bnx2_enable_nvram_write(struct bnx2 *bp)
3440{
3441 u32 val;
3442
3443 val = REG_RD(bp, BNX2_MISC_CFG);
3444 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3445
e30372c9 3446 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3447 int j;
3448
3449 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3450 REG_WR(bp, BNX2_NVM_COMMAND,
3451 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3452
3453 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3454 udelay(5);
3455
3456 val = REG_RD(bp, BNX2_NVM_COMMAND);
3457 if (val & BNX2_NVM_COMMAND_DONE)
3458 break;
3459 }
3460
3461 if (j >= NVRAM_TIMEOUT_COUNT)
3462 return -EBUSY;
3463 }
3464 return 0;
3465}
3466
3467static void
3468bnx2_disable_nvram_write(struct bnx2 *bp)
3469{
3470 u32 val;
3471
3472 val = REG_RD(bp, BNX2_MISC_CFG);
3473 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3474}
3475
3476
3477static void
3478bnx2_enable_nvram_access(struct bnx2 *bp)
3479{
3480 u32 val;
3481
3482 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3483 /* Enable both bits, even on read. */
6aa20a22 3484 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3485 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3486}
3487
3488static void
3489bnx2_disable_nvram_access(struct bnx2 *bp)
3490{
3491 u32 val;
3492
3493 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3494 /* Disable both bits, even after read. */
6aa20a22 3495 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3496 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3497 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3498}
3499
/* Erase the flash page containing @offset.  Buffered flash parts
 * (including the 5709) handle erasure internally, so this is a no-op
 * for them.  Returns 0 on success, -EBUSY if the erase command does
 * not complete within the poll limit.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM page to erase. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3539
/* Read one 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries the FIRST/LAST framing bits used to bracket
 * multi-word transfers.  Caller must already hold the NVRAM lock and
 * have enabled NVRAM access.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Convert the linear offset into the page:byte form that
		 * buffered flash parts address. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			val = REG_RD(bp, BNX2_NVM_READ);

			/* Data arrives big-endian in the read register. */
			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3585
3586
/* Write one 32-bit word (@val, 4 bytes) to NVRAM at @offset.
 * @cmd_flags carries the FIRST/LAST framing bits used to bracket
 * multi-word transfers.  Caller must already hold the NVRAM lock and
 * have enabled NVRAM writes.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* The write register expects big-endian data. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3630
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * The 5709 always uses a fixed flash spec.  Older chips are matched
 * against flash_table[] using the strap bits in NVM_CFG1; if the
 * interface has not yet been reconfigured, this function also programs
 * the NVM_CFG/NVM_WRITE1 registers from the matched entry.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or an
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap bits of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall back
	 * to the table's total size when it is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3713
/* Read @buf_size bytes of NVRAM starting at @offset into @ret_buf.
 * Handles arbitrary (unaligned) offsets and lengths by reading whole
 * dwords and copying out only the requested bytes.  Acquires the NVRAM
 * lock and access for the duration of the transfer.
 *
 * Returns 0 on success or a negative error from the underlying dword
 * reads/lock acquisition.
 *
 * NOTE(review): the early `return rc` paths exit without calling
 * bnx2_disable_nvram_access()/bnx2_release_nvram_lock() — looks like
 * the lock is leaked on read errors; confirm intent before relying on
 * error-path behavior.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		/* Unaligned start: read the containing dword and copy out
		 * only the trailing bytes the caller asked for. */
		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			/* Entire request fits in this one dword. */
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	if (len32 & 3) {
		/* Round the remaining length up to a dword; `extra` is how
		 * many tail bytes must be discarded at the end. */
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* Single remaining dword; FIRST only if nothing was read
		 * in the unaligned-start path above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle dwords go straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final dword: read into a scratch buffer so any padding
		 * bytes (`extra`) are not copied to the caller. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
3823
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned head/tail bytes are handled by reading the surrounding
 * dwords first and merging into an allocated bounce buffer
 * (@align_buf).  For non-buffered flash, each affected page is read
 * into @flash_buffer, erased, and rewritten with old data around the
 * new range (read-modify-write at page granularity).  The NVRAM lock
 * and write enable are acquired per page and released after each page.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or an error
 * from the underlying read/write/erase/lock helpers.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: round down and fetch the dword that
		 * contains the leading bytes we must preserve. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: fetch the dword containing the trailing
		 * bytes we must preserve. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge preserved head/tail bytes with the caller's data
		 * into a dword-aligned bounce buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch space for one full page during read-modify-write.
		 * 264 bytes covers the largest supported page size. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
			    ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
			     (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4003
0d8a6571
MC
4004static void
4005bnx2_init_remote_phy(struct bnx2 *bp)
4006{
4007 u32 val;
4008
4009 bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
4010 if (!(bp->phy_flags & PHY_SERDES_FLAG))
4011 return;
4012
4013 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
4014 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4015 return;
4016
4017 if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
0d8a6571
MC
4018 bp->phy_flags |= REMOTE_PHY_CAP_FLAG;
4019
4020 val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
4021 if (val & BNX2_LINK_STATUS_SERDES_LINK)
4022 bp->phy_port = PORT_FIBRE;
4023 else
4024 bp->phy_port = PORT_TP;
489310a4
MC
4025
4026 if (netif_running(bp->dev)) {
4027 u32 sig;
4028
4029 if (val & BNX2_LINK_STATUS_LINK_UP) {
4030 bp->link_up = 1;
4031 netif_carrier_on(bp->dev);
4032 } else {
4033 bp->link_up = 0;
4034 netif_carrier_off(bp->dev);
4035 }
4036 sig = BNX2_DRV_ACK_CAP_SIGNATURE |
4037 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4038 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
4039 sig);
4040 }
0d8a6571
MC
4041 }
4042}
4043
b6016b76
MC
4044static int
4045bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4046{
4047 u32 val;
4048 int i, rc = 0;
489310a4 4049 u8 old_port;
b6016b76
MC
4050
4051 /* Wait for the current PCI transaction to complete before
4052 * issuing a reset. */
4053 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4054 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4055 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4056 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4057 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4058 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4059 udelay(5);
4060
b090ae2b
MC
4061 /* Wait for the firmware to tell us it is ok to issue a reset. */
4062 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
4063
b6016b76
MC
4064 /* Deposit a driver reset signature so the firmware knows that
4065 * this is a soft reset. */
e3648b3d 4066 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
b6016b76
MC
4067 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4068
b6016b76
MC
4069 /* Do a dummy read to force the chip to complete all current transaction
4070 * before we issue a reset. */
4071 val = REG_RD(bp, BNX2_MISC_ID);
4072
234754d5
MC
4073 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4074 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4075 REG_RD(bp, BNX2_MISC_COMMAND);
4076 udelay(5);
b6016b76 4077
234754d5
MC
4078 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4079 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 4080
234754d5 4081 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 4082
234754d5
MC
4083 } else {
4084 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4085 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4086 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4087
4088 /* Chip reset. */
4089 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4090
594a9dfa
MC
4091 /* Reading back any register after chip reset will hang the
4092 * bus on 5706 A0 and A1. The msleep below provides plenty
4093 * of margin for write posting.
4094 */
234754d5 4095 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
8e545881
AV
4096 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4097 msleep(20);
b6016b76 4098
234754d5
MC
4099 /* Reset takes approximate 30 usec */
4100 for (i = 0; i < 10; i++) {
4101 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4102 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4103 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4104 break;
4105 udelay(10);
4106 }
4107
4108 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4109 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4110 printk(KERN_ERR PFX "Chip reset did not complete\n");
4111 return -EBUSY;
4112 }
b6016b76
MC
4113 }
4114
4115 /* Make sure byte swapping is properly configured. */
4116 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4117 if (val != 0x01020304) {
4118 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4119 return -ENODEV;
4120 }
4121
b6016b76 4122 /* Wait for the firmware to finish its initialization. */
b090ae2b
MC
4123 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
4124 if (rc)
4125 return rc;
b6016b76 4126
0d8a6571 4127 spin_lock_bh(&bp->phy_lock);
489310a4 4128 old_port = bp->phy_port;
0d8a6571 4129 bnx2_init_remote_phy(bp);
489310a4 4130 if ((bp->phy_flags & REMOTE_PHY_CAP_FLAG) && old_port != bp->phy_port)
0d8a6571
MC
4131 bnx2_set_default_remote_link(bp);
4132 spin_unlock_bh(&bp->phy_lock);
4133
b6016b76
MC
4134 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4135 /* Adjust the voltage regular to two steps lower. The default
4136 * of this register is 0x0000000e. */
4137 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4138
4139 /* Remove bad rbuf memory from the free pool. */
4140 rc = bnx2_alloc_bad_rbuf(bp);
4141 }
4142
4143 return rc;
4144}
4145
/* Bring the chip from post-reset state to an operational baseline.
 *
 * Configures DMA byte/word swapping and channel counts, PCI-X quirks,
 * context memory, on-chip CPUs (firmware download), NVRAM, MAC
 * address, mailbox-queue windows, page sizes, MTU, host-coalescing
 * trip points/timers, the receive filter, and finally tells firmware
 * the reset sequence is done and enables the remaining blocks.
 *
 * Returns 0 on success or an error from context init, CPU firmware
 * load, or the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA config bit for 133 MHz PCI-X buses. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: restrict to a single DMA engine. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download firmware into the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 errata: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and timer ticks: each register
	 * packs the interrupt-context value in the high half. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell firmware the reset sequence has completed. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache the host-coalescing command register for later use. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4323
59b47d8a
MC
4324static void
4325bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4326{
4327 u32 val, offset0, offset1, offset2, offset3;
4328
4329 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4330 offset0 = BNX2_L2CTX_TYPE_XI;
4331 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4332 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4333 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4334 } else {
4335 offset0 = BNX2_L2CTX_TYPE;
4336 offset1 = BNX2_L2CTX_CMD_TYPE;
4337 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4338 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4339 }
4340 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4341 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4342
4343 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4344 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4345
4346 val = (u64) bp->tx_desc_mapping >> 32;
4347 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4348
4349 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4350 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4351}
b6016b76
MC
4352
4353static void
4354bnx2_init_tx_ring(struct bnx2 *bp)
4355{
4356 struct tx_bd *txbd;
59b47d8a 4357 u32 cid;
b6016b76 4358
2f8af120
MC
4359 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4360
b6016b76 4361 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4362
b6016b76
MC
4363 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4364 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4365
4366 bp->tx_prod = 0;
4367 bp->tx_cons = 0;
f4e418f7 4368 bp->hw_tx_cons = 0;
b6016b76 4369 bp->tx_prod_bseq = 0;
6aa20a22 4370
59b47d8a
MC
4371 cid = TX_CID;
4372 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4373 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4374
59b47d8a 4375 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4376}
4377
4378static void
5d5d0015
MC
4379bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4380 int num_rings)
b6016b76 4381{
b6016b76 4382 int i;
5d5d0015 4383 struct rx_bd *rxbd;
6aa20a22 4384
5d5d0015 4385 for (i = 0; i < num_rings; i++) {
13daffa2 4386 int j;
b6016b76 4387
5d5d0015 4388 rxbd = &rx_ring[i][0];
13daffa2 4389 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4390 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4391 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4392 }
5d5d0015 4393 if (i == (num_rings - 1))
13daffa2
MC
4394 j = 0;
4395 else
4396 j = i + 1;
5d5d0015
MC
4397 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4398 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4399 }
5d5d0015
MC
4400}
4401
4402static void
4403bnx2_init_rx_ring(struct bnx2 *bp)
4404{
4405 int i;
4406 u16 prod, ring_prod;
4407 u32 val, rx_cid_addr = GET_CID_ADDR(RX_CID);
4408
4409 bp->rx_prod = 0;
4410 bp->rx_cons = 0;
4411 bp->rx_prod_bseq = 0;
47bf4246
MC
4412 bp->rx_pg_prod = 0;
4413 bp->rx_pg_cons = 0;
5d5d0015
MC
4414
4415 bnx2_init_rxbd_rings(bp->rx_desc_ring, bp->rx_desc_mapping,
4416 bp->rx_buf_use_size, bp->rx_max_ring);
4417
4418 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246
MC
4419 if (bp->rx_pg_ring_size) {
4420 bnx2_init_rxbd_rings(bp->rx_pg_desc_ring,
4421 bp->rx_pg_desc_mapping,
4422 PAGE_SIZE, bp->rx_max_pg_ring);
4423 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
4424 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4425 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
4426 BNX2_L2CTX_RBDC_JUMBO_KEY);
4427
4428 val = (u64) bp->rx_pg_desc_mapping[0] >> 32;
4429 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
4430
4431 val = (u64) bp->rx_pg_desc_mapping[0] & 0xffffffff;
4432 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
4433
4434 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4435 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4436 }
b6016b76
MC
4437
4438 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
4439 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
4440 val |= 0x02 << 8;
5d5d0015 4441 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
b6016b76 4442
13daffa2 4443 val = (u64) bp->rx_desc_mapping[0] >> 32;
5d5d0015 4444 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4445
13daffa2 4446 val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
5d5d0015 4447 CTX_WR(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4448
47bf4246
MC
4449 ring_prod = prod = bp->rx_pg_prod;
4450 for (i = 0; i < bp->rx_pg_ring_size; i++) {
4451 if (bnx2_alloc_rx_page(bp, ring_prod) < 0)
4452 break;
4453 prod = NEXT_RX_BD(prod);
4454 ring_prod = RX_PG_RING_IDX(prod);
4455 }
4456 bp->rx_pg_prod = prod;
4457
5d5d0015 4458 ring_prod = prod = bp->rx_prod;
236b6394 4459 for (i = 0; i < bp->rx_ring_size; i++) {
b6016b76
MC
4460 if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
4461 break;
4462 }
4463 prod = NEXT_RX_BD(prod);
4464 ring_prod = RX_RING_IDX(prod);
4465 }
4466 bp->rx_prod = prod;
4467
47bf4246 4468 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_PG_BDIDX, bp->rx_pg_prod);
b6016b76
MC
4469 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);
4470
4471 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
4472}
4473
5d5d0015 4474static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4475{
5d5d0015 4476 u32 max, num_rings = 1;
13daffa2 4477
5d5d0015
MC
4478 while (ring_size > MAX_RX_DESC_CNT) {
4479 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4480 num_rings++;
4481 }
4482 /* round to next power of 2 */
5d5d0015 4483 max = max_size;
13daffa2
MC
4484 while ((max & num_rings) == 0)
4485 max >>= 1;
4486
4487 if (num_rings != max)
4488 max <<= 1;
4489
5d5d0015
MC
4490 return max;
4491}
4492
4493static void
4494bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4495{
84eaa187 4496 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
4497
4498 /* 8 for CRC and VLAN */
4499 rx_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
4500
84eaa187
MC
4501 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4502 sizeof(struct skb_shared_info);
4503
5d5d0015 4504 bp->rx_copy_thresh = RX_COPY_THRESH;
47bf4246
MC
4505 bp->rx_pg_ring_size = 0;
4506 bp->rx_max_pg_ring = 0;
4507 bp->rx_max_pg_ring_idx = 0;
84eaa187
MC
4508 if (rx_space > PAGE_SIZE) {
4509 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4510
4511 jumbo_size = size * pages;
4512 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4513 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4514
4515 bp->rx_pg_ring_size = jumbo_size;
4516 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
4517 MAX_RX_PG_RINGS);
4518 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
4519 rx_size = RX_COPY_THRESH + bp->rx_offset;
4520 bp->rx_copy_thresh = 0;
4521 }
5d5d0015
MC
4522
4523 bp->rx_buf_use_size = rx_size;
4524 /* hw alignment */
4525 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
1db82f2a 4526 bp->rx_jumbo_thresh = rx_size - bp->rx_offset;
5d5d0015
MC
4527 bp->rx_ring_size = size;
4528 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
4529 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4530}
4531
b6016b76
MC
4532static void
4533bnx2_free_tx_skbs(struct bnx2 *bp)
4534{
4535 int i;
4536
4537 if (bp->tx_buf_ring == NULL)
4538 return;
4539
4540 for (i = 0; i < TX_DESC_CNT; ) {
4541 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
4542 struct sk_buff *skb = tx_buf->skb;
4543 int j, last;
4544
4545 if (skb == NULL) {
4546 i++;
4547 continue;
4548 }
4549
4550 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
4551 skb_headlen(skb), PCI_DMA_TODEVICE);
4552
4553 tx_buf->skb = NULL;
4554
4555 last = skb_shinfo(skb)->nr_frags;
4556 for (j = 0; j < last; j++) {
4557 tx_buf = &bp->tx_buf_ring[i + j + 1];
4558 pci_unmap_page(bp->pdev,
4559 pci_unmap_addr(tx_buf, mapping),
4560 skb_shinfo(skb)->frags[j].size,
4561 PCI_DMA_TODEVICE);
4562 }
745720e5 4563 dev_kfree_skb(skb);
b6016b76
MC
4564 i += j + 1;
4565 }
4566
4567}
4568
4569static void
4570bnx2_free_rx_skbs(struct bnx2 *bp)
4571{
4572 int i;
4573
4574 if (bp->rx_buf_ring == NULL)
4575 return;
4576
13daffa2 4577 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4578 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4579 struct sk_buff *skb = rx_buf->skb;
4580
05d0f1cf 4581 if (skb == NULL)
b6016b76
MC
4582 continue;
4583
4584 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4585 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4586
4587 rx_buf->skb = NULL;
4588
745720e5 4589 dev_kfree_skb(skb);
b6016b76 4590 }
47bf4246
MC
4591 for (i = 0; i < bp->rx_max_pg_ring_idx; i++)
4592 bnx2_free_rx_page(bp, i);
b6016b76
MC
4593}
4594
/* Release all TX and RX buffers (used around resets and shutdown). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4601
4602static int
4603bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4604{
4605 int rc;
4606
4607 rc = bnx2_reset_chip(bp, reset_code);
4608 bnx2_free_skbs(bp);
4609 if (rc)
4610 return rc;
4611
fba9fe91
MC
4612 if ((rc = bnx2_init_chip(bp)) != 0)
4613 return rc;
4614
b6016b76
MC
4615 bnx2_init_tx_ring(bp);
4616 bnx2_init_rx_ring(bp);
4617 return 0;
4618}
4619
4620static int
4621bnx2_init_nic(struct bnx2 *bp)
4622{
4623 int rc;
4624
4625 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4626 return rc;
4627
80be4434 4628 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4629 bnx2_init_phy(bp);
4630 bnx2_set_link(bp);
0d8a6571 4631 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4632 return 0;
4633}
4634
4635static int
4636bnx2_test_registers(struct bnx2 *bp)
4637{
4638 int ret;
5bae30c9 4639 int i, is_5709;
f71e1309 4640 static const struct {
b6016b76
MC
4641 u16 offset;
4642 u16 flags;
5bae30c9 4643#define BNX2_FL_NOT_5709 1
b6016b76
MC
4644 u32 rw_mask;
4645 u32 ro_mask;
4646 } reg_tbl[] = {
4647 { 0x006c, 0, 0x00000000, 0x0000003f },
4648 { 0x0090, 0, 0xffffffff, 0x00000000 },
4649 { 0x0094, 0, 0x00000000, 0x00000000 },
4650
5bae30c9
MC
4651 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4652 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4653 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4654 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4655 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4656 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4657 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4658 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4659 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4660
4661 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4662 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4663 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4664 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4665 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4666 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4667
4668 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4669 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4670 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4671
4672 { 0x1000, 0, 0x00000000, 0x00000001 },
4673 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4674
4675 { 0x1408, 0, 0x01c00800, 0x00000000 },
4676 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4677 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4678 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4679 { 0x14b0, 0, 0x00000002, 0x00000001 },
4680 { 0x14b8, 0, 0x00000000, 0x00000000 },
4681 { 0x14c0, 0, 0x00000000, 0x00000009 },
4682 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4683 { 0x14cc, 0, 0x00000000, 0x00000001 },
4684 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4685
4686 { 0x1800, 0, 0x00000000, 0x00000001 },
4687 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4688
4689 { 0x2800, 0, 0x00000000, 0x00000001 },
4690 { 0x2804, 0, 0x00000000, 0x00003f01 },
4691 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4692 { 0x2810, 0, 0xffff0000, 0x00000000 },
4693 { 0x2814, 0, 0xffff0000, 0x00000000 },
4694 { 0x2818, 0, 0xffff0000, 0x00000000 },
4695 { 0x281c, 0, 0xffff0000, 0x00000000 },
4696 { 0x2834, 0, 0xffffffff, 0x00000000 },
4697 { 0x2840, 0, 0x00000000, 0xffffffff },
4698 { 0x2844, 0, 0x00000000, 0xffffffff },
4699 { 0x2848, 0, 0xffffffff, 0x00000000 },
4700 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4701
4702 { 0x2c00, 0, 0x00000000, 0x00000011 },
4703 { 0x2c04, 0, 0x00000000, 0x00030007 },
4704
b6016b76
MC
4705 { 0x3c00, 0, 0x00000000, 0x00000001 },
4706 { 0x3c04, 0, 0x00000000, 0x00070000 },
4707 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4708 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4709 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4710 { 0x3c14, 0, 0x00000000, 0xffffffff },
4711 { 0x3c18, 0, 0x00000000, 0xffffffff },
4712 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4713 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4714
4715 { 0x5004, 0, 0x00000000, 0x0000007f },
4716 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4717
b6016b76
MC
4718 { 0x5c00, 0, 0x00000000, 0x00000001 },
4719 { 0x5c04, 0, 0x00000000, 0x0003000f },
4720 { 0x5c08, 0, 0x00000003, 0x00000000 },
4721 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4722 { 0x5c10, 0, 0x00000000, 0xffffffff },
4723 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4724 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4725 { 0x5c88, 0, 0x00000000, 0x00077373 },
4726 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4727
4728 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4729 { 0x680c, 0, 0xffffffff, 0x00000000 },
4730 { 0x6810, 0, 0xffffffff, 0x00000000 },
4731 { 0x6814, 0, 0xffffffff, 0x00000000 },
4732 { 0x6818, 0, 0xffffffff, 0x00000000 },
4733 { 0x681c, 0, 0xffffffff, 0x00000000 },
4734 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4735 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4736 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4737 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4738 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4739 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4740 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4741 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4742 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4743 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4744 { 0x684c, 0, 0xffffffff, 0x00000000 },
4745 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4746 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4747 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4748 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4749 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4750 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4751
4752 { 0xffff, 0, 0x00000000, 0x00000000 },
4753 };
4754
4755 ret = 0;
5bae30c9
MC
4756 is_5709 = 0;
4757 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4758 is_5709 = 1;
4759
b6016b76
MC
4760 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4761 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4762 u16 flags = reg_tbl[i].flags;
4763
4764 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4765 continue;
b6016b76
MC
4766
4767 offset = (u32) reg_tbl[i].offset;
4768 rw_mask = reg_tbl[i].rw_mask;
4769 ro_mask = reg_tbl[i].ro_mask;
4770
14ab9b86 4771 save_val = readl(bp->regview + offset);
b6016b76 4772
14ab9b86 4773 writel(0, bp->regview + offset);
b6016b76 4774
14ab9b86 4775 val = readl(bp->regview + offset);
b6016b76
MC
4776 if ((val & rw_mask) != 0) {
4777 goto reg_test_err;
4778 }
4779
4780 if ((val & ro_mask) != (save_val & ro_mask)) {
4781 goto reg_test_err;
4782 }
4783
14ab9b86 4784 writel(0xffffffff, bp->regview + offset);
b6016b76 4785
14ab9b86 4786 val = readl(bp->regview + offset);
b6016b76
MC
4787 if ((val & rw_mask) != rw_mask) {
4788 goto reg_test_err;
4789 }
4790
4791 if ((val & ro_mask) != (save_val & ro_mask)) {
4792 goto reg_test_err;
4793 }
4794
14ab9b86 4795 writel(save_val, bp->regview + offset);
b6016b76
MC
4796 continue;
4797
4798reg_test_err:
14ab9b86 4799 writel(save_val, bp->regview + offset);
b6016b76
MC
4800 ret = -ENODEV;
4801 break;
4802 }
4803 return ret;
4804}
4805
4806static int
4807bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4808{
f71e1309 4809 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4810 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4811 int i;
4812
4813 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4814 u32 offset;
4815
4816 for (offset = 0; offset < size; offset += 4) {
4817
4818 REG_WR_IND(bp, start + offset, test_pattern[i]);
4819
4820 if (REG_RD_IND(bp, start + offset) !=
4821 test_pattern[i]) {
4822 return -ENODEV;
4823 }
4824 }
4825 }
4826 return 0;
4827}
4828
4829static int
4830bnx2_test_memory(struct bnx2 *bp)
4831{
4832 int ret = 0;
4833 int i;
5bae30c9 4834 static struct mem_entry {
b6016b76
MC
4835 u32 offset;
4836 u32 len;
5bae30c9 4837 } mem_tbl_5706[] = {
b6016b76 4838 { 0x60000, 0x4000 },
5b0c76ad 4839 { 0xa0000, 0x3000 },
b6016b76
MC
4840 { 0xe0000, 0x4000 },
4841 { 0x120000, 0x4000 },
4842 { 0x1a0000, 0x4000 },
4843 { 0x160000, 0x4000 },
4844 { 0xffffffff, 0 },
5bae30c9
MC
4845 },
4846 mem_tbl_5709[] = {
4847 { 0x60000, 0x4000 },
4848 { 0xa0000, 0x3000 },
4849 { 0xe0000, 0x4000 },
4850 { 0x120000, 0x4000 },
4851 { 0x1a0000, 0x4000 },
4852 { 0xffffffff, 0 },
b6016b76 4853 };
5bae30c9
MC
4854 struct mem_entry *mem_tbl;
4855
4856 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4857 mem_tbl = mem_tbl_5709;
4858 else
4859 mem_tbl = mem_tbl_5706;
b6016b76
MC
4860
4861 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4862 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4863 mem_tbl[i].len)) != 0) {
4864 return ret;
4865 }
4866 }
6aa20a22 4867
b6016b76
MC
4868 return ret;
4869}
4870
bc5a0690
MC
4871#define BNX2_MAC_LOOPBACK 0
4872#define BNX2_PHY_LOOPBACK 1
4873
b6016b76 4874static int
bc5a0690 4875bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4876{
4877 unsigned int pkt_size, num_pkts, i;
4878 struct sk_buff *skb, *rx_skb;
4879 unsigned char *packet;
bc5a0690 4880 u16 rx_start_idx, rx_idx;
b6016b76
MC
4881 dma_addr_t map;
4882 struct tx_bd *txbd;
4883 struct sw_bd *rx_buf;
4884 struct l2_fhdr *rx_hdr;
4885 int ret = -ENODEV;
4886
bc5a0690
MC
4887 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4888 bp->loopback = MAC_LOOPBACK;
4889 bnx2_set_mac_loopback(bp);
4890 }
4891 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
489310a4
MC
4892 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4893 return 0;
4894
80be4434 4895 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4896 bnx2_set_phy_loopback(bp);
4897 }
4898 else
4899 return -EINVAL;
b6016b76 4900
84eaa187 4901 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 4902 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4903 if (!skb)
4904 return -ENOMEM;
b6016b76 4905 packet = skb_put(skb, pkt_size);
6634292b 4906 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4907 memset(packet + 6, 0x0, 8);
4908 for (i = 14; i < pkt_size; i++)
4909 packet[i] = (unsigned char) (i & 0xff);
4910
4911 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4912 PCI_DMA_TODEVICE);
4913
bf5295bb
MC
4914 REG_WR(bp, BNX2_HC_COMMAND,
4915 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4916
b6016b76
MC
4917 REG_RD(bp, BNX2_HC_COMMAND);
4918
4919 udelay(5);
4920 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4921
b6016b76
MC
4922 num_pkts = 0;
4923
bc5a0690 4924 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4925
4926 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4927 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4928 txbd->tx_bd_mss_nbytes = pkt_size;
4929 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4930
4931 num_pkts++;
bc5a0690
MC
4932 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4933 bp->tx_prod_bseq += pkt_size;
b6016b76 4934
234754d5
MC
4935 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4936 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4937
4938 udelay(100);
4939
bf5295bb
MC
4940 REG_WR(bp, BNX2_HC_COMMAND,
4941 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4942
b6016b76
MC
4943 REG_RD(bp, BNX2_HC_COMMAND);
4944
4945 udelay(5);
4946
4947 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4948 dev_kfree_skb(skb);
b6016b76 4949
bc5a0690 4950 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
4951 goto loopback_test_done;
4952 }
4953
4954 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4955 if (rx_idx != rx_start_idx + num_pkts) {
4956 goto loopback_test_done;
4957 }
4958
4959 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4960 rx_skb = rx_buf->skb;
4961
4962 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4963 skb_reserve(rx_skb, bp->rx_offset);
4964
4965 pci_dma_sync_single_for_cpu(bp->pdev,
4966 pci_unmap_addr(rx_buf, mapping),
4967 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4968
ade2bfe7 4969 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4970 (L2_FHDR_ERRORS_BAD_CRC |
4971 L2_FHDR_ERRORS_PHY_DECODE |
4972 L2_FHDR_ERRORS_ALIGNMENT |
4973 L2_FHDR_ERRORS_TOO_SHORT |
4974 L2_FHDR_ERRORS_GIANT_FRAME)) {
4975
4976 goto loopback_test_done;
4977 }
4978
4979 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4980 goto loopback_test_done;
4981 }
4982
4983 for (i = 14; i < pkt_size; i++) {
4984 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4985 goto loopback_test_done;
4986 }
4987 }
4988
4989 ret = 0;
4990
4991loopback_test_done:
4992 bp->loopback = 0;
4993 return ret;
4994}
4995
bc5a0690
MC
4996#define BNX2_MAC_LOOPBACK_FAILED 1
4997#define BNX2_PHY_LOOPBACK_FAILED 2
4998#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4999 BNX2_PHY_LOOPBACK_FAILED)
5000
5001static int
5002bnx2_test_loopback(struct bnx2 *bp)
5003{
5004 int rc = 0;
5005
5006 if (!netif_running(bp->dev))
5007 return BNX2_LOOPBACK_FAILED;
5008
5009 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5010 spin_lock_bh(&bp->phy_lock);
5011 bnx2_init_phy(bp);
5012 spin_unlock_bh(&bp->phy_lock);
5013 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5014 rc |= BNX2_MAC_LOOPBACK_FAILED;
5015 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5016 rc |= BNX2_PHY_LOOPBACK_FAILED;
5017 return rc;
5018}
5019
b6016b76
MC
5020#define NVRAM_SIZE 0x200
5021#define CRC32_RESIDUAL 0xdebb20e3
5022
5023static int
5024bnx2_test_nvram(struct bnx2 *bp)
5025{
5026 u32 buf[NVRAM_SIZE / 4];
5027 u8 *data = (u8 *) buf;
5028 int rc = 0;
5029 u32 magic, csum;
5030
5031 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5032 goto test_nvram_done;
5033
5034 magic = be32_to_cpu(buf[0]);
5035 if (magic != 0x669955aa) {
5036 rc = -ENODEV;
5037 goto test_nvram_done;
5038 }
5039
5040 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5041 goto test_nvram_done;
5042
5043 csum = ether_crc_le(0x100, data);
5044 if (csum != CRC32_RESIDUAL) {
5045 rc = -ENODEV;
5046 goto test_nvram_done;
5047 }
5048
5049 csum = ether_crc_le(0x100, data + 0x100);
5050 if (csum != CRC32_RESIDUAL) {
5051 rc = -ENODEV;
5052 }
5053
5054test_nvram_done:
5055 return rc;
5056}
5057
5058static int
5059bnx2_test_link(struct bnx2 *bp)
5060{
5061 u32 bmsr;
5062
489310a4
MC
5063 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5064 if (bp->link_up)
5065 return 0;
5066 return -ENODEV;
5067 }
c770a65c 5068 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5069 bnx2_enable_bmsr1(bp);
5070 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5071 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5072 bnx2_disable_bmsr1(bp);
c770a65c 5073 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5074
b6016b76
MC
5075 if (bmsr & BMSR_LSTATUS) {
5076 return 0;
5077 }
5078 return -ENODEV;
5079}
5080
5081static int
5082bnx2_test_intr(struct bnx2 *bp)
5083{
5084 int i;
b6016b76
MC
5085 u16 status_idx;
5086
5087 if (!netif_running(bp->dev))
5088 return -ENODEV;
5089
5090 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5091
5092 /* This register is not touched during run-time. */
bf5295bb 5093 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5094 REG_RD(bp, BNX2_HC_COMMAND);
5095
5096 for (i = 0; i < 10; i++) {
5097 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5098 status_idx) {
5099
5100 break;
5101 }
5102
5103 msleep_interruptible(10);
5104 }
5105 if (i < 10)
5106 return 0;
5107
5108 return -ENODEV;
5109}
5110
5111static void
48b01e2d 5112bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5113{
48b01e2d
MC
5114 spin_lock(&bp->phy_lock);
5115 if (bp->serdes_an_pending)
5116 bp->serdes_an_pending--;
5117 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5118 u32 bmcr;
b6016b76 5119
48b01e2d 5120 bp->current_interval = bp->timer_interval;
cd339a0e 5121
ca58c3af 5122 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5123
48b01e2d
MC
5124 if (bmcr & BMCR_ANENABLE) {
5125 u32 phy1, phy2;
b6016b76 5126
48b01e2d
MC
5127 bnx2_write_phy(bp, 0x1c, 0x7c00);
5128 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 5129
48b01e2d
MC
5130 bnx2_write_phy(bp, 0x17, 0x0f01);
5131 bnx2_read_phy(bp, 0x15, &phy2);
5132 bnx2_write_phy(bp, 0x17, 0x0f01);
5133 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 5134
48b01e2d
MC
5135 if ((phy1 & 0x10) && /* SIGNAL DETECT */
5136 !(phy2 & 0x20)) { /* no CONFIG */
5137
5138 bmcr &= ~BMCR_ANENABLE;
5139 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5140 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
5141 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
5142 }
b6016b76 5143 }
48b01e2d
MC
5144 }
5145 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5146 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
5147 u32 phy2;
b6016b76 5148
48b01e2d
MC
5149 bnx2_write_phy(bp, 0x17, 0x0f01);
5150 bnx2_read_phy(bp, 0x15, &phy2);
5151 if (phy2 & 0x20) {
5152 u32 bmcr;
cd339a0e 5153
ca58c3af 5154 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5155 bmcr |= BMCR_ANENABLE;
ca58c3af 5156 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5157
48b01e2d
MC
5158 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
5159 }
5160 } else
5161 bp->current_interval = bp->timer_interval;
b6016b76 5162
48b01e2d
MC
5163 spin_unlock(&bp->phy_lock);
5164}
b6016b76 5165
f8dd064e
MC
5166static void
5167bnx2_5708_serdes_timer(struct bnx2 *bp)
5168{
0d8a6571
MC
5169 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
5170 return;
5171
f8dd064e
MC
5172 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
5173 bp->serdes_an_pending = 0;
5174 return;
5175 }
b6016b76 5176
f8dd064e
MC
5177 spin_lock(&bp->phy_lock);
5178 if (bp->serdes_an_pending)
5179 bp->serdes_an_pending--;
5180 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5181 u32 bmcr;
b6016b76 5182
ca58c3af 5183 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5184 if (bmcr & BMCR_ANENABLE) {
605a9e20 5185 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
5186 bp->current_interval = SERDES_FORCED_TIMEOUT;
5187 } else {
605a9e20 5188 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
5189 bp->serdes_an_pending = 2;
5190 bp->current_interval = bp->timer_interval;
b6016b76 5191 }
b6016b76 5192
f8dd064e
MC
5193 } else
5194 bp->current_interval = bp->timer_interval;
b6016b76 5195
f8dd064e
MC
5196 spin_unlock(&bp->phy_lock);
5197}
5198
48b01e2d
MC
5199static void
5200bnx2_timer(unsigned long data)
5201{
5202 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5203
48b01e2d
MC
5204 if (!netif_running(bp->dev))
5205 return;
b6016b76 5206
48b01e2d
MC
5207 if (atomic_read(&bp->intr_sem) != 0)
5208 goto bnx2_restart_timer;
b6016b76 5209
df149d70 5210 bnx2_send_heart_beat(bp);
b6016b76 5211
48b01e2d 5212 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5213
02537b06
MC
5214 /* workaround occasional corrupted counters */
5215 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5216 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5217 BNX2_HC_COMMAND_STATS_NOW);
5218
f8dd064e
MC
5219 if (bp->phy_flags & PHY_SERDES_FLAG) {
5220 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5221 bnx2_5706_serdes_timer(bp);
27a005b8 5222 else
f8dd064e 5223 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5224 }
5225
5226bnx2_restart_timer:
cd339a0e 5227 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5228}
5229
8e6a72c4
MC
5230static int
5231bnx2_request_irq(struct bnx2 *bp)
5232{
5233 struct net_device *dev = bp->dev;
5234 int rc = 0;
5235
5236 if (bp->flags & USING_MSI_FLAG) {
5237 irq_handler_t fn = bnx2_msi;
5238
5239 if (bp->flags & ONE_SHOT_MSI_FLAG)
5240 fn = bnx2_msi_1shot;
5241
5242 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
5243 } else
5244 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
5245 IRQF_SHARED, dev->name, dev);
5246 return rc;
5247}
5248
5249static void
5250bnx2_free_irq(struct bnx2 *bp)
5251{
5252 struct net_device *dev = bp->dev;
5253
5254 if (bp->flags & USING_MSI_FLAG) {
5255 free_irq(bp->pdev->irq, dev);
5256 pci_disable_msi(bp->pdev);
5257 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
5258 } else
5259 free_irq(bp->pdev->irq, dev);
5260}
5261
b6016b76
MC
5262/* Called with rtnl_lock */
5263static int
5264bnx2_open(struct net_device *dev)
5265{
972ec0d4 5266 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5267 int rc;
5268
1b2f922f
MC
5269 netif_carrier_off(dev);
5270
829ca9a3 5271 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5272 bnx2_disable_int(bp);
5273
5274 rc = bnx2_alloc_mem(bp);
5275 if (rc)
5276 return rc;
5277
bea3348e
SH
5278 napi_enable(&bp->napi);
5279
8e6a72c4 5280 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
b6016b76
MC
5281 if (pci_enable_msi(bp->pdev) == 0) {
5282 bp->flags |= USING_MSI_FLAG;
8e6a72c4
MC
5283 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5284 bp->flags |= ONE_SHOT_MSI_FLAG;
b6016b76 5285 }
b6016b76 5286 }
8e6a72c4
MC
5287 rc = bnx2_request_irq(bp);
5288
b6016b76 5289 if (rc) {
bea3348e 5290 napi_disable(&bp->napi);
b6016b76
MC
5291 bnx2_free_mem(bp);
5292 return rc;
5293 }
5294
5295 rc = bnx2_init_nic(bp);
5296
5297 if (rc) {
bea3348e 5298 napi_disable(&bp->napi);
8e6a72c4 5299 bnx2_free_irq(bp);
b6016b76
MC
5300 bnx2_free_skbs(bp);
5301 bnx2_free_mem(bp);
5302 return rc;
5303 }
6aa20a22 5304
cd339a0e 5305 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5306
5307 atomic_set(&bp->intr_sem, 0);
5308
5309 bnx2_enable_int(bp);
5310
5311 if (bp->flags & USING_MSI_FLAG) {
5312 /* Test MSI to make sure it is working
5313 * If MSI test fails, go back to INTx mode
5314 */
5315 if (bnx2_test_intr(bp) != 0) {
5316 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5317 " using MSI, switching to INTx mode. Please"
5318 " report this failure to the PCI maintainer"
5319 " and include system chipset information.\n",
5320 bp->dev->name);
5321
5322 bnx2_disable_int(bp);
8e6a72c4 5323 bnx2_free_irq(bp);
b6016b76
MC
5324
5325 rc = bnx2_init_nic(bp);
5326
8e6a72c4
MC
5327 if (!rc)
5328 rc = bnx2_request_irq(bp);
5329
b6016b76 5330 if (rc) {
bea3348e 5331 napi_disable(&bp->napi);
b6016b76
MC
5332 bnx2_free_skbs(bp);
5333 bnx2_free_mem(bp);
5334 del_timer_sync(&bp->timer);
5335 return rc;
5336 }
5337 bnx2_enable_int(bp);
5338 }
5339 }
5340 if (bp->flags & USING_MSI_FLAG) {
5341 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5342 }
5343
5344 netif_start_queue(dev);
5345
5346 return 0;
5347}
5348
5349static void
c4028958 5350bnx2_reset_task(struct work_struct *work)
b6016b76 5351{
c4028958 5352 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5353
afdc08b9
MC
5354 if (!netif_running(bp->dev))
5355 return;
5356
5357 bp->in_reset_task = 1;
b6016b76
MC
5358 bnx2_netif_stop(bp);
5359
5360 bnx2_init_nic(bp);
5361
5362 atomic_set(&bp->intr_sem, 1);
5363 bnx2_netif_start(bp);
afdc08b9 5364 bp->in_reset_task = 0;
b6016b76
MC
5365}
5366
5367static void
5368bnx2_tx_timeout(struct net_device *dev)
5369{
972ec0d4 5370 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5371
5372 /* This allows the netif to be shutdown gracefully before resetting */
5373 schedule_work(&bp->reset_task);
5374}
5375
5376#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the device while swapping the VLAN group pointer so the
	 * RX path never sees a half-updated configuration.
	 */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	/* Reprogram RX mode: VLAN tag stripping depends on vlgrp. */
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
b6016b76
MC
5390#endif
5391
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb (head + page frags) for DMA, fills one TX buffer
 * descriptor per segment, sets up checksum/VLAN/LSO flags, and kicks
 * the hardware by writing the new producer index/byte count.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;

	/* Should not happen: the queue is stopped before the ring can
	 * fill up (see the wake threshold check at the bottom).
	 */
	if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(dev);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = bp->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

	if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
	if ((mss = skb_shinfo(skb)->gso_size)) {
		/* LSO (TSO) frame: encode MSS, TCP option length and (for
		 * IPv6) the transport-header offset into the descriptor.
		 */
		u32 tcp_opt_len, ip_tcp_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* tcp_off is the extra offset beyond a plain
			 * Ethernet + IPv6 header (e.g. extension headers);
			 * its bits are scattered across descriptor fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			/* IPv4 TSO: we rewrite the IP/TCP headers below, so
			 * a cloned header must be made private first.
			 */
			if (skb_header_cloned(skb) &&
			    pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}

			ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);

			/* Pre-compute the pseudo-header checksum and a
			 * per-segment tot_len for the hardware.
			 */
			iph = ip_hdr(skb);
			iph->check = 0;
			iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
								 iph->daddr, 0,
								 IPPROTO_TCP,
								 0);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	/* First descriptor carries the linear part of the skb. */
	tx_buf = &bp->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	pci_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &bp->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;

	/* One descriptor per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &bp->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
			len, PCI_DMA_TODEVICE);
		pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
				mapping, mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last descriptor of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	bp->tx_prod_bseq += skb->len;

	/* Doorbell: hand the new producer index and byte sequence to HW. */
	REG_WR16(bp, bp->tx_bidx_addr, prod);
	REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);

	mmiowb();

	bp->tx_prod = prod;
	dev->trans_start = jiffies;

	if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
		/* Stop first, then re-check: bnx2_tx_int() may have freed
		 * descriptors in the meantime (avoids a lost-wakeup race).
		 */
		netif_stop_queue(dev);
		if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
			netif_wake_queue(dev);
	}

	return NETDEV_TX_OK;
}
5530
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	bnx2_disable_int_sync(bp);
	napi_disable(&bp->napi);
	del_timer_sync(&bp->timer);
	/* Tell the bootcode why we are resetting so it can decide whether
	 * to keep Wake-on-LAN armed while the interface is down.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
5563
/* Assemble a 64-bit hardware counter from its _hi/_lo halves when
 * unsigned long is 64 bits; on 32-bit hosts only the low half fits.
 * The whole expansion is parenthesized so the macro composes safely
 * inside larger expressions (the previous form ended in an
 * unparenthesized `... + lo`, a precedence hazard for any caller that
 * multiplies or shifts the result).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32)) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
5576
/* netdev get_stats handler: translate the chip's DMA'ed statistics
 * block into struct net_device_stats.  If the stats block has not been
 * allocated yet (device never opened), return the cached (zero) stats.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are not reported on 5706 and 5708 A0
	 * (skipped by the errata noted above the stats length tables).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
5652
5653/* All ethtool functions called with rtnl_lock */
5654
/* ethtool get_settings handler; called with rtnl_lock held.
 * Reports supported link modes depending on whether the PHY is serdes
 * (fibre), copper, or remote-PHY capable (which can be either).
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		/* A remote PHY may switch between fibre and copper. */
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock guards the link state fields, which can be updated
	 * asynchronously by the link-change path.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* Link down: speed and duplex are unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 5713
b6016b76
MC
/* ethtool set_settings handler; called with rtnl_lock held.
 * Validates the requested port/autoneg/speed combination under
 * phy_lock, then commits it and reprograms the PHY.  All validation
 * failures exit through err_out_unlock with -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies so nothing is committed on a validation
	 * failure. */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices may change the port type. */
	if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and fibre port. */
			if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports. */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				goto err_out_unlock;
		}
		/* Forcing gigabit speeds on copper is not supported. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* Validation passed: commit and reprogram the PHY. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
5796
5797static void
5798bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5799{
972ec0d4 5800 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5801
5802 strcpy(info->driver, DRV_MODULE_NAME);
5803 strcpy(info->version, DRV_MODULE_VERSION);
5804 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 5805 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
5806}
5807
244ac4f4
MC
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5815
/* ethtool get_regs handler: dump readable register ranges into the
 * 32 KB buffer.  reg_boundaries[] lists alternating [start, end) pairs
 * of readable ranges; the gaps between pairs are skipped (left zeroed)
 * because reading them is unsafe or meaningless.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	/* Zero the whole buffer so skipped ranges read as zero. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Registers can only be read while the chip is up. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		if (offset == reg_boundaries[i + 1]) {
			/* End of a readable range: jump to the next one,
			 * repositioning the output pointer to match.
			 */
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
5865
b6016b76
MC
5866static void
5867bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5868{
972ec0d4 5869 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5870
5871 if (bp->flags & NO_WOL_FLAG) {
5872 wol->supported = 0;
5873 wol->wolopts = 0;
5874 }
5875 else {
5876 wol->supported = WAKE_MAGIC;
5877 if (bp->wol)
5878 wol->wolopts = WAKE_MAGIC;
5879 else
5880 wol->wolopts = 0;
5881 }
5882 memset(&wol->sopass, 0, sizeof(wol->sopass));
5883}
5884
5885static int
5886bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5887{
972ec0d4 5888 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5889
5890 if (wol->wolopts & ~WAKE_MAGIC)
5891 return -EINVAL;
5892
5893 if (wol->wolopts & WAKE_MAGIC) {
5894 if (bp->flags & NO_WOL_FLAG)
5895 return -EINVAL;
5896
5897 bp->wol = 1;
5898 }
5899 else {
5900 bp->wol = 0;
5901 }
5902 return 0;
5903}
5904
/* ethtool nway_reset handler: restart autonegotiation.  Only valid
 * when autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the management firmware. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; reacquire before touching
		 * the timer state.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg fallback timer. */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and restart autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
5947
5948static int
5949bnx2_get_eeprom_len(struct net_device *dev)
5950{
972ec0d4 5951 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5952
1122db71 5953 if (bp->flash_info == NULL)
b6016b76
MC
5954 return 0;
5955
1122db71 5956 return (int) bp->flash_size;
b6016b76
MC
5957}
5958
5959static int
5960bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5961 u8 *eebuf)
5962{
972ec0d4 5963 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5964 int rc;
5965
1064e944 5966 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5967
5968 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5969
5970 return rc;
5971}
5972
5973static int
5974bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5975 u8 *eebuf)
5976{
972ec0d4 5977 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5978 int rc;
5979
1064e944 5980 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5981
5982 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5983
5984 return rc;
5985}
5986
/* ethtool get_coalesce handler: report the current interrupt
 * coalescing parameters (tick values in usecs, frame counts).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6008
/* ethtool set_coalesce handler: clamp each requested value to its
 * hardware field width (tick counters to 10 bits, frame counters to
 * 8 bits), then restart the NIC if running so the new values take
 * effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 only supports stats updates off or once per second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* The host coalescing block is programmed during init, so a
	 * restart is needed to apply the new settings.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
6057
/* ethtool get_ringparam handler: report the RX/TX ring limits and
 * current sizes.  The "jumbo" ring maps to the RX page ring.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6074
/* Resize the RX/TX rings.  If the device is running it is torn down,
 * the new sizes are recorded, and the device is brought back up with
 * freshly allocated rings.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			/* NOTE(review): on allocation failure the device is
			 * left stopped with no rings while still marked
			 * running — confirm callers/users can recover.
			 */
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}
	return 0;
}
6099
5d5d0015
MC
6100static int
6101bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6102{
6103 struct bnx2 *bp = netdev_priv(dev);
6104 int rc;
6105
6106 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6107 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6108 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6109
6110 return -EINVAL;
6111 }
6112 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6113 return rc;
6114}
6115
b6016b76
MC
6116static void
6117bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6118{
972ec0d4 6119 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6120
6121 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6122 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6123 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6124}
6125
/* ethtool set_pauseparam handler: record the requested flow-control
 * settings and reprogram the PHY under phy_lock.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp, bp->phy_port);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6152
/* ethtool get_rx_csum handler: report whether RX checksum offload is
 * enabled.
 */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6160
/* ethtool set_rx_csum handler: the flag is consulted by the RX path,
 * so no reconfiguration is needed here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6169
b11d6213
MC
/* ethtool set_tso handler: toggle TSO feature flags.  TSO over IPv6 is
 * only offered on the 5709, which has the required hardware support.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
6184
#define BNX2_NUM_STATS 46

/* ethtool statistics names; order must match bnx2_stats_offset_arr
 * and the stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6237
/* Offset (in 32-bit words) of each counter within the DMA'ed
 * statistics block, indexed the same as bnx2_stats_str_arr.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6288
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Each entry gives the width in bytes (8 or 4) of the corresponding
 * counter in bnx2_stats_offset_arr; 0 means the counter is skipped.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* 5708 (except A0) reports carrier-sense errors reliably. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
6307
b6016b76
MC
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; order must match the buf[] indices used in
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
6320
6321static int
b9f2c044 6322bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6323{
b9f2c044
JG
6324 switch (sset) {
6325 case ETH_SS_TEST:
6326 return BNX2_NUM_TESTS;
6327 case ETH_SS_STATS:
6328 return BNX2_NUM_STATS;
6329 default:
6330 return -EOPNOTSUPP;
6331 }
b6016b76
MC
6332}
6333
/* ethtool self_test handler: run the offline tests (register, memory,
 * loopback) if requested, then the online tests (nvram, interrupt,
 * link).  buf[i] is nonzero when test i failed; indices match
 * bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive use of the chip in
		 * diagnostic mode.
		 */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the offline tests. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
6389
6390static void
6391bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6392{
6393 switch (stringset) {
6394 case ETH_SS_STATS:
6395 memcpy(buf, bnx2_stats_str_arr,
6396 sizeof(bnx2_stats_str_arr));
6397 break;
6398 case ETH_SS_TEST:
6399 memcpy(buf, bnx2_tests_str_arr,
6400 sizeof(bnx2_tests_str_arr));
6401 break;
6402 }
6403}
6404
b6016b76
MC
6405static void
6406bnx2_get_ethtool_stats(struct net_device *dev,
6407 struct ethtool_stats *stats, u64 *buf)
6408{
972ec0d4 6409 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6410 int i;
6411 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6412 u8 *stats_len_arr = NULL;
b6016b76
MC
6413
6414 if (hw_stats == NULL) {
6415 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6416 return;
6417 }
6418
5b0c76ad
MC
6419 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6420 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6421 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6422 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6423 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6424 else
6425 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6426
6427 for (i = 0; i < BNX2_NUM_STATS; i++) {
6428 if (stats_len_arr[i] == 0) {
6429 /* skip this counter */
6430 buf[i] = 0;
6431 continue;
6432 }
6433 if (stats_len_arr[i] == 4) {
6434 /* 4-byte counter */
6435 buf[i] = (u64)
6436 *(hw_stats + bnx2_stats_offset_arr[i]);
6437 continue;
6438 }
6439 /* 8-byte counter */
6440 buf[i] = (((u64) *(hw_stats +
6441 bnx2_stats_offset_arr[i])) << 32) +
6442 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6443 }
6444}
6445
6446static int
6447bnx2_phys_id(struct net_device *dev, u32 data)
6448{
972ec0d4 6449 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6450 int i;
6451 u32 save;
6452
6453 if (data == 0)
6454 data = 2;
6455
6456 save = REG_RD(bp, BNX2_MISC_CFG);
6457 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6458
6459 for (i = 0; i < (data * 2); i++) {
6460 if ((i % 2) == 0) {
6461 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6462 }
6463 else {
6464 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6465 BNX2_EMAC_LED_1000MB_OVERRIDE |
6466 BNX2_EMAC_LED_100MB_OVERRIDE |
6467 BNX2_EMAC_LED_10MB_OVERRIDE |
6468 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6469 BNX2_EMAC_LED_TRAFFIC);
6470 }
6471 msleep_interruptible(500);
6472 if (signal_pending(current))
6473 break;
6474 }
6475 REG_WR(bp, BNX2_EMAC_LED, 0);
6476 REG_WR(bp, BNX2_MISC_CFG, save);
6477 return 0;
6478}
6479
4666f87a
MC
6480static int
6481bnx2_set_tx_csum(struct net_device *dev, u32 data)
6482{
6483 struct bnx2 *bp = netdev_priv(dev);
6484
6485 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 6486 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
6487 else
6488 return (ethtool_op_set_tx_csum(dev, data));
6489}
6490
7282d491 6491static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6492 .get_settings = bnx2_get_settings,
6493 .set_settings = bnx2_set_settings,
6494 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6495 .get_regs_len = bnx2_get_regs_len,
6496 .get_regs = bnx2_get_regs,
b6016b76
MC
6497 .get_wol = bnx2_get_wol,
6498 .set_wol = bnx2_set_wol,
6499 .nway_reset = bnx2_nway_reset,
6500 .get_link = ethtool_op_get_link,
6501 .get_eeprom_len = bnx2_get_eeprom_len,
6502 .get_eeprom = bnx2_get_eeprom,
6503 .set_eeprom = bnx2_set_eeprom,
6504 .get_coalesce = bnx2_get_coalesce,
6505 .set_coalesce = bnx2_set_coalesce,
6506 .get_ringparam = bnx2_get_ringparam,
6507 .set_ringparam = bnx2_set_ringparam,
6508 .get_pauseparam = bnx2_get_pauseparam,
6509 .set_pauseparam = bnx2_set_pauseparam,
6510 .get_rx_csum = bnx2_get_rx_csum,
6511 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 6512 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 6513 .set_sg = ethtool_op_set_sg,
b11d6213 6514 .set_tso = bnx2_set_tso,
b6016b76
MC
6515 .self_test = bnx2_self_test,
6516 .get_strings = bnx2_get_strings,
6517 .phys_id = bnx2_phys_id,
b6016b76 6518 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 6519 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
6520};
6521
6522/* Called with rtnl_lock */
6523static int
6524bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6525{
14ab9b86 6526 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6527 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6528 int err;
6529
6530 switch(cmd) {
6531 case SIOCGMIIPHY:
6532 data->phy_id = bp->phy_addr;
6533
6534 /* fallthru */
6535 case SIOCGMIIREG: {
6536 u32 mii_regval;
6537
7b6b8347
MC
6538 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6539 return -EOPNOTSUPP;
6540
dad3e452
MC
6541 if (!netif_running(dev))
6542 return -EAGAIN;
6543
c770a65c 6544 spin_lock_bh(&bp->phy_lock);
b6016b76 6545 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6546 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6547
6548 data->val_out = mii_regval;
6549
6550 return err;
6551 }
6552
6553 case SIOCSMIIREG:
6554 if (!capable(CAP_NET_ADMIN))
6555 return -EPERM;
6556
7b6b8347
MC
6557 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6558 return -EOPNOTSUPP;
6559
dad3e452
MC
6560 if (!netif_running(dev))
6561 return -EAGAIN;
6562
c770a65c 6563 spin_lock_bh(&bp->phy_lock);
b6016b76 6564 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6565 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6566
6567 return err;
6568
6569 default:
6570 /* do nothing */
6571 break;
6572 }
6573 return -EOPNOTSUPP;
6574}
6575
6576/* Called with rtnl_lock */
6577static int
6578bnx2_change_mac_addr(struct net_device *dev, void *p)
6579{
6580 struct sockaddr *addr = p;
972ec0d4 6581 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6582
73eef4cd
MC
6583 if (!is_valid_ether_addr(addr->sa_data))
6584 return -EINVAL;
6585
b6016b76
MC
6586 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6587 if (netif_running(dev))
6588 bnx2_set_mac_addr(bp);
6589
6590 return 0;
6591}
6592
6593/* Called with rtnl_lock */
6594static int
6595bnx2_change_mtu(struct net_device *dev, int new_mtu)
6596{
972ec0d4 6597 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6598
6599 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6600 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6601 return -EINVAL;
6602
6603 dev->mtu = new_mtu;
5d5d0015 6604 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
6605}
6606
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: run the interrupt handler with the device IRQ
 * masked, so netconsole and friends can drive the NIC synchronously.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
6618
253c8b75
MC
6619static void __devinit
6620bnx2_get_5709_media(struct bnx2 *bp)
6621{
6622 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6623 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6624 u32 strap;
6625
6626 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6627 return;
6628 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6629 bp->phy_flags |= PHY_SERDES_FLAG;
6630 return;
6631 }
6632
6633 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6634 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6635 else
6636 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6637
6638 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6639 switch (strap) {
6640 case 0x4:
6641 case 0x5:
6642 case 0x6:
6643 bp->phy_flags |= PHY_SERDES_FLAG;
6644 return;
6645 }
6646 } else {
6647 switch (strap) {
6648 case 0x1:
6649 case 0x2:
6650 case 0x4:
6651 bp->phy_flags |= PHY_SERDES_FLAG;
6652 return;
6653 }
6654 }
6655}
6656
883e5151
MC
6657static void __devinit
6658bnx2_get_pci_speed(struct bnx2 *bp)
6659{
6660 u32 reg;
6661
6662 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6663 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6664 u32 clkreg;
6665
6666 bp->flags |= PCIX_FLAG;
6667
6668 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6669
6670 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6671 switch (clkreg) {
6672 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6673 bp->bus_speed_mhz = 133;
6674 break;
6675
6676 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6677 bp->bus_speed_mhz = 100;
6678 break;
6679
6680 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6681 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6682 bp->bus_speed_mhz = 66;
6683 break;
6684
6685 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6686 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6687 bp->bus_speed_mhz = 50;
6688 break;
6689
6690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6691 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6692 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6693 bp->bus_speed_mhz = 33;
6694 break;
6695 }
6696 }
6697 else {
6698 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6699 bp->bus_speed_mhz = 66;
6700 else
6701 bp->bus_speed_mhz = 33;
6702 }
6703
6704 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6705 bp->flags |= PCI_32BIT_FLAG;
6706
6707}
6708
b6016b76
MC
6709static int __devinit
6710bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6711{
6712 struct bnx2 *bp;
6713 unsigned long mem_len;
58fc2ea4 6714 int rc, i, j;
b6016b76 6715 u32 reg;
40453c83 6716 u64 dma_mask, persist_dma_mask;
b6016b76 6717
b6016b76 6718 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6719 bp = netdev_priv(dev);
b6016b76
MC
6720
6721 bp->flags = 0;
6722 bp->phy_flags = 0;
6723
6724 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6725 rc = pci_enable_device(pdev);
6726 if (rc) {
898eb71c 6727 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
6728 goto err_out;
6729 }
6730
6731 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6732 dev_err(&pdev->dev,
2e8a538d 6733 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6734 rc = -ENODEV;
6735 goto err_out_disable;
6736 }
6737
6738 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6739 if (rc) {
9b91cf9d 6740 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6741 goto err_out_disable;
6742 }
6743
6744 pci_set_master(pdev);
6745
6746 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6747 if (bp->pm_cap == 0) {
9b91cf9d 6748 dev_err(&pdev->dev,
2e8a538d 6749 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6750 rc = -EIO;
6751 goto err_out_release;
6752 }
6753
b6016b76
MC
6754 bp->dev = dev;
6755 bp->pdev = pdev;
6756
6757 spin_lock_init(&bp->phy_lock);
1b8227c4 6758 spin_lock_init(&bp->indirect_lock);
c4028958 6759 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6760
6761 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6762 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6763 dev->mem_end = dev->mem_start + mem_len;
6764 dev->irq = pdev->irq;
6765
6766 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6767
6768 if (!bp->regview) {
9b91cf9d 6769 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6770 rc = -ENOMEM;
6771 goto err_out_release;
6772 }
6773
6774 /* Configure byte swap and enable write to the reg_window registers.
6775 * Rely on CPU to do target byte swapping on big endian systems
6776 * The chip's target access swapping will not swap all accesses
6777 */
6778 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6779 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6780 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6781
829ca9a3 6782 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6783
6784 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6785
883e5151
MC
6786 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6787 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6788 dev_err(&pdev->dev,
6789 "Cannot find PCIE capability, aborting.\n");
6790 rc = -EIO;
6791 goto err_out_unmap;
6792 }
6793 bp->flags |= PCIE_FLAG;
6794 } else {
59b47d8a
MC
6795 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6796 if (bp->pcix_cap == 0) {
6797 dev_err(&pdev->dev,
6798 "Cannot find PCIX capability, aborting.\n");
6799 rc = -EIO;
6800 goto err_out_unmap;
6801 }
6802 }
6803
8e6a72c4
MC
6804 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6805 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6806 bp->flags |= MSI_CAP_FLAG;
6807 }
6808
40453c83
MC
6809 /* 5708 cannot support DMA addresses > 40-bit. */
6810 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6811 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6812 else
6813 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6814
6815 /* Configure DMA attributes. */
6816 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6817 dev->features |= NETIF_F_HIGHDMA;
6818 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6819 if (rc) {
6820 dev_err(&pdev->dev,
6821 "pci_set_consistent_dma_mask failed, aborting.\n");
6822 goto err_out_unmap;
6823 }
6824 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6825 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6826 goto err_out_unmap;
6827 }
6828
883e5151
MC
6829 if (!(bp->flags & PCIE_FLAG))
6830 bnx2_get_pci_speed(bp);
b6016b76
MC
6831
6832 /* 5706A0 may falsely detect SERR and PERR. */
6833 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6834 reg = REG_RD(bp, PCI_COMMAND);
6835 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6836 REG_WR(bp, PCI_COMMAND, reg);
6837 }
6838 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6839 !(bp->flags & PCIX_FLAG)) {
6840
9b91cf9d 6841 dev_err(&pdev->dev,
2e8a538d 6842 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6843 goto err_out_unmap;
6844 }
6845
6846 bnx2_init_nvram(bp);
6847
e3648b3d
MC
6848 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6849
6850 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6851 BNX2_SHM_HDR_SIGNATURE_SIG) {
6852 u32 off = PCI_FUNC(pdev->devfn) << 2;
6853
6854 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6855 } else
e3648b3d
MC
6856 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6857
b6016b76
MC
6858 /* Get the permanent MAC address. First we need to make sure the
6859 * firmware is actually running.
6860 */
e3648b3d 6861 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6862
6863 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6864 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6865 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6866 rc = -ENODEV;
6867 goto err_out_unmap;
6868 }
6869
58fc2ea4
MC
6870 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
6871 for (i = 0, j = 0; i < 3; i++) {
6872 u8 num, k, skip0;
6873
6874 num = (u8) (reg >> (24 - (i * 8)));
6875 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
6876 if (num >= k || !skip0 || k == 1) {
6877 bp->fw_version[j++] = (num / k) + '0';
6878 skip0 = 0;
6879 }
6880 }
6881 if (i != 2)
6882 bp->fw_version[j++] = '.';
6883 }
846f5c62
MC
6884 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE);
6885 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
6886 bp->wol = 1;
6887
6888 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
c2d3db8c
MC
6889 bp->flags |= ASF_ENABLE_FLAG;
6890
6891 for (i = 0; i < 30; i++) {
6892 reg = REG_RD_IND(bp, bp->shmem_base +
6893 BNX2_BC_STATE_CONDITION);
6894 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
6895 break;
6896 msleep(10);
6897 }
6898 }
58fc2ea4
MC
6899 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_BC_STATE_CONDITION);
6900 reg &= BNX2_CONDITION_MFW_RUN_MASK;
6901 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
6902 reg != BNX2_CONDITION_MFW_RUN_NONE) {
6903 int i;
6904 u32 addr = REG_RD_IND(bp, bp->shmem_base + BNX2_MFW_VER_PTR);
6905
6906 bp->fw_version[j++] = ' ';
6907 for (i = 0; i < 3; i++) {
6908 reg = REG_RD_IND(bp, addr + i * 4);
6909 reg = swab32(reg);
6910 memcpy(&bp->fw_version[j], &reg, 4);
6911 j += 4;
6912 }
6913 }
b6016b76 6914
e3648b3d 6915 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6916 bp->mac_addr[0] = (u8) (reg >> 8);
6917 bp->mac_addr[1] = (u8) reg;
6918
e3648b3d 6919 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6920 bp->mac_addr[2] = (u8) (reg >> 24);
6921 bp->mac_addr[3] = (u8) (reg >> 16);
6922 bp->mac_addr[4] = (u8) (reg >> 8);
6923 bp->mac_addr[5] = (u8) reg;
6924
5d5d0015
MC
6925 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6926
b6016b76 6927 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6928 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6929
6930 bp->rx_csum = 1;
6931
b6016b76
MC
6932 bp->tx_quick_cons_trip_int = 20;
6933 bp->tx_quick_cons_trip = 20;
6934 bp->tx_ticks_int = 80;
6935 bp->tx_ticks = 80;
6aa20a22 6936
b6016b76
MC
6937 bp->rx_quick_cons_trip_int = 6;
6938 bp->rx_quick_cons_trip = 6;
6939 bp->rx_ticks_int = 18;
6940 bp->rx_ticks = 18;
6941
7ea6920e 6942 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6943
6944 bp->timer_interval = HZ;
cd339a0e 6945 bp->current_interval = HZ;
b6016b76 6946
5b0c76ad
MC
6947 bp->phy_addr = 1;
6948
b6016b76 6949 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6950 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6951 bnx2_get_5709_media(bp);
6952 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6953 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6954
0d8a6571 6955 bp->phy_port = PORT_TP;
bac0dff6 6956 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6957 bp->phy_port = PORT_FIBRE;
846f5c62
MC
6958 reg = REG_RD_IND(bp, bp->shmem_base +
6959 BNX2_SHARED_HW_CFG_CONFIG);
6960 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
6961 bp->flags |= NO_WOL_FLAG;
6962 bp->wol = 0;
6963 }
bac0dff6 6964 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6965 bp->phy_addr = 2;
5b0c76ad
MC
6966 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6967 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6968 }
0d8a6571
MC
6969 bnx2_init_remote_phy(bp);
6970
261dd5ca
MC
6971 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6972 CHIP_NUM(bp) == CHIP_NUM_5708)
6973 bp->phy_flags |= PHY_CRC_FIX_FLAG;
fb0c18bd
MC
6974 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
6975 (CHIP_REV(bp) == CHIP_REV_Ax ||
6976 CHIP_REV(bp) == CHIP_REV_Bx))
b659f44e 6977 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6978
16088272
MC
6979 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6980 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
846f5c62 6981 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
dda1e390 6982 bp->flags |= NO_WOL_FLAG;
846f5c62
MC
6983 bp->wol = 0;
6984 }
dda1e390 6985
b6016b76
MC
6986 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6987 bp->tx_quick_cons_trip_int =
6988 bp->tx_quick_cons_trip;
6989 bp->tx_ticks_int = bp->tx_ticks;
6990 bp->rx_quick_cons_trip_int =
6991 bp->rx_quick_cons_trip;
6992 bp->rx_ticks_int = bp->rx_ticks;
6993 bp->comp_prod_trip_int = bp->comp_prod_trip;
6994 bp->com_ticks_int = bp->com_ticks;
6995 bp->cmd_ticks_int = bp->cmd_ticks;
6996 }
6997
f9317a40
MC
6998 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6999 *
7000 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7001 * with byte enables disabled on the unused 32-bit word. This is legal
7002 * but causes problems on the AMD 8132 which will eventually stop
7003 * responding after a while.
7004 *
7005 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7006 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7007 */
7008 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7009 struct pci_dev *amd_8132 = NULL;
7010
7011 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7012 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7013 amd_8132))) {
f9317a40 7014
44c10138
AK
7015 if (amd_8132->revision >= 0x10 &&
7016 amd_8132->revision <= 0x13) {
f9317a40
MC
7017 disable_msi = 1;
7018 pci_dev_put(amd_8132);
7019 break;
7020 }
7021 }
7022 }
7023
deaf391b 7024 bnx2_set_default_link(bp);
b6016b76
MC
7025 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7026
cd339a0e
MC
7027 init_timer(&bp->timer);
7028 bp->timer.expires = RUN_AT(bp->timer_interval);
7029 bp->timer.data = (unsigned long) bp;
7030 bp->timer.function = bnx2_timer;
7031
b6016b76
MC
7032 return 0;
7033
7034err_out_unmap:
7035 if (bp->regview) {
7036 iounmap(bp->regview);
73eef4cd 7037 bp->regview = NULL;
b6016b76
MC
7038 }
7039
7040err_out_release:
7041 pci_release_regions(pdev);
7042
7043err_out_disable:
7044 pci_disable_device(pdev);
7045 pci_set_drvdata(pdev, NULL);
7046
7047err_out:
7048 return rc;
7049}
7050
883e5151
MC
7051static char * __devinit
7052bnx2_bus_string(struct bnx2 *bp, char *str)
7053{
7054 char *s = str;
7055
7056 if (bp->flags & PCIE_FLAG) {
7057 s += sprintf(s, "PCI Express");
7058 } else {
7059 s += sprintf(s, "PCI");
7060 if (bp->flags & PCIX_FLAG)
7061 s += sprintf(s, "-X");
7062 if (bp->flags & PCI_32BIT_FLAG)
7063 s += sprintf(s, " 32-bit");
7064 else
7065 s += sprintf(s, " 64-bit");
7066 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7067 }
7068 return str;
7069}
7070
b6016b76
MC
7071static int __devinit
7072bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7073{
7074 static int version_printed = 0;
7075 struct net_device *dev = NULL;
7076 struct bnx2 *bp;
0795af57 7077 int rc;
883e5151 7078 char str[40];
0795af57 7079 DECLARE_MAC_BUF(mac);
b6016b76
MC
7080
7081 if (version_printed++ == 0)
7082 printk(KERN_INFO "%s", version);
7083
7084 /* dev zeroed in init_etherdev */
7085 dev = alloc_etherdev(sizeof(*bp));
7086
7087 if (!dev)
7088 return -ENOMEM;
7089
7090 rc = bnx2_init_board(pdev, dev);
7091 if (rc < 0) {
7092 free_netdev(dev);
7093 return rc;
7094 }
7095
7096 dev->open = bnx2_open;
7097 dev->hard_start_xmit = bnx2_start_xmit;
7098 dev->stop = bnx2_close;
7099 dev->get_stats = bnx2_get_stats;
7100 dev->set_multicast_list = bnx2_set_rx_mode;
7101 dev->do_ioctl = bnx2_ioctl;
7102 dev->set_mac_address = bnx2_change_mac_addr;
7103 dev->change_mtu = bnx2_change_mtu;
7104 dev->tx_timeout = bnx2_tx_timeout;
7105 dev->watchdog_timeo = TX_TIMEOUT;
7106#ifdef BCM_VLAN
7107 dev->vlan_rx_register = bnx2_vlan_rx_register;
b6016b76 7108#endif
b6016b76 7109 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 7110
972ec0d4 7111 bp = netdev_priv(dev);
bea3348e 7112 netif_napi_add(dev, &bp->napi, bnx2_poll, 64);
b6016b76
MC
7113
7114#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7115 dev->poll_controller = poll_bnx2;
7116#endif
7117
1b2f922f
MC
7118 pci_set_drvdata(pdev, dev);
7119
7120 memcpy(dev->dev_addr, bp->mac_addr, 6);
7121 memcpy(dev->perm_addr, bp->mac_addr, 6);
7122 bp->name = board_info[ent->driver_data].name;
7123
d212f87b 7124 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 7125 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
7126 dev->features |= NETIF_F_IPV6_CSUM;
7127
1b2f922f
MC
7128#ifdef BCM_VLAN
7129 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7130#endif
7131 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7132 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7133 dev->features |= NETIF_F_TSO6;
1b2f922f 7134
b6016b76 7135 if ((rc = register_netdev(dev))) {
9b91cf9d 7136 dev_err(&pdev->dev, "Cannot register net device\n");
b6016b76
MC
7137 if (bp->regview)
7138 iounmap(bp->regview);
7139 pci_release_regions(pdev);
7140 pci_disable_device(pdev);
7141 pci_set_drvdata(pdev, NULL);
7142 free_netdev(dev);
7143 return rc;
7144 }
7145
883e5151 7146 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
0795af57 7147 "IRQ %d, node addr %s\n",
b6016b76
MC
7148 dev->name,
7149 bp->name,
7150 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7151 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 7152 bnx2_bus_string(bp, str),
b6016b76 7153 dev->base_addr,
0795af57 7154 bp->pdev->irq, print_mac(mac, dev->dev_addr));
b6016b76 7155
b6016b76
MC
7156 return 0;
7157}
7158
7159static void __devexit
7160bnx2_remove_one(struct pci_dev *pdev)
7161{
7162 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7163 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7164
afdc08b9
MC
7165 flush_scheduled_work();
7166
b6016b76
MC
7167 unregister_netdev(dev);
7168
7169 if (bp->regview)
7170 iounmap(bp->regview);
7171
7172 free_netdev(dev);
7173 pci_release_regions(pdev);
7174 pci_disable_device(pdev);
7175 pci_set_drvdata(pdev, NULL);
7176}
7177
7178static int
829ca9a3 7179bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
7180{
7181 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7182 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7183 u32 reset_code;
7184
6caebb02
MC
7185 /* PCI register 4 needs to be saved whether netif_running() or not.
7186 * MSI address and data need to be saved if using MSI and
7187 * netif_running().
7188 */
7189 pci_save_state(pdev);
b6016b76
MC
7190 if (!netif_running(dev))
7191 return 0;
7192
1d60290f 7193 flush_scheduled_work();
b6016b76
MC
7194 bnx2_netif_stop(bp);
7195 netif_device_detach(dev);
7196 del_timer_sync(&bp->timer);
dda1e390 7197 if (bp->flags & NO_WOL_FLAG)
6c4f095e 7198 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 7199 else if (bp->wol)
b6016b76
MC
7200 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
7201 else
7202 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
7203 bnx2_reset_chip(bp, reset_code);
7204 bnx2_free_skbs(bp);
829ca9a3 7205 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
7206 return 0;
7207}
7208
7209static int
7210bnx2_resume(struct pci_dev *pdev)
7211{
7212 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7213 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7214
6caebb02 7215 pci_restore_state(pdev);
b6016b76
MC
7216 if (!netif_running(dev))
7217 return 0;
7218
829ca9a3 7219 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7220 netif_device_attach(dev);
7221 bnx2_init_nic(bp);
7222 bnx2_netif_start(bp);
7223 return 0;
7224}
7225
7226static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
7227 .name = DRV_MODULE_NAME,
7228 .id_table = bnx2_pci_tbl,
7229 .probe = bnx2_init_one,
7230 .remove = __devexit_p(bnx2_remove_one),
7231 .suspend = bnx2_suspend,
7232 .resume = bnx2_resume,
b6016b76
MC
7233};
7234
7235static int __init bnx2_init(void)
7236{
29917620 7237 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
7238}
7239
7240static void __exit bnx2_cleanup(void)
7241{
7242 pci_unregister_driver(&bnx2_pci_driver);
7243}
7244
/* Module entry/exit points. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
7247
7248
7249