Merge branch 'topic/azt3328' into for-linus
[linux-2.6-block.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
a6952b52 3 * Copyright (c) 2004-2009 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
f2a4f052 38#include <linux/if_vlan.h>
08013fa3 39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
57579f76 49#include <linux/firmware.h>
706bf240 50#include <linux/log2.h>
ccffad25 51#include <linux/list.h>
f2a4f052 52
4edd473f
MC
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
b6016b76
MC
57#include "bnx2.h"
58#include "bnx2_fw.h"
b3448b0b 59
b6016b76
MC
60#define DRV_MODULE_NAME "bnx2"
61#define PFX DRV_MODULE_NAME ": "
581daf7e
MC
62#define DRV_MODULE_VERSION "2.0.1"
63#define DRV_MODULE_RELDATE "May 6, 2009"
57579f76
MC
64#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw"
65#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw"
66#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-4.6.15.fw"
b6016b76
MC
68
69#define RUN_AT(x) (jiffies + (x))
70
71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT (5*HZ)
73
fefa8645 74static char version[] __devinitdata =
b6016b76
MC
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
81MODULE_FIRMWARE(FW_MIPS_FILE_06);
82MODULE_FIRMWARE(FW_RV2P_FILE_06);
83MODULE_FIRMWARE(FW_MIPS_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09);
b6016b76
MC
85
86static int disable_msi = 0;
87
88module_param(disable_msi, int, 0);
89MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
90
/* Board identifiers, used to index board_info[] below. */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
104
105/* indexed by board_t, above */
/* Human-readable board names; indexed by board_t, above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
121
7bb0a04f 122static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
126 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
132 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
137 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
139 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
141 { PCI_VENDOR_ID_BROADCOM, 0x163b,
142 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 143 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 144 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
145 { 0, }
146};
147
148static struct flash_spec flash_table[] =
149{
e30372c9
MC
150#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
151#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 152 /* Slow EEPROM */
37137709 153 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 154 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
155 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
156 "EEPROM - slow"},
37137709
MC
157 /* Expansion entry 0001 */
158 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 159 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
160 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
161 "Entry 0001"},
b6016b76
MC
162 /* Saifun SA25F010 (non-buffered flash) */
163 /* strap, cfg1, & write1 need updates */
37137709 164 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 165 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
166 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
167 "Non-buffered flash (128kB)"},
168 /* Saifun SA25F020 (non-buffered flash) */
169 /* strap, cfg1, & write1 need updates */
37137709 170 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
172 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
173 "Non-buffered flash (256kB)"},
37137709
MC
174 /* Expansion entry 0100 */
175 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 176 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
177 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
178 "Entry 0100"},
179 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 180 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
183 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
184 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
185 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 186 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
187 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
188 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
189 /* Saifun SA25F005 (non-buffered flash) */
190 /* strap, cfg1, & write1 need updates */
191 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
193 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
194 "Non-buffered flash (64kB)"},
195 /* Fast EEPROM */
196 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 197 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
198 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
199 "EEPROM - fast"},
200 /* Expansion entry 1001 */
201 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 "Entry 1001"},
205 /* Expansion entry 1010 */
206 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 "Entry 1010"},
210 /* ATMEL AT45DB011B (buffered flash) */
211 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 212 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
213 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
214 "Buffered flash (128kB)"},
215 /* Expansion entry 1100 */
216 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1100"},
220 /* Expansion entry 1101 */
221 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 222 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
223 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
224 "Entry 1101"},
225 /* Ateml Expansion entry 1110 */
226 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
228 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
229 "Entry 1110 (Atmel)"},
230 /* ATMEL AT45DB021B (buffered flash) */
231 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 232 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
233 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
234 "Buffered flash (256kB)"},
b6016b76
MC
235};
236
e30372c9
MC
237static struct flash_spec flash_5709 = {
238 .flags = BNX2_NV_BUFFERED,
239 .page_bits = BCM5709_FLASH_PAGE_BITS,
240 .page_size = BCM5709_FLASH_PAGE_SIZE,
241 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
242 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
243 .name = "5709 Buffered flash (256kB)",
244};
245
b6016b76
MC
246MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
247
35e9010b 248static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 249{
2f8af120 250 u32 diff;
e89bbf10 251
2f8af120 252 smp_mb();
faac9c4b
MC
253
254 /* The ring uses 256 indices for 255 entries, one of them
255 * needs to be skipped.
256 */
35e9010b 257 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
258 if (unlikely(diff >= TX_DESC_CNT)) {
259 diff &= 0xffff;
260 if (diff == TX_DESC_CNT)
261 diff = MAX_TX_DESC_CNT;
262 }
e89bbf10
MC
263 return (bp->tx_ring_size - diff);
264}
265
b6016b76
MC
266static u32
267bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
268{
1b8227c4
MC
269 u32 val;
270
271 spin_lock_bh(&bp->indirect_lock);
b6016b76 272 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
273 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
274 spin_unlock_bh(&bp->indirect_lock);
275 return val;
b6016b76
MC
276}
277
278static void
279bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
280{
1b8227c4 281 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
282 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
283 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 284 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
285}
286
2726d6e1
MC
287static void
288bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
289{
290 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
291}
292
293static u32
294bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
295{
296 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
297}
298
b6016b76
MC
299static void
300bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
301{
302 offset += cid_addr;
1b8227c4 303 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
304 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
305 int i;
306
307 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
308 REG_WR(bp, BNX2_CTX_CTX_CTRL,
309 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
310 for (i = 0; i < 5; i++) {
59b47d8a
MC
311 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
312 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
313 break;
314 udelay(5);
315 }
316 } else {
317 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
318 REG_WR(bp, BNX2_CTX_DATA, val);
319 }
1b8227c4 320 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
321}
322
4edd473f
MC
323#ifdef BCM_CNIC
324static int
325bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
326{
327 struct bnx2 *bp = netdev_priv(dev);
328 struct drv_ctl_io *io = &info->data.io;
329
330 switch (info->cmd) {
331 case DRV_CTL_IO_WR_CMD:
332 bnx2_reg_wr_ind(bp, io->offset, io->data);
333 break;
334 case DRV_CTL_IO_RD_CMD:
335 io->data = bnx2_reg_rd_ind(bp, io->offset);
336 break;
337 case DRV_CTL_CTX_WR_CMD:
338 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
339 break;
340 default:
341 return -EINVAL;
342 }
343 return 0;
344}
345
346static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
347{
348 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
349 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
350 int sb_id;
351
352 if (bp->flags & BNX2_FLAG_USING_MSIX) {
353 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
354 bnapi->cnic_present = 0;
355 sb_id = bp->irq_nvecs;
356 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
357 } else {
358 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
359 bnapi->cnic_tag = bnapi->last_status_idx;
360 bnapi->cnic_present = 1;
361 sb_id = 0;
362 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
363 }
364
365 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
366 cp->irq_arr[0].status_blk = (void *)
367 ((unsigned long) bnapi->status_blk.msi +
368 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
369 cp->irq_arr[0].status_blk_num = sb_id;
370 cp->num_irq = 1;
371}
372
373static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
374 void *data)
375{
376 struct bnx2 *bp = netdev_priv(dev);
377 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
378
379 if (ops == NULL)
380 return -EINVAL;
381
382 if (cp->drv_state & CNIC_DRV_STATE_REGD)
383 return -EBUSY;
384
385 bp->cnic_data = data;
386 rcu_assign_pointer(bp->cnic_ops, ops);
387
388 cp->num_irq = 0;
389 cp->drv_state = CNIC_DRV_STATE_REGD;
390
391 bnx2_setup_cnic_irq_info(bp);
392
393 return 0;
394}
395
396static int bnx2_unregister_cnic(struct net_device *dev)
397{
398 struct bnx2 *bp = netdev_priv(dev);
399 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
400 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
401
c5a88950 402 mutex_lock(&bp->cnic_lock);
4edd473f
MC
403 cp->drv_state = 0;
404 bnapi->cnic_present = 0;
405 rcu_assign_pointer(bp->cnic_ops, NULL);
c5a88950 406 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
407 synchronize_rcu();
408 return 0;
409}
410
411struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
412{
413 struct bnx2 *bp = netdev_priv(dev);
414 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
415
416 cp->drv_owner = THIS_MODULE;
417 cp->chip_id = bp->chip_id;
418 cp->pdev = bp->pdev;
419 cp->io_base = bp->regview;
420 cp->drv_ctl = bnx2_drv_ctl;
421 cp->drv_register_cnic = bnx2_register_cnic;
422 cp->drv_unregister_cnic = bnx2_unregister_cnic;
423
424 return cp;
425}
426EXPORT_SYMBOL(bnx2_cnic_probe);
427
428static void
429bnx2_cnic_stop(struct bnx2 *bp)
430{
431 struct cnic_ops *c_ops;
432 struct cnic_ctl_info info;
433
c5a88950
MC
434 mutex_lock(&bp->cnic_lock);
435 c_ops = bp->cnic_ops;
4edd473f
MC
436 if (c_ops) {
437 info.cmd = CNIC_CTL_STOP_CMD;
438 c_ops->cnic_ctl(bp->cnic_data, &info);
439 }
c5a88950 440 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
441}
442
443static void
444bnx2_cnic_start(struct bnx2 *bp)
445{
446 struct cnic_ops *c_ops;
447 struct cnic_ctl_info info;
448
c5a88950
MC
449 mutex_lock(&bp->cnic_lock);
450 c_ops = bp->cnic_ops;
4edd473f
MC
451 if (c_ops) {
452 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
453 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
454
455 bnapi->cnic_tag = bnapi->last_status_idx;
456 }
457 info.cmd = CNIC_CTL_START_CMD;
458 c_ops->cnic_ctl(bp->cnic_data, &info);
459 }
c5a88950 460 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
461}
462
463#else
464
/* CNIC support compiled out: stop/start are no-ops. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
474
475#endif
476
b6016b76
MC
477static int
478bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
479{
480 u32 val1;
481 int i, ret;
482
583c28e5 483 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
484 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
485 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
486
487 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
488 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
489
490 udelay(40);
491 }
492
493 val1 = (bp->phy_addr << 21) | (reg << 16) |
494 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
495 BNX2_EMAC_MDIO_COMM_START_BUSY;
496 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
497
498 for (i = 0; i < 50; i++) {
499 udelay(10);
500
501 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
502 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
503 udelay(5);
504
505 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
506 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
507
508 break;
509 }
510 }
511
512 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
513 *val = 0x0;
514 ret = -EBUSY;
515 }
516 else {
517 *val = val1;
518 ret = 0;
519 }
520
583c28e5 521 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
522 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
523 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
524
525 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
526 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
527
528 udelay(40);
529 }
530
531 return ret;
532}
533
534static int
535bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
536{
537 u32 val1;
538 int i, ret;
539
583c28e5 540 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
541 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
542 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
543
544 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
545 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
546
547 udelay(40);
548 }
549
550 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
551 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
552 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
553 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 554
b6016b76
MC
555 for (i = 0; i < 50; i++) {
556 udelay(10);
557
558 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
559 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
560 udelay(5);
561 break;
562 }
563 }
564
565 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
566 ret = -EBUSY;
567 else
568 ret = 0;
569
583c28e5 570 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
571 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
572 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
573
574 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
575 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
576
577 udelay(40);
578 }
579
580 return ret;
581}
582
583static void
584bnx2_disable_int(struct bnx2 *bp)
585{
b4b36042
MC
586 int i;
587 struct bnx2_napi *bnapi;
588
589 for (i = 0; i < bp->irq_nvecs; i++) {
590 bnapi = &bp->bnx2_napi[i];
591 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
592 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
593 }
b6016b76
MC
594 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
595}
596
597static void
598bnx2_enable_int(struct bnx2 *bp)
599{
b4b36042
MC
600 int i;
601 struct bnx2_napi *bnapi;
35efa7c1 602
b4b36042
MC
603 for (i = 0; i < bp->irq_nvecs; i++) {
604 bnapi = &bp->bnx2_napi[i];
1269a8a6 605
b4b36042
MC
606 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
607 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
608 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
609 bnapi->last_status_idx);
b6016b76 610
b4b36042
MC
611 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
612 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
613 bnapi->last_status_idx);
614 }
bf5295bb 615 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
616}
617
618static void
619bnx2_disable_int_sync(struct bnx2 *bp)
620{
b4b36042
MC
621 int i;
622
b6016b76
MC
623 atomic_inc(&bp->intr_sem);
624 bnx2_disable_int(bp);
b4b36042
MC
625 for (i = 0; i < bp->irq_nvecs; i++)
626 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
627}
628
35efa7c1
MC
629static void
630bnx2_napi_disable(struct bnx2 *bp)
631{
b4b36042
MC
632 int i;
633
634 for (i = 0; i < bp->irq_nvecs; i++)
635 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
636}
637
638static void
639bnx2_napi_enable(struct bnx2 *bp)
640{
b4b36042
MC
641 int i;
642
643 for (i = 0; i < bp->irq_nvecs; i++)
644 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
645}
646
b6016b76
MC
647static void
648bnx2_netif_stop(struct bnx2 *bp)
649{
4edd473f 650 bnx2_cnic_stop(bp);
b6016b76
MC
651 bnx2_disable_int_sync(bp);
652 if (netif_running(bp->dev)) {
35efa7c1 653 bnx2_napi_disable(bp);
b6016b76
MC
654 netif_tx_disable(bp->dev);
655 bp->dev->trans_start = jiffies; /* prevent tx timeout */
656 }
657}
658
659static void
660bnx2_netif_start(struct bnx2 *bp)
661{
662 if (atomic_dec_and_test(&bp->intr_sem)) {
663 if (netif_running(bp->dev)) {
706bf240 664 netif_tx_wake_all_queues(bp->dev);
35efa7c1 665 bnx2_napi_enable(bp);
b6016b76 666 bnx2_enable_int(bp);
4edd473f 667 bnx2_cnic_start(bp);
b6016b76
MC
668 }
669 }
670}
671
35e9010b
MC
672static void
673bnx2_free_tx_mem(struct bnx2 *bp)
674{
675 int i;
676
677 for (i = 0; i < bp->num_tx_rings; i++) {
678 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
679 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
680
681 if (txr->tx_desc_ring) {
682 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
683 txr->tx_desc_ring,
684 txr->tx_desc_mapping);
685 txr->tx_desc_ring = NULL;
686 }
687 kfree(txr->tx_buf_ring);
688 txr->tx_buf_ring = NULL;
689 }
690}
691
bb4f98ab
MC
692static void
693bnx2_free_rx_mem(struct bnx2 *bp)
694{
695 int i;
696
697 for (i = 0; i < bp->num_rx_rings; i++) {
698 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
699 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
700 int j;
701
702 for (j = 0; j < bp->rx_max_ring; j++) {
703 if (rxr->rx_desc_ring[j])
704 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
705 rxr->rx_desc_ring[j],
706 rxr->rx_desc_mapping[j]);
707 rxr->rx_desc_ring[j] = NULL;
708 }
25b0b999 709 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
710 rxr->rx_buf_ring = NULL;
711
712 for (j = 0; j < bp->rx_max_pg_ring; j++) {
713 if (rxr->rx_pg_desc_ring[j])
714 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
715 rxr->rx_pg_desc_ring[j],
716 rxr->rx_pg_desc_mapping[j]);
717 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 718 }
25b0b999 719 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
720 rxr->rx_pg_ring = NULL;
721 }
722}
723
35e9010b
MC
724static int
725bnx2_alloc_tx_mem(struct bnx2 *bp)
726{
727 int i;
728
729 for (i = 0; i < bp->num_tx_rings; i++) {
730 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
731 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
732
733 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
734 if (txr->tx_buf_ring == NULL)
735 return -ENOMEM;
736
737 txr->tx_desc_ring =
738 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
739 &txr->tx_desc_mapping);
740 if (txr->tx_desc_ring == NULL)
741 return -ENOMEM;
742 }
743 return 0;
744}
745
bb4f98ab
MC
746static int
747bnx2_alloc_rx_mem(struct bnx2 *bp)
748{
749 int i;
750
751 for (i = 0; i < bp->num_rx_rings; i++) {
752 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
753 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
754 int j;
755
756 rxr->rx_buf_ring =
757 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
758 if (rxr->rx_buf_ring == NULL)
759 return -ENOMEM;
760
761 memset(rxr->rx_buf_ring, 0,
762 SW_RXBD_RING_SIZE * bp->rx_max_ring);
763
764 for (j = 0; j < bp->rx_max_ring; j++) {
765 rxr->rx_desc_ring[j] =
766 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
767 &rxr->rx_desc_mapping[j]);
768 if (rxr->rx_desc_ring[j] == NULL)
769 return -ENOMEM;
770
771 }
772
773 if (bp->rx_pg_ring_size) {
774 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
775 bp->rx_max_pg_ring);
776 if (rxr->rx_pg_ring == NULL)
777 return -ENOMEM;
778
779 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
780 bp->rx_max_pg_ring);
781 }
782
783 for (j = 0; j < bp->rx_max_pg_ring; j++) {
784 rxr->rx_pg_desc_ring[j] =
785 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
786 &rxr->rx_pg_desc_mapping[j]);
787 if (rxr->rx_pg_desc_ring[j] == NULL)
788 return -ENOMEM;
789
790 }
791 }
792 return 0;
793}
794
b6016b76
MC
795static void
796bnx2_free_mem(struct bnx2 *bp)
797{
13daffa2 798 int i;
43e80b89 799 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 800
35e9010b 801 bnx2_free_tx_mem(bp);
bb4f98ab 802 bnx2_free_rx_mem(bp);
35e9010b 803
59b47d8a
MC
804 for (i = 0; i < bp->ctx_pages; i++) {
805 if (bp->ctx_blk[i]) {
806 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
807 bp->ctx_blk[i],
808 bp->ctx_blk_mapping[i]);
809 bp->ctx_blk[i] = NULL;
810 }
811 }
43e80b89 812 if (bnapi->status_blk.msi) {
0f31f994 813 pci_free_consistent(bp->pdev, bp->status_stats_size,
43e80b89
MC
814 bnapi->status_blk.msi,
815 bp->status_blk_mapping);
816 bnapi->status_blk.msi = NULL;
0f31f994 817 bp->stats_blk = NULL;
b6016b76 818 }
b6016b76
MC
819}
820
821static int
822bnx2_alloc_mem(struct bnx2 *bp)
823{
35e9010b 824 int i, status_blk_size, err;
43e80b89
MC
825 struct bnx2_napi *bnapi;
826 void *status_blk;
b6016b76 827
0f31f994
MC
828 /* Combine status and statistics blocks into one allocation. */
829 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 830 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
831 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
832 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
833 bp->status_stats_size = status_blk_size +
834 sizeof(struct statistics_block);
835
43e80b89
MC
836 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
837 &bp->status_blk_mapping);
838 if (status_blk == NULL)
b6016b76
MC
839 goto alloc_mem_err;
840
43e80b89 841 memset(status_blk, 0, bp->status_stats_size);
b6016b76 842
43e80b89
MC
843 bnapi = &bp->bnx2_napi[0];
844 bnapi->status_blk.msi = status_blk;
845 bnapi->hw_tx_cons_ptr =
846 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
847 bnapi->hw_rx_cons_ptr =
848 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 849 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
b4b36042 850 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
43e80b89
MC
851 struct status_block_msix *sblk;
852
853 bnapi = &bp->bnx2_napi[i];
b4b36042 854
43e80b89
MC
855 sblk = (void *) (status_blk +
856 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
857 bnapi->status_blk.msix = sblk;
858 bnapi->hw_tx_cons_ptr =
859 &sblk->status_tx_quick_consumer_index;
860 bnapi->hw_rx_cons_ptr =
861 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
862 bnapi->int_num = i << 24;
863 }
864 }
35efa7c1 865
43e80b89 866 bp->stats_blk = status_blk + status_blk_size;
b6016b76 867
0f31f994 868 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 869
59b47d8a
MC
870 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
871 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
872 if (bp->ctx_pages == 0)
873 bp->ctx_pages = 1;
874 for (i = 0; i < bp->ctx_pages; i++) {
875 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
876 BCM_PAGE_SIZE,
877 &bp->ctx_blk_mapping[i]);
878 if (bp->ctx_blk[i] == NULL)
879 goto alloc_mem_err;
880 }
881 }
35e9010b 882
bb4f98ab
MC
883 err = bnx2_alloc_rx_mem(bp);
884 if (err)
885 goto alloc_mem_err;
886
35e9010b
MC
887 err = bnx2_alloc_tx_mem(bp);
888 if (err)
889 goto alloc_mem_err;
890
b6016b76
MC
891 return 0;
892
893alloc_mem_err:
894 bnx2_free_mem(bp);
895 return -ENOMEM;
896}
897
e3648b3d
MC
898static void
899bnx2_report_fw_link(struct bnx2 *bp)
900{
901 u32 fw_link_status = 0;
902
583c28e5 903 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
904 return;
905
e3648b3d
MC
906 if (bp->link_up) {
907 u32 bmsr;
908
909 switch (bp->line_speed) {
910 case SPEED_10:
911 if (bp->duplex == DUPLEX_HALF)
912 fw_link_status = BNX2_LINK_STATUS_10HALF;
913 else
914 fw_link_status = BNX2_LINK_STATUS_10FULL;
915 break;
916 case SPEED_100:
917 if (bp->duplex == DUPLEX_HALF)
918 fw_link_status = BNX2_LINK_STATUS_100HALF;
919 else
920 fw_link_status = BNX2_LINK_STATUS_100FULL;
921 break;
922 case SPEED_1000:
923 if (bp->duplex == DUPLEX_HALF)
924 fw_link_status = BNX2_LINK_STATUS_1000HALF;
925 else
926 fw_link_status = BNX2_LINK_STATUS_1000FULL;
927 break;
928 case SPEED_2500:
929 if (bp->duplex == DUPLEX_HALF)
930 fw_link_status = BNX2_LINK_STATUS_2500HALF;
931 else
932 fw_link_status = BNX2_LINK_STATUS_2500FULL;
933 break;
934 }
935
936 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
937
938 if (bp->autoneg) {
939 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
940
ca58c3af
MC
941 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
942 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
943
944 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 945 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
946 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
947 else
948 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
949 }
950 }
951 else
952 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
953
2726d6e1 954 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
955}
956
9b1084b8
MC
957static char *
958bnx2_xceiver_str(struct bnx2 *bp)
959{
960 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 961 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
962 "Copper"));
963}
964
b6016b76
MC
965static void
966bnx2_report_link(struct bnx2 *bp)
967{
968 if (bp->link_up) {
969 netif_carrier_on(bp->dev);
9b1084b8
MC
970 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
971 bnx2_xceiver_str(bp));
b6016b76
MC
972
973 printk("%d Mbps ", bp->line_speed);
974
975 if (bp->duplex == DUPLEX_FULL)
976 printk("full duplex");
977 else
978 printk("half duplex");
979
980 if (bp->flow_ctrl) {
981 if (bp->flow_ctrl & FLOW_CTRL_RX) {
982 printk(", receive ");
983 if (bp->flow_ctrl & FLOW_CTRL_TX)
984 printk("& transmit ");
985 }
986 else {
987 printk(", transmit ");
988 }
989 printk("flow control ON");
990 }
991 printk("\n");
992 }
993 else {
994 netif_carrier_off(bp->dev);
9b1084b8
MC
995 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
996 bnx2_xceiver_str(bp));
b6016b76 997 }
e3648b3d
MC
998
999 bnx2_report_fw_link(bp);
b6016b76
MC
1000}
1001
/* Resolve the TX/RX pause (flow control) state for the current link and
 * store it in bp->flow_ctrl.  Called with bp->phy_lock held.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If flow control was not fully autonegotiated, honor the user's
	 * requested settings (pause is only meaningful at full duplex).
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* The 5708 SerDes PHY reports the already-resolved pause state
	 * directly in its 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000BASE-X pause advertisement bits into the copper
	 * (ADVERTISE_PAUSE_*) encoding so one resolution table below
	 * serves both PHY types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if (local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1077
/* Record the resolved speed/duplex for a link-up event on the 5709
 * SerDes PHY by reading its GP_STATUS block.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* The GP status register lives in its own register block; switch
	 * to it for the read, then restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Forced speed/duplex: report the requested values. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1116
/* Record the resolved speed/duplex for a link-up event on the 5708
 * SerDes PHY from its 1000X status register.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1145
/* Record speed/duplex for a link-up event on the 5706 SerDes PHY.
 * The 5706 SerDes only runs at 1 Gbps; duplex comes from forced BMCR
 * bits or, when autonegotiating, from the common advertisement.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		/* Forced mode: BMCR already told us the duplex. */
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1182
/* Resolve speed/duplex for a copper PHY link-up event, either from the
 * autonegotiation results or from the forced BMCR settings.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* The link-partner 1000BASE-T ability bits in MII_STAT1000
		 * sit two bit positions above the corresponding local
		 * advertisement bits in MII_CTRL1000, hence the >> 2.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100 resolution. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as no link. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1248
/* Program the L2 context for one rx ring (cid).  On the 5709 this also
 * sets the flow-control watermarks, scaled from the rx ring size.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only applies when TX pause is enabled;
		 * otherwise disable it.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; saturate, and disable the low
		 * watermark when the high watermark scales to zero.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1284
bb4f98ab
MC
1285static void
1286bnx2_init_all_rx_contexts(struct bnx2 *bp)
1287{
1288 int i;
1289 u32 cid;
1290
1291 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1292 if (i == 1)
1293 cid = RX_RSS_CID;
1294 bnx2_init_rx_context(bp, cid);
1295 }
1296}
1297
/* Program the EMAC to match the resolved link parameters: port mode
 * (MII/GMII/2.5G), duplex, and rx/tx pause enables.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default inter-packet gap; use the larger value required for
	 * 1000 Mbps half duplex.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					/* Only 5708/5709 have a dedicated
					 * 10M MII mode.
					 */
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx contexts carry flow-control watermarks that depend on
	 * bp->flow_ctrl, so reprogram them after a link change.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1365
27a005b8
MC
1366static void
1367bnx2_enable_bmsr1(struct bnx2 *bp)
1368{
583c28e5 1369 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1370 (CHIP_NUM(bp) == CHIP_NUM_5709))
1371 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1372 MII_BNX2_BLK_ADDR_GP_STATUS);
1373}
1374
1375static void
1376bnx2_disable_bmsr1(struct bnx2 *bp)
1377{
583c28e5 1378 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1379 (CHIP_NUM(bp) == CHIP_NUM_5709))
1380 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1381 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1382}
1383
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * enabled.  Returns 1 if it was already enabled (nothing changed),
 * 0 if it had to be enabled (caller may need to force a link retrain),
 * and 0 immediately if the PHY is not 2.5G capable.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1412
/* Disable 2.5G advertisement in the PHY's UP1 register if currently
 * enabled.  Returns 1 if it was enabled and has been cleared (caller
 * may need to force a link retrain), 0 otherwise.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1438
1439static void
1440bnx2_enable_forced_2g5(struct bnx2 *bp)
1441{
1442 u32 bmcr;
1443
583c28e5 1444 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1445 return;
1446
27a005b8
MC
1447 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1448 u32 val;
1449
1450 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1451 MII_BNX2_BLK_ADDR_SERDES_DIG);
1452 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1453 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1454 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1455 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1456
1457 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1458 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1459 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1460
1461 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1462 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1463 bmcr |= BCM5708S_BMCR_FORCE_2500;
1464 }
1465
1466 if (bp->autoneg & AUTONEG_SPEED) {
1467 bmcr &= ~BMCR_ANENABLE;
1468 if (bp->req_duplex == DUPLEX_FULL)
1469 bmcr |= BMCR_FULLDPLX;
1470 }
1471 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1472}
1473
1474static void
1475bnx2_disable_forced_2g5(struct bnx2 *bp)
1476{
1477 u32 bmcr;
1478
583c28e5 1479 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1480 return;
1481
27a005b8
MC
1482 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1483 u32 val;
1484
1485 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1486 MII_BNX2_BLK_ADDR_SERDES_DIG);
1487 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1488 val &= ~MII_BNX2_SD_MISC1_FORCE;
1489 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1490
1491 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1492 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1493 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1494
1495 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1496 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1497 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1498 }
1499
1500 if (bp->autoneg & AUTONEG_SPEED)
1501 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1502 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1503}
1504
/* Force the 5706 SerDes link down (start != 0 releases the force) by
 * poking the expansion SERDES control register through the DSP
 * read/write port.  The 0xff0f / 0xc0 masks are magic values from the
 * vendor; exact bit meanings are undocumented here — treat as opaque.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1517
/* Re-evaluate the link state from the PHY, update bp->link_up, resolve
 * speed/duplex/flow-control, reprogram the MAC, and report any change.
 * Called with bp->phy_lock held.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		/* In loopback the link is up by definition. */
		bp->link_up = 1;
		return 0;
	}

	/* Link is managed by firmware when a remote PHY is in use. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR link status is latched-low; read twice to get the current
	 * state.  On 5709 SerDes this requires a register-block switch.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* 5706 SerDes workaround: BMSR is unreliable, so derive
		 * link state from EMAC status plus the AN debug shadow
		 * register (read twice, also latched).
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: back off forced 2.5G and re-enable autoneg if
		 * we had fallen back to parallel detection.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1601
1602static int
1603bnx2_reset_phy(struct bnx2 *bp)
1604{
1605 int i;
1606 u32 reg;
1607
ca58c3af 1608 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1609
1610#define PHY_RESET_MAX_WAIT 100
1611 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1612 udelay(10);
1613
ca58c3af 1614 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1615 if (!(reg & BMCR_RESET)) {
1616 udelay(20);
1617 break;
1618 }
1619 }
1620 if (i == PHY_RESET_MAX_WAIT) {
1621 return -EBUSY;
1622 }
1623 return 0;
1624}
1625
1626static u32
1627bnx2_phy_get_pause_adv(struct bnx2 *bp)
1628{
1629 u32 adv = 0;
1630
1631 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1632 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1633
583c28e5 1634 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1635 adv = ADVERTISE_1000XPAUSE;
1636 }
1637 else {
1638 adv = ADVERTISE_PAUSE_CAP;
1639 }
1640 }
1641 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1642 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1643 adv = ADVERTISE_1000XPSE_ASYM;
1644 }
1645 else {
1646 adv = ADVERTISE_PAUSE_ASYM;
1647 }
1648 }
1649 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1650 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1651 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1652 }
1653 else {
1654 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1655 }
1656 }
1657 return adv;
1658}
1659
a2f13890 1660static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1661
/* Ask the management firmware to configure the remote PHY according to
 * the driver's autoneg/speed/pause settings.  Temporarily drops
 * bp->phy_lock around the firmware handshake (see sparse annotations).
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Advertise every enabled speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: encode the single requested speed/duplex. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Firmware sync can sleep/poll; must not hold the phy spinlock. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1720
/* Configure a SerDes PHY for the requested (forced or autonegotiated)
 * link settings, forcing a visible link-down/up cycle when the
 * configuration changes.  Called with bp->phy_lock held; may drop it
 * briefly (see sparse annotations).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Toggling 2.5G capability requires a link retrain. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear the 5709's speed-select MSB. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiated path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() may not be called with a spinlock held. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1837
1838#define ETHTOOL_ALL_FIBRE_SPEED \
583c28e5 1839 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
deaf391b
MC
1840 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1841 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1842
1843#define ETHTOOL_ALL_COPPER_SPEED \
1844 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1845 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1846 ADVERTISED_1000baseT_Full)
1847
1848#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1849 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1850
b6016b76
MC
1851#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1852
/* Initialize the driver's link settings (autoneg/advertising or forced
 * speed/duplex) from the firmware-provided defaults for the current
 * remote-PHY port type, read from shared memory.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: build the advertising mask from the fw word. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: later checks override earlier ones, so the
		 * highest enabled speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1899
/* Initialize the default link settings: delegate to firmware defaults
 * for remote-PHY configs, otherwise enable full autoneg — unless the
 * NVRAM port config forces 1G full duplex on a SerDes port.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* NVRAM may force the SerDes port to 1G full duplex. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1925
/* Write the next driver-pulse sequence number to the firmware mailbox
 * so the management firmware knows the driver is alive.  Uses the
 * indirect register window, serialized by bp->indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1939
/* Handle a link event from the management firmware (remote PHY):
 * decode the shared-memory link status word into bp->link_up, speed,
 * duplex, flow control and port type, then update the MAC and report
 * any link change.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware sets this bit to request a driver heartbeat. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each xxHALF case sets half duplex, then deliberately
		 * falls through to the matching speed assignment.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: user-forced unless fully autonegotiated. */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		/* Port type changed: reload the defaults for it. */
		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2016
2017static int
2018bnx2_set_remote_link(struct bnx2 *bp)
2019{
2020 u32 evt_code;
2021
2726d6e1 2022 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2023 switch (evt_code) {
2024 case BNX2_FW_EVT_CODE_LINK_EVENT:
2025 bnx2_remote_phy_event(bp);
2026 break;
2027 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2028 default:
df149d70 2029 bnx2_send_heart_beat(bp);
0d8a6571
MC
2030 break;
2031 }
2032 return 0;
2033}
2034
/* Configure a copper PHY for the requested (forced or autonegotiated)
 * link settings.  Called with bp->phy_lock held; may drop it briefly
 * while forcing a link-down cycle (see sparse annotations).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Build the desired 10/100 and 1000 advertisements and
		 * only restart autoneg if something actually changed.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched-low: read twice. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() requires dropping the spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2133
2134static int
0d8a6571 2135bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2136__releases(&bp->phy_lock)
2137__acquires(&bp->phy_lock)
b6016b76
MC
2138{
2139 if (bp->loopback == MAC_LOOPBACK)
2140 return 0;
2141
583c28e5 2142 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 2143 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
2144 }
2145 else {
2146 return (bnx2_setup_copper_phy(bp));
2147 }
2148}
2149
/* Initialize the 5709 SerDes PHY.
 *
 * The 5709S exposes the standard MII registers at an offset of 0x10 and
 * uses a paged register map selected through MII_BNX2_BLK_ADDR, so the
 * cached MII register addresses in *bp are redirected first, then each
 * register block is selected and programmed in turn.  Register/bit
 * semantics come from the MII_BNX2_* definitions in bnx2.h.
 *
 * @bp:        device state
 * @reset_phy: non-zero to reset the PHY before programming it
 *
 * Returns 0 (write failures are not propagated).
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* On the 5709S the IEEE MII registers live at base + 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route subsequent MMD accesses to the AN MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode, disable auto-detection of the media type. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise (or not) 2.5G according to the device capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable CL73 BAM autoneg features. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2199
/* Initialize the 5708 SerDes PHY.
 *
 * Programs fiber mode with auto-detect, PLL early-link detect, optional
 * 2.5G advertisement, a TX-amplitude errata fix for early silicon
 * revisions (A0/B0/B1), and a TX control override taken from shared
 * memory for backplane configurations.
 *
 * @bp:        device state
 * @reset_phy: non-zero to reset the PHY before programming it
 *
 * Returns 0 (write failures are not propagated).
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	/* Use IEEE-compliant signalling in the DIG3 block. */
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	/* Enable parallel-detect on PLL lock. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the device supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the board-specific TXCTL3 value from shared memory, but
	 * only when the hardware config says this is a backplane design.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2257
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag, applies a 5706-specific GP HW control
 * setting, and programs undocumented PHY registers 0x18/0x1c differently
 * depending on whether jumbo frames (MTU > 1500) are in use.  The raw
 * register numbers and magic values follow the vendor-recommended
 * sequence; their bit-level meaning is not documented here.
 *
 * @bp:        device state
 * @reset_phy: non-zero to reset the PHY before programming it
 *
 * Returns 0 (write failures are not propagated).
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2295
/* Initialize a copper (10/100/1000BASE-T) PHY.
 *
 * Applies two optional errata workarounds (a CRC fix sequence and an
 * early-DAC disable, both gated by phy_flags), sets or clears the
 * extended-packet-length bits depending on jumbo MTU, and enables the
 * ethernet@wirespeed feature.  Several writes use raw register numbers
 * (0x10/0x15/0x17/0x18/0x1c) and vendor-supplied magic values.
 *
 * @bp:        device state
 * @reset_phy: non-zero to reset the PHY before programming it
 *
 * Returns 0 (write failures are not propagated).
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor-recommended errata sequence, applied when the CRC-fix
	 * flag is set for this PHY.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear the extended packet length bits for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2347
2348
/* Top-level PHY initialization.
 *
 * Resets the cached MII register addresses to their IEEE defaults (the
 * chip-specific init routines override them where needed), enables link
 * attention, reads the PHY ID, runs the chip-specific init routine, and
 * finally calls bnx2_setup_phy() to apply the current link settings.
 * When the PHY is managed remotely (REMOTE_PHY_CAP), the local ID read
 * and chip-specific init are skipped entirely.
 *
 * Called with bp->phy_lock held; bnx2_setup_phy() may drop and re-take
 * it, which the sparse annotations document.
 *
 * @bp:        device state
 * @reset_phy: passed through to the chip-specific init routine
 *
 * Returns 0 on success or a negative errno from setup.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register map; 5709S init remaps these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Assemble the 32-bit PHY ID from the two ID registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2394
2395static int
2396bnx2_set_mac_loopback(struct bnx2 *bp)
2397{
2398 u32 mac_mode;
2399
2400 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2401 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2402 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2403 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2404 bp->link_up = 1;
2405 return 0;
2406}
2407
bc5a0690
MC
2408static int bnx2_test_link(struct bnx2 *);
2409
2410static int
2411bnx2_set_phy_loopback(struct bnx2 *bp)
2412{
2413 u32 mac_mode;
2414 int rc, i;
2415
2416 spin_lock_bh(&bp->phy_lock);
ca58c3af 2417 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2418 BMCR_SPEED1000);
2419 spin_unlock_bh(&bp->phy_lock);
2420 if (rc)
2421 return rc;
2422
2423 for (i = 0; i < 10; i++) {
2424 if (bnx2_test_link(bp) == 0)
2425 break;
80be4434 2426 msleep(100);
bc5a0690
MC
2427 }
2428
2429 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2430 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2431 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2432 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2433
2434 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2435 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2436 bp->link_up = 1;
2437 return 0;
2438}
2439
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and optionally wait for the firmware to acknowledge it.
 *
 * A monotonically increasing sequence number is OR'ed into the message so
 * the ack (echoed in BNX2_FW_MB) can be matched.  The wait polls every
 * 10 ms up to BNX2_FW_ACK_TIME_OUT_MS total.
 *
 * @bp:       device state
 * @msg_data: message code/data bits (sequence bits are filled in here)
 * @ack:      non-zero to wait for the firmware acknowledgement
 * @silent:   non-zero to suppress the timeout error printk
 *
 * Returns 0 on success, -EBUSY on ack timeout (after notifying the
 * firmware of the timeout), or -EIO if the firmware reports a non-OK
 * status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require the ack to have arrived. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2485
59b47d8a
MC
/* Initialize the 5709 context memory.
 *
 * Kicks off the on-chip context memory init, waits for it to finish,
 * then zeroes each host context page and registers its DMA address in
 * the chip's host page table, polling for each write request to
 * complete.
 *
 * @bp: device state; bp->ctx_blk[]/ctx_blk_mapping[] must already be
 *      allocated (ctx_pages entries).
 *
 * Returns 0 on success, -EBUSY on a hardware poll timeout, or -ENOMEM
 * if a context page is missing.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the chip to clear the MEM_INIT bit. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit DMA address of this page and issue
		 * a page-table write request for slot i.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the write request to complete. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2533
b6016b76
MC
/* Zero-initialize the on-chip context memory for connection IDs 0..95
 * (pre-5709 chips).
 *
 * For each context ID, computes the virtual and physical context
 * addresses (with a remapping quirk on 5706 A0 silicon), points the
 * chip's context window at it, and zeroes the whole context one 32-bit
 * word at a time.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 errata: CIDs with bit 3 set are
			 * remapped into the 0x60+ range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context spans several physical pages; map and clear
		 * each one.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2576
/* Work around bad RX buffer memory blocks (5706 A0 errata).
 *
 * Repeatedly allocates mbufs from the chip's internal RX buffer pool.
 * Buffers whose address has bit 9 set are known-bad memory blocks;
 * those are simply never freed back, while the good ones (at most 512,
 * the pool size) are recorded and returned to the pool afterwards.
 * The net effect is that the bad blocks are permanently removed from
 * circulation.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		/* Encoding expected by the FW_BUF_FREE register:
		 * address in both halves plus the valid bit.
		 */
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2628
2629static void
5fcaed01 2630bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2631{
2632 u32 val;
b6016b76
MC
2633
2634 val = (mac_addr[0] << 8) | mac_addr[1];
2635
5fcaed01 2636 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2637
6aa20a22 2638 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2639 (mac_addr[4] << 8) | mac_addr[5];
2640
5fcaed01 2641 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2642}
2643
/* Allocate and DMA-map one page for slot @index of the RX page ring,
 * recording it in the software ring and writing its 64-bit bus address
 * into the corresponding hardware descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or -EIO
 * if the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Save the mapping for later unmap and hand the address to HW. */
	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2668
2669static void
bb4f98ab 2670bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2671{
bb4f98ab 2672 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2673 struct page *page = rx_pg->page;
2674
2675 if (!page)
2676 return;
2677
2678 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2679 PCI_DMA_FROMDEVICE);
2680
2681 __free_page(page);
2682 rx_pg->page = NULL;
2683}
2684
/* Allocate and DMA-map one receive skb for slot @index of the RX ring.
 *
 * The skb data pointer is aligned up to BNX2_RX_ALIGN before mapping,
 * the mapping is stored in the software ring entry, the bus address is
 * written into the hardware descriptor, and the producer byte-sequence
 * counter is advanced by the buffer size.
 *
 * Returns 0 on success, -ENOMEM on skb allocation failure, or -EIO on
 * DMA mapping failure (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2719
da3e4fbe 2720static int
35efa7c1 2721bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2722{
43e80b89 2723 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2724 u32 new_link_state, old_link_state;
da3e4fbe 2725 int is_set = 1;
b6016b76 2726
da3e4fbe
MC
2727 new_link_state = sblk->status_attn_bits & event;
2728 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2729 if (new_link_state != old_link_state) {
da3e4fbe
MC
2730 if (new_link_state)
2731 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2732 else
2733 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2734 } else
2735 is_set = 0;
2736
2737 return is_set;
2738}
2739
/* Service PHY-related attention events from the status block: link
 * state changes and the remote-PHY timer-abort event.  Runs under
 * bp->phy_lock; callers are in interrupt/NAPI context (spin_lock, not
 * the _bh variant).
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2753
/* Read the hardware TX consumer index from the status block.
 *
 * The status block is written by the chip via DMA, so compiler barriers
 * bracket the read to force a fresh load.  The hardware index skips the
 * "next-page" descriptor at the end of each ring page, so when the read
 * value lands on that slot it is advanced by one to stay comparable
 * with the driver's software index.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2767
57851d84
MC
/* Reclaim completed TX descriptors for one NAPI instance.
 *
 * Walks the TX ring from the software consumer index up to the hardware
 * consumer index, unmapping and freeing each completed skb, up to
 * @budget packets.  Afterwards, the associated netdev TX queue is woken
 * if it was stopped and enough descriptors have been freed.
 *
 * @bp:     device state
 * @bnapi:  NAPI instance whose TX ring is serviced
 * @budget: maximum number of packets to reclaim
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnapi maps 1:1 to a netdev TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't free the skb until all of its BDs
			 * (frags + the final BD) have completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Skip over the fragment BDs plus the last BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more completed. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the queue lock to avoid racing with the
		 * transmit path stopping the queue concurrently.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2850
/* Recycle @count RX pages from the consumer side of the page ring back
 * to the producer side, without unmapping them, after an allocation
 * failure.  If @skb is non-NULL its last frag page is detached and
 * returned to the ring first, and the skb is freed.
 *
 * @bp:    device state
 * @rxr:   RX ring whose page ring is updated
 * @skb:   partially-built skb to dismantle, or NULL
 * @count: number of page ring entries to recycle
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page, its DMA mapping, and the descriptor
		 * address from the consumer slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2906
/* Recycle an RX buffer from ring slot @cons to producer slot @prod
 * without unmapping it, e.g. after an allocation failure or after the
 * packet was copied out.  The DMA mapping and hardware descriptor
 * address follow the skb from the consumer to the producer slot.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the prefix that the CPU may have touched back to the
	 * device before the buffer is reused for DMA.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing further to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2936
/* Finish building a received skb whose buffer is being taken off the
 * RX ring, replacing it with a freshly allocated buffer.
 *
 * For non-split packets (hdr_len == 0) the skb is simply sized to @len.
 * For header-split / jumbo packets, the remainder of the frame lives in
 * pages on the RX page ring; each page is attached as a frag and
 * replaced with a new page.  On any allocation failure the original
 * buffer and pages are recycled back onto the rings.
 *
 * @bp:       device state
 * @rxr:      RX ring the buffer came from
 * @skb:      the skb holding the received data
 * @len:      frame length (excluding the 4-byte CRC)
 * @hdr_len:  bytes in the linear part when split, 0 otherwise
 * @dma_addr: DMA address of the skb data to unmap
 * @ring_idx: producer index in the low 16 bits, consumer in the high 16
 *
 * Returns 0 on success or a negative errno on allocation failure (the
 * caller must then drop the packet).
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	/* Refill the producer slot first; on failure recycle everything. */
	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* +4 keeps the CRC in the page math; it is trimmed from
		 * the last fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The last page holds only (part of) the
				 * CRC: recycle the remaining pages and
				 * trim the CRC bytes off the skb.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3035
/* Read the hardware RX consumer index from the status block.
 *
 * Mirrors bnx2_get_hw_tx_cons(): compiler barriers force a fresh load
 * of the DMA-written status block, and the index is bumped past the
 * "next-page" descriptor slot at the end of each ring page so it stays
 * comparable with the software index.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3049
b6016b76 3050static int
35efa7c1 3051bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 3052{
bb4f98ab 3053 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
3054 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3055 struct l2_fhdr *rx_hdr;
1db82f2a 3056 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 3057
35efa7c1 3058 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
3059 sw_cons = rxr->rx_cons;
3060 sw_prod = rxr->rx_prod;
b6016b76
MC
3061
3062 /* Memory barrier necessary as speculative reads of the rx
3063 * buffer can be ahead of the index in the status block
3064 */
3065 rmb();
3066 while (sw_cons != hw_cons) {
1db82f2a 3067 unsigned int len, hdr_len;
ade2bfe7 3068 u32 status;
b6016b76
MC
3069 struct sw_bd *rx_buf;
3070 struct sk_buff *skb;
236b6394 3071 dma_addr_t dma_addr;
f22828e8
MC
3072 u16 vtag = 0;
3073 int hw_vlan __maybe_unused = 0;
b6016b76
MC
3074
3075 sw_ring_cons = RX_RING_IDX(sw_cons);
3076 sw_ring_prod = RX_RING_IDX(sw_prod);
3077
bb4f98ab 3078 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 3079 skb = rx_buf->skb;
236b6394
MC
3080
3081 rx_buf->skb = NULL;
3082
3083 dma_addr = pci_unmap_addr(rx_buf, mapping);
3084
3085 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
601d3d18
BL
3086 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3087 PCI_DMA_FROMDEVICE);
b6016b76
MC
3088
3089 rx_hdr = (struct l2_fhdr *) skb->data;
1db82f2a 3090 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 3091 status = rx_hdr->l2_fhdr_status;
b6016b76 3092
1db82f2a
MC
3093 hdr_len = 0;
3094 if (status & L2_FHDR_STATUS_SPLIT) {
3095 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3096 pg_ring_used = 1;
3097 } else if (len > bp->rx_jumbo_thresh) {
3098 hdr_len = bp->rx_jumbo_thresh;
3099 pg_ring_used = 1;
3100 }
3101
990ec380
MC
3102 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3103 L2_FHDR_ERRORS_PHY_DECODE |
3104 L2_FHDR_ERRORS_ALIGNMENT |
3105 L2_FHDR_ERRORS_TOO_SHORT |
3106 L2_FHDR_ERRORS_GIANT_FRAME))) {
3107
3108 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3109 sw_ring_prod);
3110 if (pg_ring_used) {
3111 int pages;
3112
3113 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3114
3115 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3116 }
3117 goto next_rx;
3118 }
3119
1db82f2a 3120 len -= 4;
b6016b76 3121
5d5d0015 3122 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
3123 struct sk_buff *new_skb;
3124
f22828e8 3125 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 3126 if (new_skb == NULL) {
bb4f98ab 3127 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
3128 sw_ring_prod);
3129 goto next_rx;
3130 }
b6016b76
MC
3131
3132 /* aligned copy */
d89cb6af 3133 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
3134 BNX2_RX_OFFSET - 6,
3135 new_skb->data, len + 6);
3136 skb_reserve(new_skb, 6);
b6016b76 3137 skb_put(new_skb, len);
b6016b76 3138
bb4f98ab 3139 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
3140 sw_ring_cons, sw_ring_prod);
3141
3142 skb = new_skb;
bb4f98ab 3143 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 3144 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 3145 goto next_rx;
b6016b76 3146
f22828e8
MC
3147 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3148 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3149 vtag = rx_hdr->l2_fhdr_vlan_tag;
3150#ifdef BCM_VLAN
3151 if (bp->vlgrp)
3152 hw_vlan = 1;
3153 else
3154#endif
3155 {
3156 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3157 __skb_push(skb, 4);
3158
3159 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3160 ve->h_vlan_proto = htons(ETH_P_8021Q);
3161 ve->h_vlan_TCI = htons(vtag);
3162 len += 4;
3163 }
3164 }
3165
b6016b76
MC
3166 skb->protocol = eth_type_trans(skb, bp->dev);
3167
3168 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3169 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3170
745720e5 3171 dev_kfree_skb(skb);
b6016b76
MC
3172 goto next_rx;
3173
3174 }
3175
b6016b76
MC
3176 skb->ip_summed = CHECKSUM_NONE;
3177 if (bp->rx_csum &&
3178 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3179 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3180
ade2bfe7
MC
3181 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3182 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3183 skb->ip_summed = CHECKSUM_UNNECESSARY;
3184 }
3185
0c8dfc83
DM
3186 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3187
b6016b76 3188#ifdef BCM_VLAN
f22828e8
MC
3189 if (hw_vlan)
3190 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
b6016b76
MC
3191 else
3192#endif
3193 netif_receive_skb(skb);
3194
b6016b76
MC
3195 rx_pkt++;
3196
3197next_rx:
b6016b76
MC
3198 sw_cons = NEXT_RX_BD(sw_cons);
3199 sw_prod = NEXT_RX_BD(sw_prod);
3200
3201 if ((rx_pkt == budget))
3202 break;
f4e418f7
MC
3203
3204 /* Refresh hw_cons to see if there is new work */
3205 if (sw_cons == hw_cons) {
35efa7c1 3206 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3207 rmb();
3208 }
b6016b76 3209 }
bb4f98ab
MC
3210 rxr->rx_cons = sw_cons;
3211 rxr->rx_prod = sw_prod;
b6016b76 3212
1db82f2a 3213 if (pg_ring_used)
bb4f98ab 3214 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3215
bb4f98ab 3216 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3217
bb4f98ab 3218 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3219
3220 mmiowb();
3221
3222 return rx_pkt;
3223
3224}
3225
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 *
 * @irq: interrupt vector (unused here)
 * @dev_instance: the struct bnx2_napi registered for this vector
 *
 * Always returns IRQ_HANDLED (MSI is never shared).
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        /* Ack the interrupt and mask further ones until NAPI poll runs. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3248
/* MSI one-shot ISR - unlike bnx2_msi(), no explicit ack/mask register
 * write is issued here; the handler only schedules NAPI.
 * (Presumably one-shot mode self-masks in hardware — TODO confirm
 * against the chip documentation.)
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3265
/* INTx ISR - may be shared with other devices, so it must detect
 * whether this device actually raised the interrupt and return
 * IRQ_NONE otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Ack and mask the interrupt until NAPI poll completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Latch the status index only when we actually win the right
         * to schedule NAPI, so the poll loop sees a consistent value.
         */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3304
f4e418f7 3305static inline int
43e80b89 3306bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3307{
35e9010b 3308 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3309 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3310
bb4f98ab 3311 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3312 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3313 return 1;
43e80b89
MC
3314 return 0;
3315}
3316
/* Attention events serviced by the slow path (link changes, timer abort). */
#define STATUS_ATTN_EVENTS	(STATUS_ATTN_BITS_LINK_STATE | \
				 STATUS_ATTN_BITS_TIMER_ABORT)

/* Return nonzero if this vector has any pending work: fast-path RX/TX
 * completions, CNIC events (when built in), or unacked attention bits.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;

        if (bnx2_has_fast_work(bnapi))
                return 1;

#ifdef BCM_CNIC
        /* CNIC work pending if its tag lags the current status index. */
        if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
                return 1;
#endif

        /* An attention event is pending when its bit differs from the
         * corresponding ack bit.
         */
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;

        return 0;
}
3339
/* Workaround for missed MSIs: if there is pending work but the status
 * index has not advanced since the previous idle check, the MSI was
 * likely lost.  Toggle the MSI enable bit and re-invoke the MSI
 * handler by hand to kick NAPI.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        /* Pulse MSI enable off/on, then service manually. */
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next idle check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3361
#ifdef BCM_CNIC
/* Hand the status block to the CNIC driver's handler, if one is
 * registered.  cnic_ops is RCU-protected; the returned tag records
 * how far CNIC has processed (compared in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3378
/* Slow-path poll: service link-state / timer-abort attention events
 * if any are pending (bit differs from its ack bit).
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3398
3399static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3400 int work_done, int budget)
3401{
3402 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3403 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3404
35e9010b 3405 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3406 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3407
bb4f98ab 3408 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3409 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3410
6f535763
DM
3411 return work_done;
3412}
3413
/* NAPI poll routine for MSI-X vectors: only fast-path (RX/TX) work,
 * no link/attention handling (that is done by the base vector's
 * bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Re-enable the interrupt, reporting the index
                         * up to which work was processed.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3440
/* Main NAPI poll routine (INTx / MSI, and base vector for MSI-X):
 * services link attention, fast-path RX/TX, and CNIC, looping until
 * either the budget is consumed or no more work remains.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                /* MSI/MSI-X: a single ack with the final
                                 * index re-enables the interrupt.
                                 */
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: first write the index with MASK_INT still
                         * set, then write again without it to unmask.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3489
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the MAC's RX filtering: promiscuous / all-multicast /
 * multicast hash, VLAN tag stripping, and perfect-match unicast
 * filters, then commits the sort-mode register.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with promisc and keep-vlan cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep VLAN tags (no HW stripping) only when no vlan group is
         * registered and the chip/firmware allows it.
         */
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                struct dev_mc_list *mclist;
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                /* Hash each multicast address (CRC32) into the 256-bit
                 * filter spread over NUM_MC_HASH_REGISTERS registers.
                 */
                for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
                     i++, mclist = mclist->next) {

                        crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        /* Too many unicast addresses for perfect filtering: fall back
         * to promiscuous.
         */
        if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into to the match filter list */
                i = 0;
                list_for_each_entry(ha, &dev->uc.list, list) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Commit sort mode: clear, program, then enable. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3585
57579f76
MC
3586static int __devinit
3587check_fw_section(const struct firmware *fw,
3588 const struct bnx2_fw_file_section *section,
3589 u32 alignment, bool non_empty)
3590{
3591 u32 offset = be32_to_cpu(section->offset);
3592 u32 len = be32_to_cpu(section->len);
3593
3594 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3595 return -EINVAL;
3596 if ((non_empty && len == 0) || len > fw->size - offset ||
3597 len & (alignment - 1))
3598 return -EINVAL;
3599 return 0;
3600}
3601
3602static int __devinit
3603check_mips_fw_entry(const struct firmware *fw,
3604 const struct bnx2_mips_fw_file_entry *entry)
3605{
3606 if (check_fw_section(fw, &entry->text, 4, true) ||
3607 check_fw_section(fw, &entry->data, 4, false) ||
3608 check_fw_section(fw, &entry->rodata, 4, false))
3609 return -EINVAL;
3610 return 0;
3611}
3612
/* Load and sanity-check the MIPS and RV2P firmware images for this
 * chip (5709 vs. 5706/5708 variants).  The images are kept in
 * bp->mips_firmware / bp->rv2p_firmware.
 * NOTE(review): on error the already-requested firmware is not
 * released here — presumably the caller's error path does that;
 * confirm against the probe code.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
                       mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
                       rv2p_fw_file);
                return rc;
        }
        /* Validate every CPU entry of the MIPS image and both RV2P
         * processor sections before anything is downloaded to the chip.
         */
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
                       mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
                       rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
3664
3665static u32
3666rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3667{
3668 switch (idx) {
3669 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3670 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3671 rv2p_code |= RV2P_BD_PAGE_SIZE;
3672 break;
3673 }
3674 return rv2p_code;
3675}
3676
/* Download one RV2P processor's firmware: write the instruction
 * stream two 32-bit words (one 64-bit instruction) at a time, apply
 * the fixup table, then hold the processor in reset (un-stall is
 * done later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each processor has its own address/command register. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* i counts bytes; each iteration writes one 8-byte instruction
         * at instruction index i/8.
         */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Apply the (up to 8) fixup locations recorded in the image. */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3736
af3ee519 3737static int
57579f76
MC
3738load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3739 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3740{
57579f76
MC
3741 u32 addr, len, file_offset;
3742 __be32 *data;
b6016b76
MC
3743 u32 offset;
3744 u32 val;
3745
3746 /* Halt the CPU. */
2726d6e1 3747 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3748 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3749 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3750 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3751
3752 /* Load the Text area. */
57579f76
MC
3753 addr = be32_to_cpu(fw_entry->text.addr);
3754 len = be32_to_cpu(fw_entry->text.len);
3755 file_offset = be32_to_cpu(fw_entry->text.offset);
3756 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3757
57579f76
MC
3758 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3759 if (len) {
b6016b76
MC
3760 int j;
3761
57579f76
MC
3762 for (j = 0; j < (len / 4); j++, offset += 4)
3763 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3764 }
3765
57579f76
MC
3766 /* Load the Data area. */
3767 addr = be32_to_cpu(fw_entry->data.addr);
3768 len = be32_to_cpu(fw_entry->data.len);
3769 file_offset = be32_to_cpu(fw_entry->data.offset);
3770 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3771
57579f76
MC
3772 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3773 if (len) {
b6016b76
MC
3774 int j;
3775
57579f76
MC
3776 for (j = 0; j < (len / 4); j++, offset += 4)
3777 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3778 }
3779
3780 /* Load the Read-Only area. */
57579f76
MC
3781 addr = be32_to_cpu(fw_entry->rodata.addr);
3782 len = be32_to_cpu(fw_entry->rodata.len);
3783 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3784 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3785
3786 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3787 if (len) {
b6016b76
MC
3788 int j;
3789
57579f76
MC
3790 for (j = 0; j < (len / 4); j++, offset += 4)
3791 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3792 }
3793
3794 /* Clear the pre-fetch instruction. */
2726d6e1 3795 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3796
3797 val = be32_to_cpu(fw_entry->start_addr);
3798 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3799
3800 /* Start the CPU. */
2726d6e1 3801 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3802 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3803 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3804 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3805
3806 return 0;
b6016b76
MC
3807}
3808
/* Download firmware to all on-chip processors: both RV2P engines and
 * the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns 0 on success or the first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
3848
/* Transition the device between PCI power states.
 *
 * D0: clear the power-management state, wait out the D3hot exit delay
 * if needed, and restore normal EMAC/RPM modes.
 * D3hot: optionally configure Wake-on-LAN (force 10/100 autoneg on
 * copper, program magic-packet/ACPI matching, enable EMAC+RPM),
 * notify the firmware, then write the D3hot state bits.
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* Save autoneg settings; they are temporarily
                         * overridden to negotiate a low-power link speed
                         * on copper ports.
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the firmware we are suspending (with or without WOL). */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                /* Program the D3hot state bits (value 3 in the state
                 * field); 5706 A0/A1 only enter D3hot when WOL is armed.
                 */
                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
3986
3987static int
3988bnx2_acquire_nvram_lock(struct bnx2 *bp)
3989{
3990 u32 val;
3991 int j;
3992
3993 /* Request access to the flash interface. */
3994 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3995 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3996 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3997 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3998 break;
3999
4000 udelay(5);
4001 }
4002
4003 if (j >= NVRAM_TIMEOUT_COUNT)
4004 return -EBUSY;
4005
4006 return 0;
4007}
4008
4009static int
4010bnx2_release_nvram_lock(struct bnx2 *bp)
4011{
4012 int j;
4013 u32 val;
4014
4015 /* Relinquish nvram interface. */
4016 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4017
4018 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4019 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4020 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4021 break;
4022
4023 udelay(5);
4024 }
4025
4026 if (j >= NVRAM_TIMEOUT_COUNT)
4027 return -EBUSY;
4028
4029 return 0;
4030}
4031
4032
/* Enable writes to NVRAM.  Sets the PCI-side write-enable bit and,
 * for flash parts that require it (BNX2_NV_WREN), issues a WREN
 * command and waits for completion.  Returns 0 on success, -EBUSY if
 * the WREN command times out.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
        u32 val;

        val = REG_RD(bp, BNX2_MISC_CFG);
        REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

        if (bp->flash_info->flags & BNX2_NV_WREN) {
                int j;

                /* Clear DONE, then issue the write-enable command. */
                REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
                REG_WR(bp, BNX2_NVM_COMMAND,
                       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

                for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                        udelay(5);

                        val = REG_RD(bp, BNX2_NVM_COMMAND);
                        if (val & BNX2_NVM_COMMAND_DONE)
                                break;
                }

                if (j >= NVRAM_TIMEOUT_COUNT)
                        return -EBUSY;
        }
        return 0;
}
4061
4062static void
4063bnx2_disable_nvram_write(struct bnx2 *bp)
4064{
4065 u32 val;
4066
4067 val = REG_RD(bp, BNX2_MISC_CFG);
4068 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4069}
4070
4071
4072static void
4073bnx2_enable_nvram_access(struct bnx2 *bp)
4074{
4075 u32 val;
4076
4077 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4078 /* Enable both bits, even on read. */
6aa20a22 4079 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4080 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4081}
4082
4083static void
4084bnx2_disable_nvram_access(struct bnx2 *bp)
4085{
4086 u32 val;
4087
4088 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4089 /* Disable both bits, even after read. */
6aa20a22 4090 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4091 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4092 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4093}
4094
/* Erase one NVRAM page at @offset.  A no-op for buffered flash parts.
 * Returns 0 on success or when no erase is needed, -EBUSY on timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
        u32 cmd;
        int j;

        if (bp->flash_info->flags & BNX2_NV_BUFFERED)
                /* Buffered flash, no erase needed */
                return 0;

        /* Build an erase command */
        cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
              BNX2_NVM_COMMAND_DOIT;

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue an erase command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE)
                        break;
        }

        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4134
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, 4 bytes).  @cmd_flags carries first/last framing bits
 * for multi-word transfers.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4178
4179
4180static int
4181bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4182{
b491edd5
AV
4183 u32 cmd;
4184 __be32 val32;
b6016b76
MC
4185 int j;
4186
4187 /* Build the command word. */
4188 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4189
e30372c9
MC
4190 /* Calculate an offset of a buffered flash, not needed for 5709. */
4191 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
4192 offset = ((offset / bp->flash_info->page_size) <<
4193 bp->flash_info->page_bits) +
4194 (offset % bp->flash_info->page_size);
4195 }
4196
4197 /* Need to clear DONE bit separately. */
4198 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4199
4200 memcpy(&val32, val, 4);
b6016b76
MC
4201
4202 /* Write the data. */
b491edd5 4203 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
b6016b76
MC
4204
4205 /* Address of the NVRAM to write to. */
4206 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4207
4208 /* Issue the write command. */
4209 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4210
4211 /* Wait for completion. */
4212 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4213 udelay(5);
4214
4215 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4216 break;
4217 }
4218 if (j >= NVRAM_TIMEOUT_COUNT)
4219 return -EBUSY;
4220
4221 return 0;
4222}
4223
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * 5709 has a fixed flash spec; older chips are matched against
 * flash_table using the strap bits latched in NVM_CFG1.  If the flash
 * interface has not been reconfigured yet (bit 30 clear), the matching
 * entry's CFG1-CFG3/WRITE1 values are programmed into the controller.
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured; match on the
		 * backup strap bits stored in each entry's config1. */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field is valid. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall
	 * back to the table entry's total_size if it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4306
/* Read @buf_size bytes from NVRAM starting at byte @offset into
 * @ret_buf.  Handles arbitrary (unaligned) offset and length by
 * reading whole dwords and copying out the requested bytes, and frames
 * the transfer with BNX2_NVM_COMMAND_FIRST/LAST flags.  Acquires and
 * releases the NVRAM lock internally.  Returns 0 or a negative errno.
 *
 * NOTE(review): on the early-return error paths the NVRAM lock and
 * access enable are not unwound here — presumably recovered elsewhere;
 * verify against callers.
 */
static int
bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
		int buf_size)
{
	int rc = 0;
	u32 cmd_flags, offset32, len32, extra;

	if (buf_size == 0)
		return 0;

	/* Request access to the flash interface. */
	if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
		return rc;

	/* Enable access to flash interface */
	bnx2_enable_nvram_access(bp);

	len32 = buf_size;
	offset32 = offset;
	extra = 0;

	cmd_flags = 0;

	/* Unaligned start: read the containing dword and copy the tail. */
	if (offset32 & 3) {
		u8 buf[4];
		u32 pre_len;

		offset32 &= ~3;
		pre_len = 4 - (offset & 3);

		if (pre_len >= len32) {
			pre_len = len32;
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;
		}
		else {
			cmd_flags = BNX2_NVM_COMMAND_FIRST;
		}

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		if (rc)
			return rc;

		memcpy(ret_buf, buf + (offset & 3), pre_len);

		offset32 += 4;
		ret_buf += pre_len;
		len32 -= pre_len;
	}
	/* Unaligned end: round the dword count up and remember how many
	 * trailing bytes to discard. */
	if (len32 & 3) {
		extra = 4 - (len32 & 3);
		len32 = (len32 + 4) & ~3;
	}

	if (len32 == 4) {
		u8 buf[4];

		/* cmd_flags non-zero means FIRST was already issued above. */
		if (cmd_flags)
			cmd_flags = BNX2_NVM_COMMAND_LAST;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST |
				    BNX2_NVM_COMMAND_LAST;

		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}
	else if (len32 > 0) {
		u8 buf[4];

		/* Read the first word. */
		if (cmd_flags)
			cmd_flags = 0;
		else
			cmd_flags = BNX2_NVM_COMMAND_FIRST;

		rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);

		/* Advance to the next dword. */
		offset32 += 4;
		ret_buf += 4;
		len32 -= 4;

		/* Middle words go straight into the caller's buffer. */
		while (len32 > 4 && rc == 0) {
			rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);

			/* Advance to the next dword. */
			offset32 += 4;
			ret_buf += 4;
			len32 -= 4;
		}

		if (rc)
			return rc;

		/* Final word: read into a bounce buffer so the extra
		 * padding bytes are not written past ret_buf. */
		cmd_flags = BNX2_NVM_COMMAND_LAST;
		rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);

		memcpy(ret_buf, buf, 4 - extra);
	}

	/* Disable access to flash interface */
	bnx2_disable_nvram_access(bp);

	bnx2_release_nvram_lock(bp);

	return rc;
}
4416
4417static int
4418bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4419 int buf_size)
4420{
4421 u32 written, offset32, len32;
e6be763f 4422 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
4423 int rc = 0;
4424 int align_start, align_end;
4425
4426 buf = data_buf;
4427 offset32 = offset;
4428 len32 = buf_size;
4429 align_start = align_end = 0;
4430
4431 if ((align_start = (offset32 & 3))) {
4432 offset32 &= ~3;
c873879c
MC
4433 len32 += align_start;
4434 if (len32 < 4)
4435 len32 = 4;
b6016b76
MC
4436 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4437 return rc;
4438 }
4439
4440 if (len32 & 3) {
c873879c
MC
4441 align_end = 4 - (len32 & 3);
4442 len32 += align_end;
4443 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4444 return rc;
b6016b76
MC
4445 }
4446
4447 if (align_start || align_end) {
e6be763f
MC
4448 align_buf = kmalloc(len32, GFP_KERNEL);
4449 if (align_buf == NULL)
b6016b76
MC
4450 return -ENOMEM;
4451 if (align_start) {
e6be763f 4452 memcpy(align_buf, start, 4);
b6016b76
MC
4453 }
4454 if (align_end) {
e6be763f 4455 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 4456 }
e6be763f
MC
4457 memcpy(align_buf + align_start, data_buf, buf_size);
4458 buf = align_buf;
b6016b76
MC
4459 }
4460
e30372c9 4461 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
ae181bc4
MC
4462 flash_buffer = kmalloc(264, GFP_KERNEL);
4463 if (flash_buffer == NULL) {
4464 rc = -ENOMEM;
4465 goto nvram_write_end;
4466 }
4467 }
4468
b6016b76
MC
4469 written = 0;
4470 while ((written < len32) && (rc == 0)) {
4471 u32 page_start, page_end, data_start, data_end;
4472 u32 addr, cmd_flags;
4473 int i;
b6016b76
MC
4474
4475 /* Find the page_start addr */
4476 page_start = offset32 + written;
4477 page_start -= (page_start % bp->flash_info->page_size);
4478 /* Find the page_end addr */
4479 page_end = page_start + bp->flash_info->page_size;
4480 /* Find the data_start addr */
4481 data_start = (written == 0) ? offset32 : page_start;
4482 /* Find the data_end addr */
6aa20a22 4483 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
4484 (offset32 + len32) : page_end;
4485
4486 /* Request access to the flash interface. */
4487 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4488 goto nvram_write_end;
4489
4490 /* Enable access to flash interface */
4491 bnx2_enable_nvram_access(bp);
4492
4493 cmd_flags = BNX2_NVM_COMMAND_FIRST;
e30372c9 4494 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4495 int j;
4496
4497 /* Read the whole page into the buffer
4498 * (non-buffer flash only) */
4499 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4500 if (j == (bp->flash_info->page_size - 4)) {
4501 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4502 }
4503 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
4504 page_start + j,
4505 &flash_buffer[j],
b6016b76
MC
4506 cmd_flags);
4507
4508 if (rc)
4509 goto nvram_write_end;
4510
4511 cmd_flags = 0;
4512 }
4513 }
4514
4515 /* Enable writes to flash interface (unlock write-protect) */
4516 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4517 goto nvram_write_end;
4518
b6016b76
MC
4519 /* Loop to write back the buffer data from page_start to
4520 * data_start */
4521 i = 0;
e30372c9 4522 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
c873879c
MC
4523 /* Erase the page */
4524 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4525 goto nvram_write_end;
4526
4527 /* Re-enable the write again for the actual write */
4528 bnx2_enable_nvram_write(bp);
4529
b6016b76
MC
4530 for (addr = page_start; addr < data_start;
4531 addr += 4, i += 4) {
6aa20a22 4532
b6016b76
MC
4533 rc = bnx2_nvram_write_dword(bp, addr,
4534 &flash_buffer[i], cmd_flags);
4535
4536 if (rc != 0)
4537 goto nvram_write_end;
4538
4539 cmd_flags = 0;
4540 }
4541 }
4542
4543 /* Loop to write the new data from data_start to data_end */
bae25761 4544 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76 4545 if ((addr == page_end - 4) ||
e30372c9 4546 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
b6016b76
MC
4547 (addr == data_end - 4))) {
4548
4549 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4550 }
4551 rc = bnx2_nvram_write_dword(bp, addr, buf,
4552 cmd_flags);
4553
4554 if (rc != 0)
4555 goto nvram_write_end;
4556
4557 cmd_flags = 0;
4558 buf += 4;
4559 }
4560
4561 /* Loop to write back the buffer data from data_end
4562 * to page_end */
e30372c9 4563 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4564 for (addr = data_end; addr < page_end;
4565 addr += 4, i += 4) {
6aa20a22 4566
b6016b76
MC
4567 if (addr == page_end-4) {
4568 cmd_flags = BNX2_NVM_COMMAND_LAST;
4569 }
4570 rc = bnx2_nvram_write_dword(bp, addr,
4571 &flash_buffer[i], cmd_flags);
4572
4573 if (rc != 0)
4574 goto nvram_write_end;
4575
4576 cmd_flags = 0;
4577 }
4578 }
4579
4580 /* Disable writes to flash interface (lock write-protect) */
4581 bnx2_disable_nvram_write(bp);
4582
4583 /* Disable access to flash interface */
4584 bnx2_disable_nvram_access(bp);
4585 bnx2_release_nvram_lock(bp);
4586
4587 /* Increment written */
4588 written += data_end - data_start;
4589 }
4590
4591nvram_write_end:
e6be763f
MC
4592 kfree(flash_buffer);
4593 kfree(align_buf);
b6016b76
MC
4594 return rc;
4595}
4596
/* Negotiate optional capabilities with the bootcode firmware.
 *
 * Clears the remote-PHY and keep-VLAN flags, then re-derives them from
 * the firmware capability mailbox: keep-VLAN is granted when ASF is off
 * or when the firmware advertises it; remote-PHY mode is enabled for
 * SerDes ports when the firmware supports it, and bp->phy_port is set
 * from the firmware's link status.  If the device is up, the driver
 * acknowledges the accepted capabilities back to the firmware.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF management firmware, VLAN stripping can always be
	 * kept under driver control. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Firmware reports the current media type of the
		 * remotely-managed PHY. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Only ack while the interface is running. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4636
b4b36042
MC
/* Map the MSI-X table and PBA into separate GRC windows so the host
 * can access them through PCI memory space.  The separate-window mode
 * must be selected first; window ordering matters.
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4645
b6016b76
MC
/* Perform a coordinated soft reset of the chip.
 *
 * Quiesces DMA, handshakes WAIT0/WAIT1 with the bootcode around the
 * reset, deposits a driver-reset signature so the firmware treats it
 * as a soft reset, then issues the chip-specific reset (MISC_COMMAND
 * on 5709, PCICFG core-reset on older parts) and re-validates byte
 * swapping.  Afterwards, firmware capabilities are re-read and 5706 A0
 * workarounds plus the MSI-X table mapping are reapplied.
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to firmware.
 * Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* If remote-PHY management changed the port type, reprogram the
	 * default link settings for the new medium. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* GRC windows were reset; remap the MSI-X table if in use. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4751
/* Bring the chip from post-reset state to fully operational.
 *
 * Programs DMA byte/word swapping and channel counts (with PCI-X and
 * 5706-specific tweaks), initializes the context memory and on-chip
 * CPUs, sets the MAC address, MQ/RV2P/TBDR page sizes, MTU and RBUF
 * sizing, host-coalescing parameters (per-vector for MSI-X), clears
 * statistics, sets the RX filter, and completes the WAIT2 firmware
 * handshake before enabling all blocks.  Ordering of the register
 * writes follows the hardware bring-up sequence and must not change.
 * Returns 0 or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds assume at least a standard frame. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing for the additional MSI-X status blocks. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4975
c76c0475
MC
4976static void
4977bnx2_clear_ring_states(struct bnx2 *bp)
4978{
4979 struct bnx2_napi *bnapi;
35e9010b 4980 struct bnx2_tx_ring_info *txr;
bb4f98ab 4981 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
4982 int i;
4983
4984 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4985 bnapi = &bp->bnx2_napi[i];
35e9010b 4986 txr = &bnapi->tx_ring;
bb4f98ab 4987 rxr = &bnapi->rx_ring;
c76c0475 4988
35e9010b
MC
4989 txr->tx_cons = 0;
4990 txr->hw_tx_cons = 0;
bb4f98ab
MC
4991 rxr->rx_prod_bseq = 0;
4992 rxr->rx_prod = 0;
4993 rxr->rx_cons = 0;
4994 rxr->rx_pg_prod = 0;
4995 rxr->rx_pg_cons = 0;
c76c0475
MC
4996 }
4997}
4998
59b47d8a 4999static void
35e9010b 5000bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
5001{
5002 u32 val, offset0, offset1, offset2, offset3;
62a8313c 5003 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a
MC
5004
5005 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5006 offset0 = BNX2_L2CTX_TYPE_XI;
5007 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5008 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5009 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5010 } else {
5011 offset0 = BNX2_L2CTX_TYPE;
5012 offset1 = BNX2_L2CTX_CMD_TYPE;
5013 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5014 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5015 }
5016 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 5017 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
5018
5019 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 5020 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 5021
35e9010b 5022 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 5023 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 5024
35e9010b 5025 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 5026 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 5027}
b6016b76
MC
5028
5029static void
35e9010b 5030bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76
MC
5031{
5032 struct tx_bd *txbd;
c76c0475
MC
5033 u32 cid = TX_CID;
5034 struct bnx2_napi *bnapi;
35e9010b 5035 struct bnx2_tx_ring_info *txr;
c76c0475 5036
35e9010b
MC
5037 bnapi = &bp->bnx2_napi[ring_num];
5038 txr = &bnapi->tx_ring;
5039
5040 if (ring_num == 0)
5041 cid = TX_CID;
5042 else
5043 cid = TX_TSS_CID + ring_num - 1;
b6016b76 5044
2f8af120
MC
5045 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5046
35e9010b 5047 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 5048
35e9010b
MC
5049 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5050 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 5051
35e9010b
MC
5052 txr->tx_prod = 0;
5053 txr->tx_prod_bseq = 0;
6aa20a22 5054
35e9010b
MC
5055 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5056 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 5057
35e9010b 5058 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
5059}
5060
5061static void
5d5d0015
MC
5062bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5063 int num_rings)
b6016b76 5064{
b6016b76 5065 int i;
5d5d0015 5066 struct rx_bd *rxbd;
6aa20a22 5067
5d5d0015 5068 for (i = 0; i < num_rings; i++) {
13daffa2 5069 int j;
b6016b76 5070
5d5d0015 5071 rxbd = &rx_ring[i][0];
13daffa2 5072 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5073 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5074 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5075 }
5d5d0015 5076 if (i == (num_rings - 1))
13daffa2
MC
5077 j = 0;
5078 else
5079 j = i + 1;
5d5d0015
MC
5080 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5081 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5082 }
5d5d0015
MC
5083}
5084
/* Initialize RX ring @ring_num: set up its normal and (optional) page
 * rings, program the L2 RX context with the ring DMA addresses and
 * jumbo page parameters, pre-fill the rings with receive buffers, and
 * publish the initial producer indices to the hardware mailboxes.
 * Ring 0 uses RX_CID; additional RSS rings use RX_RSS_CID onward.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Default: no page ring (PG_BUF_SIZE = 0). */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the page-sized secondary ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal skb ring likewise. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Tell the hardware how many buffers are available. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5164
35e9010b
MC
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * enable TSS for the extra TX rings and program the RSS indirection
 * table and hash configuration for the extra RX rings.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Pack four 1-byte indirection entries per 32-bit word
		 * and write each completed word to RXP scratch memory. */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5209
5d5d0015 5210static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5211{
5d5d0015 5212 u32 max, num_rings = 1;
13daffa2 5213
5d5d0015
MC
5214 while (ring_size > MAX_RX_DESC_CNT) {
5215 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5216 num_rings++;
5217 }
5218 /* round to next power of 2 */
5d5d0015 5219 max = max_size;
13daffa2
MC
5220 while ((max & num_rings) == 0)
5221 max >>= 1;
5222
5223 if (num_rings != max)
5224 max <<= 1;
5225
5d5d0015
MC
5226 return max;
5227}
5228
/* Compute RX buffer sizes and ring/page-ring geometry for the
 * current MTU and the requested ring size @size.  Sets
 * bp->rx_buf_use_size, rx_buf_size, rx_jumbo_thresh, rx_copy_thresh
 * and the ring-count/max-index fields consumed by the alloc and
 * init paths.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint, used to decide whether a single buffer
	 * still fits in one page or the page (jumbo) ring is needed.
	 */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Header goes in the normal buffer; payload is split
		 * across page-ring entries.  '40' presumably accounts
		 * for the header bytes kept out of the pages — NOTE
		 * (review): confirm against the RX split logic.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In page mode the first buffer only holds the header. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5267
b6016b76
MC
/* Free every skb still owned by the TX rings (e.g. on close/reset).
 * Must only run after TX DMA has been stopped.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated — nothing to free. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* One skb occupies nr_frags + 1 descriptors;
			 * skip them all at once.
			 */
			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}
5299
5300static void
5301bnx2_free_rx_skbs(struct bnx2 *bp)
5302{
5303 int i;
5304
bb4f98ab
MC
5305 for (i = 0; i < bp->num_rx_rings; i++) {
5306 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5307 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5308 int j;
b6016b76 5309
bb4f98ab
MC
5310 if (rxr->rx_buf_ring == NULL)
5311 return;
b6016b76 5312
bb4f98ab
MC
5313 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5314 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5315 struct sk_buff *skb = rx_buf->skb;
b6016b76 5316
bb4f98ab
MC
5317 if (skb == NULL)
5318 continue;
b6016b76 5319
bb4f98ab
MC
5320 pci_unmap_single(bp->pdev,
5321 pci_unmap_addr(rx_buf, mapping),
5322 bp->rx_buf_use_size,
5323 PCI_DMA_FROMDEVICE);
b6016b76 5324
bb4f98ab
MC
5325 rx_buf->skb = NULL;
5326
5327 dev_kfree_skb(skb);
5328 }
5329 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5330 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5331 }
5332}
5333
/* Drop all driver-owned skbs: TX first, then RX (incl. page ring). */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5340
/* Full reset: quiesce the chip with @reset_code, drop all in-flight
 * skbs, then reprogram the chip and all rings.  Returns 0 or a
 * negative errno from the chip reset/init steps.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* skbs are freed even if the reset failed, before bailing out. */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}
5357
/* Reset the NIC and bring the PHY/link back up.
 * @reset_phy: forwarded to bnx2_init_phy() to request a PHY reset.
 * PHY accesses are serialized under bp->phy_lock.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* Remote-PHY setups get their link state from firmware events. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5374
74bf4ba3
MC
/* Quiesce the chip for shutdown/suspend, picking the firmware reset
 * code that matches the Wake-on-LAN configuration.
 */
static int
bnx2_shutdown_chip(struct bnx2 *bp)
{
	u32 reset_code;

	if (bp->flags & BNX2_FLAG_NO_WOL)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;

	return bnx2_reset_chip(bp, reset_code);
}
5389
b6016b76
MC
/* Ethtool self-test: verify read/write and read-only behavior of a
 * table of chip registers.  For each entry, writable bits (rw_mask)
 * must accept 0 and all-ones; read-only bits (ro_mask) must survive
 * both writes unchanged.  The original value is always restored.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
/* Entry applies only to pre-5709 chips. */
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Table terminator. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		/* After writing 0, all writable bits must read as 0. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be unaffected by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		/* After writing ~0, all writable bits must read as 1. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5560
/* Walk one on-chip memory region: write each test pattern to every
 * 32-bit word in [start, start+size) via indirect register access
 * and read it back.  Returns 0 on success, -ENODEV on mismatch.
 */
static int
bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
{
	static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
		0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
	int i;

	for (i = 0; i < sizeof(test_pattern) / 4; i++) {
		u32 offset;

		for (offset = 0; offset < size; offset += 4) {

			bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);

			if (bnx2_reg_rd_ind(bp, start + offset) !=
				test_pattern[i]) {
				return -ENODEV;
			}
		}
	}
	return 0;
}
5583
/* Ethtool self-test: pattern-test the chip's internal memory
 * regions.  The region list differs per chip family (the 5709 lacks
 * the 0x160000 region).  Returns 0 or -ENODEV on first failure.
 */
static int
bnx2_test_memory(struct bnx2 *bp)
{
	int ret = 0;
	int i;
	static struct mem_entry {
		u32 offset;
		u32 len;
	} mem_tbl_5706[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0x160000, 0x4000 },
		{ 0xffffffff, 0    },	/* terminator */
	},
	mem_tbl_5709[] = {
		{ 0x60000,  0x4000 },
		{ 0xa0000,  0x3000 },
		{ 0xe0000,  0x4000 },
		{ 0x120000, 0x4000 },
		{ 0x1a0000, 0x4000 },
		{ 0xffffffff, 0    },	/* terminator */
	};
	struct mem_entry *mem_tbl;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		mem_tbl = mem_tbl_5709;
	else
		mem_tbl = mem_tbl_5706;

	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
		if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
			mem_tbl[i].len)) != 0) {
			return ret;
		}
	}

	return ret;
}
5625
bc5a0690
MC
5626#define BNX2_MAC_LOOPBACK 0
5627#define BNX2_PHY_LOOPBACK 1
5628
/* Ethtool loopback self-test: put the chip in MAC or PHY loopback,
 * transmit one self-addressed test frame on ring 0 and verify it is
 * received intact.  Returns 0 on success, -EINVAL for an unknown
 * mode, -ENOMEM/-EIO on setup failure, -ENODEV if the frame is not
 * received correctly.  PHY loopback is skipped (returns 0) when a
 * remote PHY owns the link.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test always uses vector 0 for both TX and RX. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo (page-split) threshold. */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Dest MAC = our own address; payload = recognizable pattern. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a coalescing pass so the RX consumer index is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single-fragment TX descriptor for the test frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Second coalescing pass to publish the TX/RX completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully transmitted ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: chip-reported length includes 4 bytes of CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5759
bc5a0690
MC
5760#define BNX2_MAC_LOOPBACK_FAILED 1
5761#define BNX2_PHY_LOOPBACK_FAILED 2
5762#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5763 BNX2_PHY_LOOPBACK_FAILED)
5764
/* Ethtool self-test entry: run both MAC and PHY loopback after a
 * fresh NIC reset.  Returns a bitmask of BNX2_*_LOOPBACK_FAILED
 * flags (0 = all passed); both bits are reported if the device is
 * not running.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
5783
b6016b76
MC
5784#define NVRAM_SIZE 0x200
5785#define CRC32_RESIDUAL 0xdebb20e3
5786
/* Ethtool self-test: validate NVRAM contents.  Checks the magic
 * word at offset 0, then verifies the CRC32 residual of two 256-byte
 * halves of the 512-byte block at offset 0x100.  Returns 0 on
 * success, -ENODEV on bad magic/CRC, or the NVRAM read error.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* A block with its CRC appended hashes to the fixed residual. */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
5821
/* Ethtool self-test: report link state.  Remote-PHY setups use the
 * cached bp->link_up; otherwise BMSR is read twice (latched-low
 * status) under phy_lock.  Returns 0 if link is up, else -ENODEV.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Two reads: the first clears a latched link-down indication. */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5847
/* Verify interrupt delivery: force a status-block update via the
 * host coalescing block and poll (up to ~100ms) for the interrupt
 * acknowledge index to advance.  Used to validate MSI at open time.
 * Returns 0 if an interrupt was seen, else -ENODEV.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
	int i;
	u16 status_idx;

	if (!netif_running(bp->dev))
		return -ENODEV;

	status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

	/* This register is not touched during run-time. */
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
	REG_RD(bp, BNX2_HC_COMMAND);

	for (i = 0; i < 10; i++) {
		if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
			status_idx) {

			break;
		}

		msleep_interruptible(10);
	}
	if (i < 10)
		return 0;

	return -ENODEV;
}
5877
38ea3686 5878/* Determining link for parallel detection. */
b2fadeae
MC
/* Determining link for parallel detection (5706 SerDes).
 * Probes shadow/expansion PHY registers: signal detect must be
 * present, no sync/RUDI errors, and the peer must not be sending
 * autoneg CONFIG words.  Returns 1 if a forced-speed link partner
 * appears to be present, 0 otherwise (or if parallel detect is
 * disabled via BNX2_PHY_FLAG_NO_PARALLEL).
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	/* Read twice: first read clears latched status bits. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5909
/* Periodic (timer-context) link maintenance for the 5706 SerDes:
 * implements parallel detection by toggling autoneg on/off when the
 * link partner uses a forced speed, and forces the link down when
 * sync is lost.  Runs under bp->phy_lock (spin_lock: timer context).
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently restarted — give it time to settle. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		/* No link while autonegotiating: if a forced-speed
		 * partner is detected, force 1000/full ourselves.
		 */
		if (bmcr & BMCR_ANENABLE) {
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link came up via parallel detect: if the partner now
		 * advertises autoneg (reg 0x15 bit 5 via shadow 0x17),
		 * switch back to autonegotiation.
		 */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		/* Latched status: read AN_DBG twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
b6016b76 5971
f8dd064e
MC
/* Periodic link maintenance for the 5708 SerDes: when autoneg fails
 * to bring the link up, alternate between forced 2.5G and autoneg.
 * No-op for remote-PHY configurations or parts without 2.5G support.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G with a shorter re-check. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced mode did not link either — go back to
			 * autoneg and skip the next 2 timer ticks.
			 */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6004
48b01e2d
MC
/* Periodic driver timer (runs every bp->current_interval jiffies):
 * firmware heartbeat, MSI stall workaround, firmware drop-counter
 * refresh, 5708 statistics workaround, and SerDes link maintenance.
 * Skips all work while an interrupt-disable is pending (intr_sem).
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Plain (non-one-shot) MSI can lose edges on some chipsets. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	    BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6040
8e6a72c4
MC
/* Request all interrupt vectors listed in bp->irq_tbl.  INTx is
 * requested shared; MSI/MSI-X are exclusive.  On failure, vectors
 * already requested keep irq->requested set so bnx2_free_irq() can
 * release them.  Returns 0 or the request_irq() error.
 */
static int
bnx2_request_irq(struct bnx2 *bp)
{
	unsigned long flags;
	struct bnx2_irq *irq;
	int rc = 0, i;

	if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
		flags = 0;
	else
		flags = IRQF_SHARED;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		/* The per-vector napi struct is the handler's dev_id. */
		rc = request_irq(irq->vector, irq->handler, flags, irq->name,
				 &bp->bnx2_napi[i]);
		if (rc)
			break;
		irq->requested = 1;
	}
	return rc;
}
6063
/* Release every requested vector, then tear down MSI/MSI-X and clear
 * the interrupt-mode flags.  Safe to call after a partially failed
 * bnx2_request_irq().
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6083
/* Try to switch the device to MSI-X with @msix_vecs vectors.
 * Programs the chip's MSI-X table windows, then asks the PCI core
 * for BNX2_MAX_MSIX_VEC entries.  On failure the function returns
 * silently and the caller falls back to MSI/INTx; on success it
 * fills bp->irq_tbl and sets the MSIX/one-shot flags.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		/* e.g. "eth0-0", "eth0-1", ... for /proc/interrupts */
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6114
/* Choose the interrupt mode (MSI-X > MSI > INTx) and derive the
 * TX/RX ring counts from the number of vectors obtained.
 * @dis_msi forces legacy INTx (also used after a failed MSI test).
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			/* 5709 supports one-shot MSI (auto-masked). */
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count must be a power of 2 for the steering hash. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6148
b6016b76
MC
6149/* Called with rtnl_lock */
/* ndo_open — called with rtnl_lock held.  Powers the chip up,
 * selects the interrupt mode, allocates rings, requests IRQs and
 * initializes the NIC.  If MSI is in use, delivery is verified with
 * bnx2_test_intr() and the driver falls back to INTx on failure.
 * All acquired resources are released on any error path.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Re-select interrupt mode with MSI disabled
			 * and re-init without a PHY reset.
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6225
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop the netif,
 * fully re-initialize the NIC (with PHY reset) and restart.
 * intr_sem is set to 1 so the timer stays quiet until the first
 * interrupt re-arms it.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	/* Device may have been closed between schedule and execution. */
	if (!netif_running(bp->dev))
		return;

	bnx2_netif_stop(bp);

	bnx2_init_nic(bp, 1);

	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp);
}
6241
/* ndo_tx_timeout: defer the chip reset to process context. */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6250
6251#ifdef BCM_VLAN
6252/* Called with rtnl_lock */
/* VLAN group registration (rtnl_lock held, BCM_VLAN only).  Stores
 * the group, reprograms the RX mode, and tells firmware to keep (or
 * stop keeping) VLAN tags when the chip supports it.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce RX while the group pointer and RX mode change. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
b6016b76
MC
6267#endif
6268
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Hard-start transmit: map the skb for DMA, build one tx_bd per
 * fragment (head + page frags), and ring the doorbell registers.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;
	struct skb_shared_info *sp;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD per fragment plus one for the linear head. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
			dev->name);

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	/* VLAN tag goes in the upper 16 bits of the flags word. */
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			/* TCP header offset relative to the end of a
			 * standard IPv6 header; encoded in 8-byte units
			 * spread across several flag/mss bit fields.
			 */
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			/* IPv4: header length beyond 5 words plus TCP
			 * options, in 4-byte words, goes in bits 8+.
			 */
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	/* Map head + all frags in one call; mappings are stored in
	 * skb_shinfo (dma_head / dma_maps[]).
	 */
	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	sp = skb_shinfo(skb);
	mapping = sp->dma_head;

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;

	/* First BD: linear part of the skb. */
	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	/* One BD per page fragment. */
	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = sp->dma_maps[i];

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	/* Mark the last BD of the packet. */
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Doorbell: publish the new producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	/* Stop the queue when a worst-case packet no longer fits; the
	 * re-check avoids a race with bnx2_tx_int() freeing BDs.
	 */
	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
}
6409
/* Called with rtnl_lock.
 * Bring the interface down: quiesce interrupts/NAPI, stop the chip,
 * and release all IRQ/skb/DMA resources.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure a pending reset_task cannot run during/after teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to a low-power state while the interface is down. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6430
/* Fold a hardware counter (a 32-bit hi/lo pair named <ctr>_hi/<ctr>_lo)
 * into an unsigned long.  On 64-bit hosts the full 64-bit value is
 * reported; on 32-bit hosts only the low word fits in unsigned long.
 *
 * The 64-bit expansion is now fully parenthesized so the macro composes
 * safely inside any surrounding expression (the old form expanded to an
 * unparenthesized `a + b`, which would bind wrongly under e.g. `*` or a
 * cast at the call site).
 */
#define GET_NET_STATS64(ctr)					\
	(((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	  (unsigned long) (ctr##_lo)))

#define GET_NET_STATS32(ctr)				\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
6443
6444static struct net_device_stats *
6445bnx2_get_stats(struct net_device *dev)
6446{
972ec0d4 6447 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6448 struct statistics_block *stats_blk = bp->stats_blk;
d8e8034d 6449 struct net_device_stats *net_stats = &dev->stats;
b6016b76
MC
6450
6451 if (bp->stats_blk == NULL) {
6452 return net_stats;
6453 }
6454 net_stats->rx_packets =
6455 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6456 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6457 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6458
6459 net_stats->tx_packets =
6460 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6461 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6462 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6463
6464 net_stats->rx_bytes =
6465 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6466
6467 net_stats->tx_bytes =
6468 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6469
6aa20a22 6470 net_stats->multicast =
b6016b76
MC
6471 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6472
6aa20a22 6473 net_stats->collisions =
b6016b76
MC
6474 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6475
6aa20a22 6476 net_stats->rx_length_errors =
b6016b76
MC
6477 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6478 stats_blk->stat_EtherStatsOverrsizePkts);
6479
6aa20a22 6480 net_stats->rx_over_errors =
b6016b76
MC
6481 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6482
6aa20a22 6483 net_stats->rx_frame_errors =
b6016b76
MC
6484 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6485
6aa20a22 6486 net_stats->rx_crc_errors =
b6016b76
MC
6487 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6488
6489 net_stats->rx_errors = net_stats->rx_length_errors +
6490 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6491 net_stats->rx_crc_errors;
6492
6493 net_stats->tx_aborted_errors =
6494 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6495 stats_blk->stat_Dot3StatsLateCollisions);
6496
5b0c76ad
MC
6497 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6498 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
6499 net_stats->tx_carrier_errors = 0;
6500 else {
6501 net_stats->tx_carrier_errors =
6502 (unsigned long)
6503 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6504 }
6505
6506 net_stats->tx_errors =
6aa20a22 6507 (unsigned long)
b6016b76
MC
6508 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6509 +
6510 net_stats->tx_aborted_errors +
6511 net_stats->tx_carrier_errors;
6512
cea94db9
MC
6513 net_stats->rx_missed_errors =
6514 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6515 stats_blk->stat_FwRxDrop);
6516
b6016b76
MC
6517 return net_stats;
6518}
6519
6520/* All ethtool functions called with rtnl_lock */
6521
6522static int
6523bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6524{
972ec0d4 6525 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 6526 int support_serdes = 0, support_copper = 0;
b6016b76
MC
6527
6528 cmd->supported = SUPPORTED_Autoneg;
583c28e5 6529 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6530 support_serdes = 1;
6531 support_copper = 1;
6532 } else if (bp->phy_port == PORT_FIBRE)
6533 support_serdes = 1;
6534 else
6535 support_copper = 1;
6536
6537 if (support_serdes) {
b6016b76
MC
6538 cmd->supported |= SUPPORTED_1000baseT_Full |
6539 SUPPORTED_FIBRE;
583c28e5 6540 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
605a9e20 6541 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 6542
b6016b76 6543 }
7b6b8347 6544 if (support_copper) {
b6016b76
MC
6545 cmd->supported |= SUPPORTED_10baseT_Half |
6546 SUPPORTED_10baseT_Full |
6547 SUPPORTED_100baseT_Half |
6548 SUPPORTED_100baseT_Full |
6549 SUPPORTED_1000baseT_Full |
6550 SUPPORTED_TP;
6551
b6016b76
MC
6552 }
6553
7b6b8347
MC
6554 spin_lock_bh(&bp->phy_lock);
6555 cmd->port = bp->phy_port;
b6016b76
MC
6556 cmd->advertising = bp->advertising;
6557
6558 if (bp->autoneg & AUTONEG_SPEED) {
6559 cmd->autoneg = AUTONEG_ENABLE;
6560 }
6561 else {
6562 cmd->autoneg = AUTONEG_DISABLE;
6563 }
6564
6565 if (netif_carrier_ok(dev)) {
6566 cmd->speed = bp->line_speed;
6567 cmd->duplex = bp->duplex;
6568 }
6569 else {
6570 cmd->speed = -1;
6571 cmd->duplex = -1;
6572 }
7b6b8347 6573 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6574
6575 cmd->transceiver = XCVR_INTERNAL;
6576 cmd->phy_address = bp->phy_addr;
6577
6578 return 0;
6579}
6aa20a22 6580
b6016b76
MC
/* Validate and apply new link settings from ethtool.  Returns -EINVAL
 * on any invalid port/speed/advertising combination; otherwise stores
 * the request and, if the device is running, reprograms the PHY.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices may switch port types. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 advertising is invalid on fibre. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable PHY and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* Anything else: advertise everything the port
			 * type supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			/* Forced fibre: only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Forced copper: gigabit speeds cannot be forced. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6675
6676static void
6677bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6678{
972ec0d4 6679 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6680
6681 strcpy(info->driver, DRV_MODULE_NAME);
6682 strcpy(info->version, DRV_MODULE_VERSION);
6683 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6684 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6685}
6686
244ac4f4
MC
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* Size of the buffer ethtool must provide to bnx2_get_regs(). */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6694
6695static void
6696bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6697{
6698 u32 *p = _p, i, offset;
6699 u8 *orig_p = _p;
6700 struct bnx2 *bp = netdev_priv(dev);
6701 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6702 0x0800, 0x0880, 0x0c00, 0x0c10,
6703 0x0c30, 0x0d08, 0x1000, 0x101c,
6704 0x1040, 0x1048, 0x1080, 0x10a4,
6705 0x1400, 0x1490, 0x1498, 0x14f0,
6706 0x1500, 0x155c, 0x1580, 0x15dc,
6707 0x1600, 0x1658, 0x1680, 0x16d8,
6708 0x1800, 0x1820, 0x1840, 0x1854,
6709 0x1880, 0x1894, 0x1900, 0x1984,
6710 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6711 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6712 0x2000, 0x2030, 0x23c0, 0x2400,
6713 0x2800, 0x2820, 0x2830, 0x2850,
6714 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6715 0x3c00, 0x3c94, 0x4000, 0x4010,
6716 0x4080, 0x4090, 0x43c0, 0x4458,
6717 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6718 0x4fc0, 0x5010, 0x53c0, 0x5444,
6719 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6720 0x5fc0, 0x6000, 0x6400, 0x6428,
6721 0x6800, 0x6848, 0x684c, 0x6860,
6722 0x6888, 0x6910, 0x8000 };
6723
6724 regs->version = 0;
6725
6726 memset(p, 0, BNX2_REGDUMP_LEN);
6727
6728 if (!netif_running(bp->dev))
6729 return;
6730
6731 i = 0;
6732 offset = reg_boundaries[0];
6733 p += offset;
6734 while (offset < BNX2_REGDUMP_LEN) {
6735 *p++ = REG_RD(bp, offset);
6736 offset += 4;
6737 if (offset == reg_boundaries[i + 1]) {
6738 offset = reg_boundaries[i + 2];
6739 p = (u32 *) (orig_p + offset);
6740 i += 2;
6741 }
6742 }
6743}
6744
b6016b76
MC
6745static void
6746bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6747{
972ec0d4 6748 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6749
f86e82fb 6750 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6751 wol->supported = 0;
6752 wol->wolopts = 0;
6753 }
6754 else {
6755 wol->supported = WAKE_MAGIC;
6756 if (bp->wol)
6757 wol->wolopts = WAKE_MAGIC;
6758 else
6759 wol->wolopts = 0;
6760 }
6761 memset(&wol->sopass, 0, sizeof(wol->sopass));
6762}
6763
6764static int
6765bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6766{
972ec0d4 6767 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6768
6769 if (wol->wolopts & ~WAKE_MAGIC)
6770 return -EINVAL;
6771
6772 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6773 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6774 return -EINVAL;
6775
6776 bp->wol = 1;
6777 }
6778 else {
6779 bp->wol = 0;
6780 }
6781 return 0;
6782}
6783
/* Restart autonegotiation (ethtool -r).  Requires the device to be up
 * and autoneg to be enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote PHY: delegate the restart to the remote-phy setup path. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Lock dropped around the sleep; phy state is re-read
		 * after reacquisition.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6829
7959ea25
ON
6830static u32
6831bnx2_get_link(struct net_device *dev)
6832{
6833 struct bnx2 *bp = netdev_priv(dev);
6834
6835 return bp->link_up;
6836}
6837
b6016b76
MC
6838static int
6839bnx2_get_eeprom_len(struct net_device *dev)
6840{
972ec0d4 6841 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6842
1122db71 6843 if (bp->flash_info == NULL)
b6016b76
MC
6844 return 0;
6845
1122db71 6846 return (int) bp->flash_size;
b6016b76
MC
6847}
6848
6849static int
6850bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6851 u8 *eebuf)
6852{
972ec0d4 6853 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6854 int rc;
6855
9f52b564
MC
6856 if (!netif_running(dev))
6857 return -EAGAIN;
6858
1064e944 6859 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6860
6861 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6862
6863 return rc;
6864}
6865
6866static int
6867bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6868 u8 *eebuf)
6869{
972ec0d4 6870 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6871 int rc;
6872
9f52b564
MC
6873 if (!netif_running(dev))
6874 return -EAGAIN;
6875
1064e944 6876 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6877
6878 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6879
6880 return rc;
6881}
6882
6883static int
6884bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6885{
972ec0d4 6886 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6887
6888 memset(coal, 0, sizeof(struct ethtool_coalesce));
6889
6890 coal->rx_coalesce_usecs = bp->rx_ticks;
6891 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6892 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6893 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6894
6895 coal->tx_coalesce_usecs = bp->tx_ticks;
6896 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6897 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6898 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6899
6900 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6901
6902 return 0;
6903}
6904
6905static int
6906bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6907{
972ec0d4 6908 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6909
6910 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6911 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6912
6aa20a22 6913 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
6914 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6915
6916 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6917 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6918
6919 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6920 if (bp->rx_quick_cons_trip_int > 0xff)
6921 bp->rx_quick_cons_trip_int = 0xff;
6922
6923 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6924 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6925
6926 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6927 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6928
6929 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6930 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6931
6932 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6933 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6934 0xff;
6935
6936 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
6937 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6938 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6939 bp->stats_ticks = USEC_PER_SEC;
6940 }
7ea6920e
MC
6941 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6942 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6943 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6944
6945 if (netif_running(bp->dev)) {
6946 bnx2_netif_stop(bp);
9a120bc5 6947 bnx2_init_nic(bp, 0);
b6016b76
MC
6948 bnx2_netif_start(bp);
6949 }
6950
6951 return 0;
6952}
6953
6954static void
6955bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6956{
972ec0d4 6957 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6958
13daffa2 6959 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 6960 ering->rx_mini_max_pending = 0;
47bf4246 6961 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
6962
6963 ering->rx_pending = bp->rx_ring_size;
6964 ering->rx_mini_pending = 0;
47bf4246 6965 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
6966
6967 ering->tx_max_pending = MAX_TX_DESC_CNT;
6968 ering->tx_pending = bp->tx_ring_size;
6969}
6970
/* Resize the RX/TX rings.  If the device is running it is torn down,
 * the ring memory re-allocated, and the NIC re-initialized.  Returns 0
 * or a negative errno from the memory allocation.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6995
5d5d0015
MC
6996static int
6997bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6998{
6999 struct bnx2 *bp = netdev_priv(dev);
7000 int rc;
7001
7002 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7003 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7004 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7005
7006 return -EINVAL;
7007 }
7008 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7009 return rc;
7010}
7011
b6016b76
MC
7012static void
7013bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7014{
972ec0d4 7015 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7016
7017 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7018 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7019 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7020}
7021
7022static int
7023bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7024{
972ec0d4 7025 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7026
7027 bp->req_flow_ctrl = 0;
7028 if (epause->rx_pause)
7029 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7030 if (epause->tx_pause)
7031 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7032
7033 if (epause->autoneg) {
7034 bp->autoneg |= AUTONEG_FLOW_CTRL;
7035 }
7036 else {
7037 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7038 }
7039
9f52b564
MC
7040 if (netif_running(dev)) {
7041 spin_lock_bh(&bp->phy_lock);
7042 bnx2_setup_phy(bp, bp->phy_port);
7043 spin_unlock_bh(&bp->phy_lock);
7044 }
b6016b76
MC
7045
7046 return 0;
7047}
7048
7049static u32
7050bnx2_get_rx_csum(struct net_device *dev)
7051{
972ec0d4 7052 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7053
7054 return bp->rx_csum;
7055}
7056
7057static int
7058bnx2_set_rx_csum(struct net_device *dev, u32 data)
7059{
972ec0d4 7060 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7061
7062 bp->rx_csum = data;
7063 return 0;
7064}
7065
b11d6213
MC
7066static int
7067bnx2_set_tso(struct net_device *dev, u32 data)
7068{
4666f87a
MC
7069 struct bnx2 *bp = netdev_priv(dev);
7070
7071 if (data) {
b11d6213 7072 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7073 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7074 dev->features |= NETIF_F_TSO6;
7075 } else
7076 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7077 NETIF_F_TSO_ECN);
b11d6213
MC
7078 return 0;
7079}
7080
cea94db9 7081#define BNX2_NUM_STATS 46
b6016b76 7082
14ab9b86 7083static struct {
b6016b76
MC
7084 char string[ETH_GSTRING_LEN];
7085} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
7086 { "rx_bytes" },
7087 { "rx_error_bytes" },
7088 { "tx_bytes" },
7089 { "tx_error_bytes" },
7090 { "rx_ucast_packets" },
7091 { "rx_mcast_packets" },
7092 { "rx_bcast_packets" },
7093 { "tx_ucast_packets" },
7094 { "tx_mcast_packets" },
7095 { "tx_bcast_packets" },
7096 { "tx_mac_errors" },
7097 { "tx_carrier_errors" },
7098 { "rx_crc_errors" },
7099 { "rx_align_errors" },
7100 { "tx_single_collisions" },
7101 { "tx_multi_collisions" },
7102 { "tx_deferred" },
7103 { "tx_excess_collisions" },
7104 { "tx_late_collisions" },
7105 { "tx_total_collisions" },
7106 { "rx_fragments" },
7107 { "rx_jabbers" },
7108 { "rx_undersize_packets" },
7109 { "rx_oversize_packets" },
7110 { "rx_64_byte_packets" },
7111 { "rx_65_to_127_byte_packets" },
7112 { "rx_128_to_255_byte_packets" },
7113 { "rx_256_to_511_byte_packets" },
7114 { "rx_512_to_1023_byte_packets" },
7115 { "rx_1024_to_1522_byte_packets" },
7116 { "rx_1523_to_9022_byte_packets" },
7117 { "tx_64_byte_packets" },
7118 { "tx_65_to_127_byte_packets" },
7119 { "tx_128_to_255_byte_packets" },
7120 { "tx_256_to_511_byte_packets" },
7121 { "tx_512_to_1023_byte_packets" },
7122 { "tx_1024_to_1522_byte_packets" },
7123 { "tx_1523_to_9022_byte_packets" },
7124 { "rx_xon_frames" },
7125 { "rx_xoff_frames" },
7126 { "tx_xon_frames" },
7127 { "tx_xoff_frames" },
7128 { "rx_mac_ctrl_frames" },
7129 { "rx_filtered_packets" },
7130 { "rx_discards" },
cea94db9 7131 { "rx_fw_discards" },
b6016b76
MC
7132};
7133
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* 32-bit word offset into the statistics block for each ethtool
 * counter, in bnx2_stats_str_arr[] order.  64-bit counters point at
 * their _hi word; bnx2_get_ethtool_stats() reads the _lo word at
 * offset + 1.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7184
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 *
 * Byte width of each counter (8 = hi/lo pair, 4 = single word,
 * 0 = skip) used for 5706 A0-A2 and 5708 A0 chips; see
 * bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7195
5b0c76ad
MC
/* Counter widths for all other chip revisions; here the carrier-sense
 * counter (index 11) is valid, so only stat_IfHCInBadOctets is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7203
b6016b76
MC
7204#define BNX2_NUM_TESTS 6
7205
14ab9b86 7206static struct {
b6016b76
MC
7207 char string[ETH_GSTRING_LEN];
7208} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7209 { "register_test (offline)" },
7210 { "memory_test (offline)" },
7211 { "loopback_test (offline)" },
7212 { "nvram_test (online)" },
7213 { "interrupt_test (online)" },
7214 { "link_test (online)" },
7215};
7216
7217static int
b9f2c044 7218bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7219{
b9f2c044
JG
7220 switch (sset) {
7221 case ETH_SS_TEST:
7222 return BNX2_NUM_TESTS;
7223 case ETH_SS_STATS:
7224 return BNX2_NUM_STATS;
7225 default:
7226 return -EOPNOTSUPP;
7227 }
b6016b76
MC
7228}
7229
/* ethtool self-test.  buf[0..5] receive per-test results (non-zero =
 * failed) in bnx2_tests_str_arr[] order.  Offline tests reset the chip
 * into diagnostic mode; online tests run against the live device.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Chip must be powered up even if the interface is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the
		 * interface was down to begin with).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7288
7289static void
7290bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7291{
7292 switch (stringset) {
7293 case ETH_SS_STATS:
7294 memcpy(buf, bnx2_stats_str_arr,
7295 sizeof(bnx2_stats_str_arr));
7296 break;
7297 case ETH_SS_TEST:
7298 memcpy(buf, bnx2_tests_str_arr,
7299 sizeof(bnx2_tests_str_arr));
7300 break;
7301 }
7302}
7303
b6016b76
MC
7304static void
7305bnx2_get_ethtool_stats(struct net_device *dev,
7306 struct ethtool_stats *stats, u64 *buf)
7307{
972ec0d4 7308 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7309 int i;
7310 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 7311 u8 *stats_len_arr = NULL;
b6016b76
MC
7312
7313 if (hw_stats == NULL) {
7314 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7315 return;
7316 }
7317
5b0c76ad
MC
7318 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7319 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7320 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7321 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 7322 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
7323 else
7324 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
7325
7326 for (i = 0; i < BNX2_NUM_STATS; i++) {
7327 if (stats_len_arr[i] == 0) {
7328 /* skip this counter */
7329 buf[i] = 0;
7330 continue;
7331 }
7332 if (stats_len_arr[i] == 4) {
7333 /* 4-byte counter */
7334 buf[i] = (u64)
7335 *(hw_stats + bnx2_stats_offset_arr[i]);
7336 continue;
7337 }
7338 /* 8-byte counter */
7339 buf[i] = (((u64) *(hw_stats +
7340 bnx2_stats_offset_arr[i])) << 32) +
7341 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7342 }
7343}
7344
7345static int
7346bnx2_phys_id(struct net_device *dev, u32 data)
7347{
972ec0d4 7348 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7349 int i;
7350 u32 save;
7351
9f52b564
MC
7352 bnx2_set_power_state(bp, PCI_D0);
7353
b6016b76
MC
7354 if (data == 0)
7355 data = 2;
7356
7357 save = REG_RD(bp, BNX2_MISC_CFG);
7358 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7359
7360 for (i = 0; i < (data * 2); i++) {
7361 if ((i % 2) == 0) {
7362 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7363 }
7364 else {
7365 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7366 BNX2_EMAC_LED_1000MB_OVERRIDE |
7367 BNX2_EMAC_LED_100MB_OVERRIDE |
7368 BNX2_EMAC_LED_10MB_OVERRIDE |
7369 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7370 BNX2_EMAC_LED_TRAFFIC);
7371 }
7372 msleep_interruptible(500);
7373 if (signal_pending(current))
7374 break;
7375 }
7376 REG_WR(bp, BNX2_EMAC_LED, 0);
7377 REG_WR(bp, BNX2_MISC_CFG, save);
9f52b564
MC
7378
7379 if (!netif_running(dev))
7380 bnx2_set_power_state(bp, PCI_D3hot);
7381
b6016b76
MC
7382 return 0;
7383}
7384
4666f87a
MC
7385static int
7386bnx2_set_tx_csum(struct net_device *dev, u32 data)
7387{
7388 struct bnx2 *bp = netdev_priv(dev);
7389
7390 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7391 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7392 else
7393 return (ethtool_op_set_tx_csum(dev, data));
7394}
7395
7282d491 7396static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7397 .get_settings = bnx2_get_settings,
7398 .set_settings = bnx2_set_settings,
7399 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7400 .get_regs_len = bnx2_get_regs_len,
7401 .get_regs = bnx2_get_regs,
b6016b76
MC
7402 .get_wol = bnx2_get_wol,
7403 .set_wol = bnx2_set_wol,
7404 .nway_reset = bnx2_nway_reset,
7959ea25 7405 .get_link = bnx2_get_link,
b6016b76
MC
7406 .get_eeprom_len = bnx2_get_eeprom_len,
7407 .get_eeprom = bnx2_get_eeprom,
7408 .set_eeprom = bnx2_set_eeprom,
7409 .get_coalesce = bnx2_get_coalesce,
7410 .set_coalesce = bnx2_set_coalesce,
7411 .get_ringparam = bnx2_get_ringparam,
7412 .set_ringparam = bnx2_set_ringparam,
7413 .get_pauseparam = bnx2_get_pauseparam,
7414 .set_pauseparam = bnx2_set_pauseparam,
7415 .get_rx_csum = bnx2_get_rx_csum,
7416 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7417 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7418 .set_sg = ethtool_op_set_sg,
b11d6213 7419 .set_tso = bnx2_set_tso,
b6016b76
MC
7420 .self_test = bnx2_self_test,
7421 .get_strings = bnx2_get_strings,
7422 .phys_id = bnx2_phys_id,
b6016b76 7423 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7424 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
7425};
7426
7427/* Called with rtnl_lock */
7428static int
7429bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7430{
14ab9b86 7431 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7432 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7433 int err;
7434
7435 switch(cmd) {
7436 case SIOCGMIIPHY:
7437 data->phy_id = bp->phy_addr;
7438
7439 /* fallthru */
7440 case SIOCGMIIREG: {
7441 u32 mii_regval;
7442
583c28e5 7443 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7444 return -EOPNOTSUPP;
7445
dad3e452
MC
7446 if (!netif_running(dev))
7447 return -EAGAIN;
7448
c770a65c 7449 spin_lock_bh(&bp->phy_lock);
b6016b76 7450 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7451 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7452
7453 data->val_out = mii_regval;
7454
7455 return err;
7456 }
7457
7458 case SIOCSMIIREG:
7459 if (!capable(CAP_NET_ADMIN))
7460 return -EPERM;
7461
583c28e5 7462 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7463 return -EOPNOTSUPP;
7464
dad3e452
MC
7465 if (!netif_running(dev))
7466 return -EAGAIN;
7467
c770a65c 7468 spin_lock_bh(&bp->phy_lock);
b6016b76 7469 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7470 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7471
7472 return err;
7473
7474 default:
7475 /* do nothing */
7476 break;
7477 }
7478 return -EOPNOTSUPP;
7479}
7480
7481/* Called with rtnl_lock */
7482static int
7483bnx2_change_mac_addr(struct net_device *dev, void *p)
7484{
7485 struct sockaddr *addr = p;
972ec0d4 7486 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7487
73eef4cd
MC
7488 if (!is_valid_ether_addr(addr->sa_data))
7489 return -EINVAL;
7490
b6016b76
MC
7491 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7492 if (netif_running(dev))
5fcaed01 7493 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7494
7495 return 0;
7496}
7497
7498/* Called with rtnl_lock */
7499static int
7500bnx2_change_mtu(struct net_device *dev, int new_mtu)
7501{
972ec0d4 7502 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7503
7504 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7505 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7506 return -EINVAL;
7507
7508 dev->mtu = new_mtu;
5d5d0015 7509 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7510}
7511
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* Netpoll hook: invoke each vector's interrupt handler with its IRQ
 * masked so netconsole/kgdb can make progress without interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		disable_irq(bp->irq_tbl[i].vector);
		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
		enable_irq(bp->irq_tbl[i].vector);
	}
}
#endif
7526
253c8b75
MC
7527static void __devinit
7528bnx2_get_5709_media(struct bnx2 *bp)
7529{
7530 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7531 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7532 u32 strap;
7533
7534 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7535 return;
7536 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7537 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7538 return;
7539 }
7540
7541 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7542 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7543 else
7544 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7545
7546 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7547 switch (strap) {
7548 case 0x4:
7549 case 0x5:
7550 case 0x6:
583c28e5 7551 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7552 return;
7553 }
7554 } else {
7555 switch (strap) {
7556 case 0x1:
7557 case 0x2:
7558 case 0x4:
583c28e5 7559 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7560 return;
7561 }
7562 }
7563}
7564
883e5151
MC
7565static void __devinit
7566bnx2_get_pci_speed(struct bnx2 *bp)
7567{
7568 u32 reg;
7569
7570 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7571 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7572 u32 clkreg;
7573
f86e82fb 7574 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7575
7576 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7577
7578 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7579 switch (clkreg) {
7580 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7581 bp->bus_speed_mhz = 133;
7582 break;
7583
7584 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7585 bp->bus_speed_mhz = 100;
7586 break;
7587
7588 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7589 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7590 bp->bus_speed_mhz = 66;
7591 break;
7592
7593 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7594 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7595 bp->bus_speed_mhz = 50;
7596 break;
7597
7598 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7599 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7600 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7601 bp->bus_speed_mhz = 33;
7602 break;
7603 }
7604 }
7605 else {
7606 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7607 bp->bus_speed_mhz = 66;
7608 else
7609 bp->bus_speed_mhz = 33;
7610 }
7611
7612 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7613 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7614
7615}
7616
b6016b76
MC
7617static int __devinit
7618bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7619{
7620 struct bnx2 *bp;
7621 unsigned long mem_len;
58fc2ea4 7622 int rc, i, j;
b6016b76 7623 u32 reg;
40453c83 7624 u64 dma_mask, persist_dma_mask;
b6016b76 7625
b6016b76 7626 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7627 bp = netdev_priv(dev);
b6016b76
MC
7628
7629 bp->flags = 0;
7630 bp->phy_flags = 0;
7631
7632 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7633 rc = pci_enable_device(pdev);
7634 if (rc) {
898eb71c 7635 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
7636 goto err_out;
7637 }
7638
7639 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7640 dev_err(&pdev->dev,
2e8a538d 7641 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
7642 rc = -ENODEV;
7643 goto err_out_disable;
7644 }
7645
7646 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7647 if (rc) {
9b91cf9d 7648 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
7649 goto err_out_disable;
7650 }
7651
7652 pci_set_master(pdev);
6ff2da49 7653 pci_save_state(pdev);
b6016b76
MC
7654
7655 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7656 if (bp->pm_cap == 0) {
9b91cf9d 7657 dev_err(&pdev->dev,
2e8a538d 7658 "Cannot find power management capability, aborting.\n");
b6016b76
MC
7659 rc = -EIO;
7660 goto err_out_release;
7661 }
7662
b6016b76
MC
7663 bp->dev = dev;
7664 bp->pdev = pdev;
7665
7666 spin_lock_init(&bp->phy_lock);
1b8227c4 7667 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
7668#ifdef BCM_CNIC
7669 mutex_init(&bp->cnic_lock);
7670#endif
c4028958 7671 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7672
7673 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
4edd473f 7674 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
b6016b76
MC
7675 dev->mem_end = dev->mem_start + mem_len;
7676 dev->irq = pdev->irq;
7677
7678 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7679
7680 if (!bp->regview) {
9b91cf9d 7681 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
7682 rc = -ENOMEM;
7683 goto err_out_release;
7684 }
7685
7686 /* Configure byte swap and enable write to the reg_window registers.
7687 * Rely on CPU to do target byte swapping on big endian systems
7688 * The chip's target access swapping will not swap all accesses
7689 */
7690 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7691 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7692 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7693
829ca9a3 7694 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7695
7696 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7697
883e5151
MC
7698 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7699 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7700 dev_err(&pdev->dev,
7701 "Cannot find PCIE capability, aborting.\n");
7702 rc = -EIO;
7703 goto err_out_unmap;
7704 }
f86e82fb 7705 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7706 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7707 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7708 } else {
59b47d8a
MC
7709 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7710 if (bp->pcix_cap == 0) {
7711 dev_err(&pdev->dev,
7712 "Cannot find PCIX capability, aborting.\n");
7713 rc = -EIO;
7714 goto err_out_unmap;
7715 }
7716 }
7717
b4b36042
MC
7718 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7719 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7720 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7721 }
7722
8e6a72c4
MC
7723 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7724 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7725 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7726 }
7727
40453c83
MC
7728 /* 5708 cannot support DMA addresses > 40-bit. */
7729 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7730 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7731 else
6a35528a 7732 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7733
7734 /* Configure DMA attributes. */
7735 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7736 dev->features |= NETIF_F_HIGHDMA;
7737 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7738 if (rc) {
7739 dev_err(&pdev->dev,
7740 "pci_set_consistent_dma_mask failed, aborting.\n");
7741 goto err_out_unmap;
7742 }
284901a9 7743 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
40453c83
MC
7744 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7745 goto err_out_unmap;
7746 }
7747
f86e82fb 7748 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7749 bnx2_get_pci_speed(bp);
b6016b76
MC
7750
7751 /* 5706A0 may falsely detect SERR and PERR. */
7752 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7753 reg = REG_RD(bp, PCI_COMMAND);
7754 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7755 REG_WR(bp, PCI_COMMAND, reg);
7756 }
7757 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7758 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7759
9b91cf9d 7760 dev_err(&pdev->dev,
2e8a538d 7761 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
7762 goto err_out_unmap;
7763 }
7764
7765 bnx2_init_nvram(bp);
7766
2726d6e1 7767 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7768
7769 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7770 BNX2_SHM_HDR_SIGNATURE_SIG) {
7771 u32 off = PCI_FUNC(pdev->devfn) << 2;
7772
2726d6e1 7773 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7774 } else
e3648b3d
MC
7775 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7776
b6016b76
MC
7777 /* Get the permanent MAC address. First we need to make sure the
7778 * firmware is actually running.
7779 */
2726d6e1 7780 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
7781
7782 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7783 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 7784 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
7785 rc = -ENODEV;
7786 goto err_out_unmap;
7787 }
7788
2726d6e1 7789 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
58fc2ea4
MC
7790 for (i = 0, j = 0; i < 3; i++) {
7791 u8 num, k, skip0;
7792
7793 num = (u8) (reg >> (24 - (i * 8)));
7794 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7795 if (num >= k || !skip0 || k == 1) {
7796 bp->fw_version[j++] = (num / k) + '0';
7797 skip0 = 0;
7798 }
7799 }
7800 if (i != 2)
7801 bp->fw_version[j++] = '.';
7802 }
2726d6e1 7803 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
7804 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7805 bp->wol = 1;
7806
7807 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 7808 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
7809
7810 for (i = 0; i < 30; i++) {
2726d6e1 7811 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
7812 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7813 break;
7814 msleep(10);
7815 }
7816 }
2726d6e1 7817 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
7818 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7819 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7820 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 7821 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4
MC
7822
7823 bp->fw_version[j++] = ' ';
7824 for (i = 0; i < 3; i++) {
2726d6e1 7825 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
7826 reg = swab32(reg);
7827 memcpy(&bp->fw_version[j], &reg, 4);
7828 j += 4;
7829 }
7830 }
b6016b76 7831
2726d6e1 7832 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
7833 bp->mac_addr[0] = (u8) (reg >> 8);
7834 bp->mac_addr[1] = (u8) reg;
7835
2726d6e1 7836 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
7837 bp->mac_addr[2] = (u8) (reg >> 24);
7838 bp->mac_addr[3] = (u8) (reg >> 16);
7839 bp->mac_addr[4] = (u8) (reg >> 8);
7840 bp->mac_addr[5] = (u8) reg;
7841
7842 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 7843 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
7844
7845 bp->rx_csum = 1;
7846
b6016b76
MC
7847 bp->tx_quick_cons_trip_int = 20;
7848 bp->tx_quick_cons_trip = 20;
7849 bp->tx_ticks_int = 80;
7850 bp->tx_ticks = 80;
6aa20a22 7851
b6016b76
MC
7852 bp->rx_quick_cons_trip_int = 6;
7853 bp->rx_quick_cons_trip = 6;
7854 bp->rx_ticks_int = 18;
7855 bp->rx_ticks = 18;
7856
7ea6920e 7857 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 7858
ac392abc 7859 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 7860
5b0c76ad
MC
7861 bp->phy_addr = 1;
7862
b6016b76 7863 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
7864 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7865 bnx2_get_5709_media(bp);
7866 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 7867 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 7868
0d8a6571 7869 bp->phy_port = PORT_TP;
583c28e5 7870 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 7871 bp->phy_port = PORT_FIBRE;
2726d6e1 7872 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 7873 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 7874 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7875 bp->wol = 0;
7876 }
38ea3686
MC
7877 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7878 /* Don't do parallel detect on this board because of
7879 * some board problems. The link will not go down
7880 * if we do parallel detect.
7881 */
7882 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7883 pdev->subsystem_device == 0x310c)
7884 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7885 } else {
5b0c76ad 7886 bp->phy_addr = 2;
5b0c76ad 7887 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 7888 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 7889 }
261dd5ca
MC
7890 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7891 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 7892 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
7893 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7894 (CHIP_REV(bp) == CHIP_REV_Ax ||
7895 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 7896 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 7897
7c62e83b
MC
7898 bnx2_init_fw_cap(bp);
7899
16088272
MC
7900 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7901 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
7902 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7903 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 7904 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7905 bp->wol = 0;
7906 }
dda1e390 7907
b6016b76
MC
7908 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7909 bp->tx_quick_cons_trip_int =
7910 bp->tx_quick_cons_trip;
7911 bp->tx_ticks_int = bp->tx_ticks;
7912 bp->rx_quick_cons_trip_int =
7913 bp->rx_quick_cons_trip;
7914 bp->rx_ticks_int = bp->rx_ticks;
7915 bp->comp_prod_trip_int = bp->comp_prod_trip;
7916 bp->com_ticks_int = bp->com_ticks;
7917 bp->cmd_ticks_int = bp->cmd_ticks;
7918 }
7919
f9317a40
MC
7920 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7921 *
7922 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7923 * with byte enables disabled on the unused 32-bit word. This is legal
7924 * but causes problems on the AMD 8132 which will eventually stop
7925 * responding after a while.
7926 *
7927 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7928 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7929 */
7930 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7931 struct pci_dev *amd_8132 = NULL;
7932
7933 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7934 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7935 amd_8132))) {
f9317a40 7936
44c10138
AK
7937 if (amd_8132->revision >= 0x10 &&
7938 amd_8132->revision <= 0x13) {
f9317a40
MC
7939 disable_msi = 1;
7940 pci_dev_put(amd_8132);
7941 break;
7942 }
7943 }
7944 }
7945
deaf391b 7946 bnx2_set_default_link(bp);
b6016b76
MC
7947 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7948
cd339a0e 7949 init_timer(&bp->timer);
ac392abc 7950 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
7951 bp->timer.data = (unsigned long) bp;
7952 bp->timer.function = bnx2_timer;
7953
b6016b76
MC
7954 return 0;
7955
7956err_out_unmap:
7957 if (bp->regview) {
7958 iounmap(bp->regview);
73eef4cd 7959 bp->regview = NULL;
b6016b76
MC
7960 }
7961
7962err_out_release:
7963 pci_release_regions(pdev);
7964
7965err_out_disable:
7966 pci_disable_device(pdev);
7967 pci_set_drvdata(pdev, NULL);
7968
7969err_out:
7970 return rc;
7971}
7972
883e5151
MC
7973static char * __devinit
7974bnx2_bus_string(struct bnx2 *bp, char *str)
7975{
7976 char *s = str;
7977
f86e82fb 7978 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
7979 s += sprintf(s, "PCI Express");
7980 } else {
7981 s += sprintf(s, "PCI");
f86e82fb 7982 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 7983 s += sprintf(s, "-X");
f86e82fb 7984 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
7985 s += sprintf(s, " 32-bit");
7986 else
7987 s += sprintf(s, " 64-bit");
7988 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7989 }
7990 return str;
7991}
7992
2ba582b7 7993static void __devinit
35efa7c1
MC
7994bnx2_init_napi(struct bnx2 *bp)
7995{
b4b36042 7996 int i;
35efa7c1 7997
b4b36042 7998 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
35e9010b
MC
7999 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8000 int (*poll)(struct napi_struct *, int);
8001
8002 if (i == 0)
8003 poll = bnx2_poll;
8004 else
f0ea2e63 8005 poll = bnx2_poll_msix;
35e9010b
MC
8006
8007 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8008 bnapi->bp = bp;
8009 }
35efa7c1
MC
8010}
8011
0421eae6
SH
8012static const struct net_device_ops bnx2_netdev_ops = {
8013 .ndo_open = bnx2_open,
8014 .ndo_start_xmit = bnx2_start_xmit,
8015 .ndo_stop = bnx2_close,
8016 .ndo_get_stats = bnx2_get_stats,
8017 .ndo_set_rx_mode = bnx2_set_rx_mode,
8018 .ndo_do_ioctl = bnx2_ioctl,
8019 .ndo_validate_addr = eth_validate_addr,
8020 .ndo_set_mac_address = bnx2_change_mac_addr,
8021 .ndo_change_mtu = bnx2_change_mtu,
8022 .ndo_tx_timeout = bnx2_tx_timeout,
8023#ifdef BCM_VLAN
8024 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8025#endif
8026#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8027 .ndo_poll_controller = poll_bnx2,
8028#endif
8029};
8030
b6016b76
MC
8031static int __devinit
8032bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8033{
8034 static int version_printed = 0;
8035 struct net_device *dev = NULL;
8036 struct bnx2 *bp;
0795af57 8037 int rc;
883e5151 8038 char str[40];
b6016b76
MC
8039
8040 if (version_printed++ == 0)
8041 printk(KERN_INFO "%s", version);
8042
8043 /* dev zeroed in init_etherdev */
706bf240 8044 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
b6016b76
MC
8045
8046 if (!dev)
8047 return -ENOMEM;
8048
8049 rc = bnx2_init_board(pdev, dev);
8050 if (rc < 0) {
8051 free_netdev(dev);
8052 return rc;
8053 }
8054
0421eae6 8055 dev->netdev_ops = &bnx2_netdev_ops;
b6016b76 8056 dev->watchdog_timeo = TX_TIMEOUT;
b6016b76 8057 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 8058
972ec0d4 8059 bp = netdev_priv(dev);
35efa7c1 8060 bnx2_init_napi(bp);
b6016b76 8061
1b2f922f
MC
8062 pci_set_drvdata(pdev, dev);
8063
57579f76
MC
8064 rc = bnx2_request_firmware(bp);
8065 if (rc)
8066 goto error;
8067
1b2f922f
MC
8068 memcpy(dev->dev_addr, bp->mac_addr, 6);
8069 memcpy(dev->perm_addr, bp->mac_addr, 6);
1b2f922f 8070
d212f87b 8071 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 8072 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
8073 dev->features |= NETIF_F_IPV6_CSUM;
8074
1b2f922f
MC
8075#ifdef BCM_VLAN
8076 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8077#endif
8078 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
8079 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8080 dev->features |= NETIF_F_TSO6;
1b2f922f 8081
b6016b76 8082 if ((rc = register_netdev(dev))) {
9b91cf9d 8083 dev_err(&pdev->dev, "Cannot register net device\n");
57579f76 8084 goto error;
b6016b76
MC
8085 }
8086
883e5151 8087 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
e174961c 8088 "IRQ %d, node addr %pM\n",
b6016b76 8089 dev->name,
fbbf68b7 8090 board_info[ent->driver_data].name,
b6016b76
MC
8091 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8092 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 8093 bnx2_bus_string(bp, str),
b6016b76 8094 dev->base_addr,
e174961c 8095 bp->pdev->irq, dev->dev_addr);
b6016b76 8096
b6016b76 8097 return 0;
57579f76
MC
8098
8099error:
8100 if (bp->mips_firmware)
8101 release_firmware(bp->mips_firmware);
8102 if (bp->rv2p_firmware)
8103 release_firmware(bp->rv2p_firmware);
8104
8105 if (bp->regview)
8106 iounmap(bp->regview);
8107 pci_release_regions(pdev);
8108 pci_disable_device(pdev);
8109 pci_set_drvdata(pdev, NULL);
8110 free_netdev(dev);
8111 return rc;
b6016b76
MC
8112}
8113
8114static void __devexit
8115bnx2_remove_one(struct pci_dev *pdev)
8116{
8117 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8118 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8119
afdc08b9
MC
8120 flush_scheduled_work();
8121
b6016b76
MC
8122 unregister_netdev(dev);
8123
57579f76
MC
8124 if (bp->mips_firmware)
8125 release_firmware(bp->mips_firmware);
8126 if (bp->rv2p_firmware)
8127 release_firmware(bp->rv2p_firmware);
8128
b6016b76
MC
8129 if (bp->regview)
8130 iounmap(bp->regview);
8131
8132 free_netdev(dev);
8133 pci_release_regions(pdev);
8134 pci_disable_device(pdev);
8135 pci_set_drvdata(pdev, NULL);
8136}
8137
8138static int
829ca9a3 8139bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
8140{
8141 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8142 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8143
6caebb02
MC
8144 /* PCI register 4 needs to be saved whether netif_running() or not.
8145 * MSI address and data need to be saved if using MSI and
8146 * netif_running().
8147 */
8148 pci_save_state(pdev);
b6016b76
MC
8149 if (!netif_running(dev))
8150 return 0;
8151
1d60290f 8152 flush_scheduled_work();
b6016b76
MC
8153 bnx2_netif_stop(bp);
8154 netif_device_detach(dev);
8155 del_timer_sync(&bp->timer);
74bf4ba3 8156 bnx2_shutdown_chip(bp);
b6016b76 8157 bnx2_free_skbs(bp);
829ca9a3 8158 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
8159 return 0;
8160}
8161
8162static int
8163bnx2_resume(struct pci_dev *pdev)
8164{
8165 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 8166 struct bnx2 *bp = netdev_priv(dev);
b6016b76 8167
6caebb02 8168 pci_restore_state(pdev);
b6016b76
MC
8169 if (!netif_running(dev))
8170 return 0;
8171
829ca9a3 8172 bnx2_set_power_state(bp, PCI_D0);
b6016b76 8173 netif_device_attach(dev);
9a120bc5 8174 bnx2_init_nic(bp, 1);
b6016b76
MC
8175 bnx2_netif_start(bp);
8176 return 0;
8177}
8178
6ff2da49
WX
8179/**
8180 * bnx2_io_error_detected - called when PCI error is detected
8181 * @pdev: Pointer to PCI device
8182 * @state: The current pci connection state
8183 *
8184 * This function is called after a PCI bus error affecting
8185 * this device has been detected.
8186 */
8187static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8188 pci_channel_state_t state)
8189{
8190 struct net_device *dev = pci_get_drvdata(pdev);
8191 struct bnx2 *bp = netdev_priv(dev);
8192
8193 rtnl_lock();
8194 netif_device_detach(dev);
8195
8196 if (netif_running(dev)) {
8197 bnx2_netif_stop(bp);
8198 del_timer_sync(&bp->timer);
8199 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8200 }
8201
8202 pci_disable_device(pdev);
8203 rtnl_unlock();
8204
8205 /* Request a slot slot reset. */
8206 return PCI_ERS_RESULT_NEED_RESET;
8207}
8208
8209/**
8210 * bnx2_io_slot_reset - called after the pci bus has been reset.
8211 * @pdev: Pointer to PCI device
8212 *
8213 * Restart the card from scratch, as if from a cold-boot.
8214 */
8215static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8216{
8217 struct net_device *dev = pci_get_drvdata(pdev);
8218 struct bnx2 *bp = netdev_priv(dev);
8219
8220 rtnl_lock();
8221 if (pci_enable_device(pdev)) {
8222 dev_err(&pdev->dev,
8223 "Cannot re-enable PCI device after reset.\n");
8224 rtnl_unlock();
8225 return PCI_ERS_RESULT_DISCONNECT;
8226 }
8227 pci_set_master(pdev);
8228 pci_restore_state(pdev);
8229
8230 if (netif_running(dev)) {
8231 bnx2_set_power_state(bp, PCI_D0);
8232 bnx2_init_nic(bp, 1);
8233 }
8234
8235 rtnl_unlock();
8236 return PCI_ERS_RESULT_RECOVERED;
8237}
8238
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
8258
8259static struct pci_error_handlers bnx2_err_handler = {
8260 .error_detected = bnx2_io_error_detected,
8261 .slot_reset = bnx2_io_slot_reset,
8262 .resume = bnx2_io_resume,
8263};
8264
b6016b76 8265static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8266 .name = DRV_MODULE_NAME,
8267 .id_table = bnx2_pci_tbl,
8268 .probe = bnx2_init_one,
8269 .remove = __devexit_p(bnx2_remove_one),
8270 .suspend = bnx2_suspend,
8271 .resume = bnx2_resume,
6ff2da49 8272 .err_handler = &bnx2_err_handler,
b6016b76
MC
8273};
8274
8275static int __init bnx2_init(void)
8276{
29917620 8277 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
8278}
8279
8280static void __exit bnx2_cleanup(void)
8281{
8282 pci_unregister_driver(&bnx2_pci_driver);
8283}
8284
8285module_init(bnx2_init);
8286module_exit(bnx2_cleanup);
8287
8288
8289