ARM: S5PC100: gpio.h cleanup
[linux-2.6-block.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
bec92044 3 * Copyright (c) 2004-2010 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
3a9c6a49 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
f2a4f052
MC
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
1977f032 30#include <linux/bitops.h>
f2a4f052
MC
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
c86a31f4 35#include <asm/page.h>
f2a4f052
MC
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
f2a4f052 39#include <linux/if_vlan.h>
08013fa3 40#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
41#define BCM_VLAN 1
42#endif
f2a4f052 43#include <net/ip.h>
de081fa5 44#include <net/tcp.h>
f2a4f052 45#include <net/checksum.h>
f2a4f052
MC
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/prefetch.h>
29b12174 49#include <linux/cache.h>
57579f76 50#include <linux/firmware.h>
706bf240 51#include <linux/log2.h>
f2a4f052 52
4edd473f
MC
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
b6016b76
MC
57#include "bnx2.h"
58#include "bnx2_fw.h"
b3448b0b 59
b6016b76 60#define DRV_MODULE_NAME "bnx2"
587611d6
MC
61#define DRV_MODULE_VERSION "2.0.9"
62#define DRV_MODULE_RELDATE "April 27, 2010"
bec92044 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
078b0735 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
bec92044
MC
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
b6016b76
MC
68
69#define RUN_AT(x) (jiffies + (x))
70
71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT (5*HZ)
73
fefa8645 74static char version[] __devinitdata =
b6016b76
MC
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
81MODULE_FIRMWARE(FW_MIPS_FILE_06);
82MODULE_FIRMWARE(FW_RV2P_FILE_06);
83MODULE_FIRMWARE(FW_MIPS_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09);
078b0735 85MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
b6016b76
MC
86
87static int disable_msi = 0;
88
89module_param(disable_msi, int, 0);
90MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/*
 * Board variants supported by this driver.  These values are stored in
 * the driver_data field of bnx2_pci_tbl and used to index board_info[]
 * for the printable adapter name, so all three must stay in the same
 * order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* Printable adapter names, indexed by board_t above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
7bb0a04f 123static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
146 { 0, }
147};
148
0ced9d01 149static const struct flash_spec flash_table[] =
b6016b76 150{
e30372c9
MC
151#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 153 /* Slow EEPROM */
37137709 154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
37137709
MC
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
b6016b76
MC
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
37137709 165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
37137709 171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
37137709
MC
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
226 /* Ateml Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
b6016b76
MC
236};
237
/* NVRAM parameters for the 5709 family, which uses a single fixed
 * buffered-flash layout instead of the strap-based flash_table lookup
 * used by the older chips. */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
b6016b76
MC
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
4327ba43
BL
249static void bnx2_init_napi(struct bnx2 *bp);
250
35e9010b 251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 252{
2f8af120 253 u32 diff;
e89bbf10 254
2f8af120 255 smp_mb();
faac9c4b
MC
256
257 /* The ring uses 256 indices for 255 entries, one of them
258 * needs to be skipped.
259 */
35e9010b 260 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
261 if (unlikely(diff >= TX_DESC_CNT)) {
262 diff &= 0xffff;
263 if (diff == TX_DESC_CNT)
264 diff = MAX_TX_DESC_CNT;
265 }
e89bbf10
MC
266 return (bp->tx_ring_size - diff);
267}
268
b6016b76
MC
269static u32
270bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271{
1b8227c4
MC
272 u32 val;
273
274 spin_lock_bh(&bp->indirect_lock);
b6016b76 275 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
276 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_bh(&bp->indirect_lock);
278 return val;
b6016b76
MC
279}
280
/*
 * Write @val to the device register at @offset indirectly, through the
 * PCICFG register window.  indirect_lock serializes all users of the
 * shared window registers.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
289
2726d6e1
MC
/* Write @val into the firmware shared-memory region at @offset
 * (relative to bp->shmem_base). */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
295
296static u32
297bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298{
299 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300}
301
b6016b76
MC
/*
 * Write @val to context memory at @cid_addr + @offset.
 *
 * 5709 chips use the CTX_CTX_DATA/CTX_CTX_CTRL register pair and the
 * write request is polled for completion (up to 5 x 5us).  Older chips
 * use the simpler CTX_DATA_ADR/CTX_DATA window.  indirect_lock
 * serializes access to these shared registers.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write-request bit.
		 * A timeout here is silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
325
4edd473f
MC
326#ifdef BCM_CNIC
327static int
328bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329{
330 struct bnx2 *bp = netdev_priv(dev);
331 struct drv_ctl_io *io = &info->data.io;
332
333 switch (info->cmd) {
334 case DRV_CTL_IO_WR_CMD:
335 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 break;
337 case DRV_CTL_IO_RD_CMD:
338 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 break;
340 case DRV_CTL_CTX_WR_CMD:
341 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342 break;
343 default:
344 return -EINVAL;
345 }
346 return 0;
347}
348
/*
 * Populate the IRQ information handed to the CNIC driver.
 *
 * With MSI-X the CNIC gets the vector right after the bp->irq_nvecs
 * vectors used by bnx2 (and the matching MSI-X status block slot);
 * otherwise it shares vector 0, and cnic_tag/cnic_present let the
 * shared interrupt path forward events to the CNIC.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	/* Status block sb_id lives at a BNX2_SBLK_MSIX_ALIGN_SIZE offset
	 * from the base status block allocation.
	 */
	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
375
/*
 * cnic_eth_dev callback: attach a CNIC driver instance.
 *
 * Returns -EINVAL if @ops is NULL, -EBUSY if a CNIC driver is already
 * registered, 0 on success.  cnic_ops is published with
 * rcu_assign_pointer(); readers access it under RCU (see
 * bnx2_unregister_cnic's synchronize_rcu()).
 *
 * NOTE(review): unlike bnx2_unregister_cnic(), this path does not take
 * bp->cnic_lock -- presumably the cnic layer serializes register/
 * unregister calls; verify against the caller.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
398
/*
 * cnic_eth_dev callback: detach the CNIC driver.
 *
 * Clears the registration state under cnic_lock, then waits for any
 * RCU readers of cnic_ops to drain before returning, so the caller may
 * safely free its ops structure afterwards.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
413
/*
 * Exported hook used by the CNIC driver to discover this device's
 * cnic_eth_dev structure and the register/unregister/control callbacks
 * it should use.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
430
431static void
432bnx2_cnic_stop(struct bnx2 *bp)
433{
434 struct cnic_ops *c_ops;
435 struct cnic_ctl_info info;
436
c5a88950
MC
437 mutex_lock(&bp->cnic_lock);
438 c_ops = bp->cnic_ops;
4edd473f
MC
439 if (c_ops) {
440 info.cmd = CNIC_CTL_STOP_CMD;
441 c_ops->cnic_ctl(bp->cnic_data, &info);
442 }
c5a88950 443 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
444}
445
446static void
447bnx2_cnic_start(struct bnx2 *bp)
448{
449 struct cnic_ops *c_ops;
450 struct cnic_ctl_info info;
451
c5a88950
MC
452 mutex_lock(&bp->cnic_lock);
453 c_ops = bp->cnic_ops;
4edd473f
MC
454 if (c_ops) {
455 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
457
458 bnapi->cnic_tag = bnapi->last_status_idx;
459 }
460 info.cmd = CNIC_CTL_START_CMD;
461 c_ops->cnic_ctl(bp->cnic_data, &info);
462 }
c5a88950 463 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
464}
465
466#else
467
/* CNIC support compiled out: the start/stop hooks become no-ops. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}

static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
477
478#endif
479
b6016b76
MC
/*
 * Read PHY register @reg over the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling of the PHY is enabled it is turned off for
 * the duration of the access and restored afterwards (presumably the
 * auto-poll state machine owns the MDIO bus while active -- the same
 * dance is done in bnx2_write_phy()).  The transaction is polled for
 * up to 50 x 10us; on timeout *@val is zeroed and -EBUSY is returned,
 * otherwise 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read command for the PHY at bp->phy_addr. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data bits once BUSY clears. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
536
/*
 * Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): hardware auto-polling is disabled around
 * the manual access and restored afterwards.  The transaction is
 * polled for up to 50 x 10us; returns -EBUSY on timeout, 0 otherwise.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write command for the PHY at bp->phy_addr. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
585
586static void
587bnx2_disable_int(struct bnx2 *bp)
588{
b4b36042
MC
589 int i;
590 struct bnx2_napi *bnapi;
591
592 for (i = 0; i < bp->irq_nvecs; i++) {
593 bnapi = &bp->bnx2_napi[i];
594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596 }
b6016b76
MC
597 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598}
599
/*
 * Unmask interrupts on every in-use vector.  Each vector gets two
 * writes: the first acks up to last_status_idx with interrupts still
 * masked, the second repeats the ack with the mask bit cleared.
 * The final HC_COMMAND write sets COAL_NOW -- NOTE(review): presumably
 * this kicks the host coalescing block so any event that arrived while
 * masked generates an interrupt immediately; verify against the chip
 * documentation.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
620
/*
 * Mask interrupts and wait for in-flight handlers to finish.
 *
 * intr_sem is incremented first so that bnx2_netif_start() will not
 * re-enable interrupts until the matching decrement.  If the device is
 * not running there is nothing to quiesce.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
634
35efa7c1
MC
635static void
636bnx2_napi_disable(struct bnx2 *bp)
637{
b4b36042
MC
638 int i;
639
640 for (i = 0; i < bp->irq_nvecs; i++)
641 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
642}
643
644static void
645bnx2_napi_enable(struct bnx2 *bp)
646{
b4b36042
MC
647 int i;
648
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
651}
652
/*
 * Quiesce the data path: optionally stop the CNIC first, then disable
 * NAPI polling and the tx queues, and finally mask and synchronize
 * interrupts.  trans_start is refreshed on every tx queue so the
 * netdev watchdog does not see a stale timestamp and fire a spurious
 * tx timeout while the device is stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		int i;

		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* prevent tx timeout */
		for (i = 0; i < bp->dev->num_tx_queues; i++) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(bp->dev, i);
			txq->trans_start = jiffies;
		}
	}
	bnx2_disable_int_sync(bp);
}
673
674static void
212f9934 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
676{
677 if (atomic_dec_and_test(&bp->intr_sem)) {
678 if (netif_running(bp->dev)) {
706bf240 679 netif_tx_wake_all_queues(bp->dev);
35efa7c1 680 bnx2_napi_enable(bp);
b6016b76 681 bnx2_enable_int(bp);
212f9934
MC
682 if (start_cnic)
683 bnx2_cnic_start(bp);
b6016b76
MC
684 }
685 }
686}
687
35e9010b
MC
688static void
689bnx2_free_tx_mem(struct bnx2 *bp)
690{
691 int i;
692
693 for (i = 0; i < bp->num_tx_rings; i++) {
694 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
695 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
696
697 if (txr->tx_desc_ring) {
698 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
699 txr->tx_desc_ring,
700 txr->tx_desc_mapping);
701 txr->tx_desc_ring = NULL;
702 }
703 kfree(txr->tx_buf_ring);
704 txr->tx_buf_ring = NULL;
705 }
706}
707
bb4f98ab
MC
708static void
709bnx2_free_rx_mem(struct bnx2 *bp)
710{
711 int i;
712
713 for (i = 0; i < bp->num_rx_rings; i++) {
714 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
715 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
716 int j;
717
718 for (j = 0; j < bp->rx_max_ring; j++) {
719 if (rxr->rx_desc_ring[j])
720 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
721 rxr->rx_desc_ring[j],
722 rxr->rx_desc_mapping[j]);
723 rxr->rx_desc_ring[j] = NULL;
724 }
25b0b999 725 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
726 rxr->rx_buf_ring = NULL;
727
728 for (j = 0; j < bp->rx_max_pg_ring; j++) {
729 if (rxr->rx_pg_desc_ring[j])
730 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
731 rxr->rx_pg_desc_ring[j],
732 rxr->rx_pg_desc_mapping[j]);
733 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 734 }
25b0b999 735 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
736 rxr->rx_pg_ring = NULL;
737 }
738}
739
35e9010b
MC
740static int
741bnx2_alloc_tx_mem(struct bnx2 *bp)
742{
743 int i;
744
745 for (i = 0; i < bp->num_tx_rings; i++) {
746 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
747 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
748
749 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
750 if (txr->tx_buf_ring == NULL)
751 return -ENOMEM;
752
753 txr->tx_desc_ring =
754 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
755 &txr->tx_desc_mapping);
756 if (txr->tx_desc_ring == NULL)
757 return -ENOMEM;
758 }
759 return 0;
760}
761
bb4f98ab
MC
/*
 * Allocate the software buffer rings and DMA descriptor rings (and,
 * when rx_pg_ring_size is non-zero, the page rings) for every rx ring.
 * Returns 0 or -ENOMEM; on failure the caller is expected to clean up
 * (bnx2_free_rx_mem() handles partially allocated state).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when the page ring is unused, so
		 * this loop body is skipped in that case.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
810
b6016b76
MC
/*
 * Release all host memory owned by the device: tx/rx rings, 5709
 * context pages and the combined status+statistics block.  The status
 * and statistics blocks come from one allocation (see
 * bnx2_alloc_mem()), so they are freed together and stats_blk is
 * cleared alongside status_blk.msi.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
836
/*
 * Allocate all host memory needed by the device.
 *
 * Layout of the single status+statistics DMA allocation:
 *   [ status block(s), cache-line aligned; one MSI block or
 *     BNX2_MAX_MSIX_HW_VEC aligned MSI-X slots ][ statistics block ]
 * Each MSI-X vector's napi context gets pointers into its own slot.
 * On 5709, 8KB of context memory is also allocated in page-sized
 * chunks.  Returns 0 or -ENOMEM; on failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base (MSI-style) status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
913
e3648b3d
MC
/*
 * Report the current link state (speed/duplex, autoneg status) to the
 * firmware through the BNX2_LINK_STATUS shared-memory word.  Skipped
 * entirely when a remote PHY is managed by the firmware itself
 * (BNX2_PHY_FLAG_REMOTE_PHY_CAP).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice -- presumably because its
			 * status bits are latched, so the second read
			 * reflects the current state; confirm against
			 * the MII spec.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
972
9b1084b8
MC
973static char *
974bnx2_xceiver_str(struct bnx2 *bp)
975{
976 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 977 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
978 "Copper"));
979}
980
b6016b76
MC
/*
 * Update the carrier state and log the new link status, then forward
 * it to the firmware via bnx2_report_fw_link().  The up-message is
 * built incrementally with pr_cont() so the flow-control details land
 * on the same log line as the netdev_info() header.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1011
/*
 * Resolve the negotiated flow-control configuration into bp->flow_ctrl
 * (a mask of FLOW_CTRL_TX/FLOW_CTRL_RX).
 *
 * If speed or flow control is forced (not both autonegotiated), the
 * requested setting is applied directly -- but only for full duplex,
 * since half duplex never gets pause.  5708 SerDes reports the
 * resolution directly in a status register.  Otherwise the local and
 * remote pause advertisements are compared per the standard resolution
 * table; 1000Base-X advertisement bits are first translated to their
 * twisted-pair equivalents so one table covers both media.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: hardware already resolved pause; read it back. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map 1000Base-X pause bits onto the copper PAUSE_CAP/ASYM bits
	 * so the resolution logic below works for both.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1087
27a005b8
MC
/* Record link-up speed/duplex for the 5709 SerDes PHY by reading the
 * GP_STATUS block.  Forced-speed configs just copy the requested values.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select the GP_STATUS register block, read the autoneg result,
	 * then restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		/* Speed was forced; the status register is not relevant. */
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1126
b6016b76 1127static int
5b0c76ad
MC
1128bnx2_5708s_linkup(struct bnx2 *bp)
1129{
1130 u32 val;
1131
1132 bp->link_up = 1;
1133 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1134 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1135 case BCM5708S_1000X_STAT1_SPEED_10:
1136 bp->line_speed = SPEED_10;
1137 break;
1138 case BCM5708S_1000X_STAT1_SPEED_100:
1139 bp->line_speed = SPEED_100;
1140 break;
1141 case BCM5708S_1000X_STAT1_SPEED_1G:
1142 bp->line_speed = SPEED_1000;
1143 break;
1144 case BCM5708S_1000X_STAT1_SPEED_2G5:
1145 bp->line_speed = SPEED_2500;
1146 break;
1147 }
1148 if (val & BCM5708S_1000X_STAT1_FD)
1149 bp->duplex = DUPLEX_FULL;
1150 else
1151 bp->duplex = DUPLEX_HALF;
1152
1153 return 0;
1154}
1155
/* Record link-up speed/duplex for the 5706 SerDes PHY.  The 5706S only
 * links at 1 Gbps; duplex comes from BMCR when forced, or from the
 * common local/remote 1000X advertisement when autonegotiated.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	if (!(bmcr & BMCR_ANENABLE)) {
		/* Forced mode: BMCR already gave us the duplex. */
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Duplex is the highest capability both sides advertised. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1192
/* Record link speed/duplex for a copper PHY.  With autoneg enabled, the
 * result is resolved from the 1000BASE-T registers first and then the
 * 10/100 advertisement; with autoneg disabled, BMCR is authoritative.
 * Clears bp->link_up only if no common capability was negotiated.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* MII_STAT1000 link-partner bits are shifted left by 2
		 * relative to MII_CTRL1000, hence the >> 2.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100 resolution. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* Nothing in common: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex straight from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1258
/* Program the L2 RX context for the given context id.  On 5709 the
 * context also carries the flow-control watermarks, which are derived
 * from the ring size and scaled/clamped to the 4-bit hardware field.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Watermarks are only meaningful when TX pause frames
		 * can actually be sent; otherwise disable the low mark.
		 */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; a zero high mark disables
		 * the low mark as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1294
bb4f98ab
MC
1295static void
1296bnx2_init_all_rx_contexts(struct bnx2 *bp)
1297{
1298 int i;
1299 u32 cid;
1300
1301 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1302 if (i == 1)
1303 cid = RX_RSS_CID;
1304 bnx2_init_rx_context(bp, cid);
1305 }
1306}
1307
/* Program the EMAC to match the resolved link state: port mode for the
 * current speed, duplex, RX/TX pause enables, and (on 5709) refreshed
 * RX context watermarks.  Also acks the EMAC link-change interrupt.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff program BNX2_EMAC_TX_LENGTHS; the
	 * larger value is used for 1G half duplex — presumably slot-time
	 * related, confirm against the chip manual.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
	    (bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		 BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 keeps pause watermarks in the RX contexts; they depend on
	 * bp->flow_ctrl, so refresh them now.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1375
27a005b8
MC
1376static void
1377bnx2_enable_bmsr1(struct bnx2 *bp)
1378{
583c28e5 1379 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1380 (CHIP_NUM(bp) == CHIP_NUM_5709))
1381 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1382 MII_BNX2_BLK_ADDR_GP_STATUS);
1383}
1384
1385static void
1386bnx2_disable_bmsr1(struct bnx2 *bp)
1387{
583c28e5 1388 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1389 (CHIP_NUM(bp) == CHIP_NUM_5709))
1390 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1391 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1392}
1393
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if the PHY is
 * 2.5G-capable.  Returns 1 if 2.5G was already enabled (or the PHY is
 * not capable), 0 if this call had to turn it on.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register sits in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1422
/* Clear the 2.5G advertisement bit in the PHY's UP1 register.  Returns
 * 1 if the bit was set and had to be cleared, 0 otherwise (including
 * when the PHY is not 2.5G-capable).
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register sits in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default register block on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1448
/* Force the SerDes PHY to 2.5G operation using the chip-specific
 * mechanism (SERDES_DIG MISC1 on 5709, BMCR force bit on 5708).
 * When speed autoneg is requested, autoneg is disabled in BMCR and the
 * requested duplex is forced instead.  No-op on non-2.5G-capable PHYs.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Set the force-speed field in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1485
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific force-2.5G
 * setting and, if speed autoneg is requested, re-enable and restart
 * autonegotiation.  No-op on non-2.5G-capable PHYs.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* Clear the force bit in the SERDES_DIG block. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		/* Other chips have no forced-2.5G mechanism. */
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1518
b2fadeae
MC
/* Force the 5706 SerDes link down (start == 0) or release the forced
 * state (start != 0) by toggling bits in the SERDES_CTL expansion
 * register via the DSP access registers.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1531
b6016b76
MC
/* Poll the PHY, update bp->link_up/speed/duplex/flow_ctrl accordingly,
 * report any link-state change, and reprogram the MAC.  Called with
 * bp->phy_lock held.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is considered up unconditionally. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY links are managed by firmware events instead. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice to get current status. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* AN_DBG is also latched; read twice for current state. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* Override BMSR with the EMAC/AN sync status, which is
		 * more reliable on 5706 SerDes.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Re-enable autoneg after a parallel-detect episode. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1615
1616static int
1617bnx2_reset_phy(struct bnx2 *bp)
1618{
1619 int i;
1620 u32 reg;
1621
ca58c3af 1622 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1623
1624#define PHY_RESET_MAX_WAIT 100
1625 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1626 udelay(10);
1627
ca58c3af 1628 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1629 if (!(reg & BMCR_RESET)) {
1630 udelay(20);
1631 break;
1632 }
1633 }
1634 if (i == PHY_RESET_MAX_WAIT) {
1635 return -EBUSY;
1636 }
1637 return 0;
1638}
1639
1640static u32
1641bnx2_phy_get_pause_adv(struct bnx2 *bp)
1642{
1643 u32 adv = 0;
1644
1645 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1646 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1647
583c28e5 1648 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1649 adv = ADVERTISE_1000XPAUSE;
1650 }
1651 else {
1652 adv = ADVERTISE_PAUSE_CAP;
1653 }
1654 }
1655 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1656 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1657 adv = ADVERTISE_1000XPSE_ASYM;
1658 }
1659 else {
1660 adv = ADVERTISE_PAUSE_ASYM;
1661 }
1662 }
1663 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1664 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1665 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1666 }
1667 else {
1668 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669 }
1670 }
1671 return adv;
1672}
1673
a2f13890 1674static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1675
b6016b76 1676static int
0d8a6571 1677bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1678__releases(&bp->phy_lock)
1679__acquires(&bp->phy_lock)
0d8a6571
MC
1680{
1681 u32 speed_arg = 0, pause_adv;
1682
1683 pause_adv = bnx2_phy_get_pause_adv(bp);
1684
1685 if (bp->autoneg & AUTONEG_SPEED) {
1686 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1687 if (bp->advertising & ADVERTISED_10baseT_Half)
1688 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1689 if (bp->advertising & ADVERTISED_10baseT_Full)
1690 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1691 if (bp->advertising & ADVERTISED_100baseT_Half)
1692 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1693 if (bp->advertising & ADVERTISED_100baseT_Full)
1694 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1695 if (bp->advertising & ADVERTISED_1000baseT_Full)
1696 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1697 if (bp->advertising & ADVERTISED_2500baseX_Full)
1698 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1699 } else {
1700 if (bp->req_line_speed == SPEED_2500)
1701 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1702 else if (bp->req_line_speed == SPEED_1000)
1703 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1704 else if (bp->req_line_speed == SPEED_100) {
1705 if (bp->req_duplex == DUPLEX_FULL)
1706 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1707 else
1708 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1709 } else if (bp->req_line_speed == SPEED_10) {
1710 if (bp->req_duplex == DUPLEX_FULL)
1711 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1712 else
1713 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1714 }
1715 }
1716
1717 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1718 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1719 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1720 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1721
1722 if (port == PORT_TP)
1723 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1724 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1725
2726d6e1 1726 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1727
1728 spin_unlock_bh(&bp->phy_lock);
a2f13890 1729 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
0d8a6571
MC
1730 spin_lock_bh(&bp->phy_lock);
1731
1732 return 0;
1733}
1734
/* Configure a SerDes PHY for the requested link settings.  Handles
 * firmware-managed PHYs, forced-speed mode (including forced 2.5G), and
 * autonegotiation with a shortened link-up timer for non-negotiating
 * partners.  Called with bp->phy_lock held; may drop it briefly.
 * Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1851
1852#define ETHTOOL_ALL_FIBRE_SPEED \
583c28e5 1853 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
deaf391b
MC
1854 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1855 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1856
1857#define ETHTOOL_ALL_COPPER_SPEED \
1858 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1859 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1860 ADVERTISED_1000baseT_Full)
1861
1862#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1863 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1864
b6016b76
MC
1865#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1866
0d8a6571
MC
/* Initialize the requested link settings from the firmware's stored
 * defaults for a firmware-managed PHY, selected by the current port
 * type (copper vs. SerDes shared-memory words).
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg default: build the advertising mask. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced default: later checks override earlier ones,
		 * so the highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1913
deaf391b
MC
/* Initialize default link parameters: firmware defaults for remote
 * PHYs, otherwise full autoneg — except on SerDes boards whose hardware
 * config forces 1G full duplex.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		/* Honor a forced-1G default from the NVRAM port config. */
		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1939
df149d70
MC
/* Write the next driver-pulse sequence number to the firmware mailbox
 * so the bootcode knows the driver is alive.  Uses the register-window
 * pair directly under indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1953
0d8a6571
MC
/* Handle a firmware link event for a remote PHY: decode the link
 * status word into bp->link_up/line_speed/duplex/flow_ctrl, switch the
 * default port type if it changed, report the link, and update the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets half duplex, then deliberately
		 * falls through to the FULL case to set the speed.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fall through */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type may flip (TP <-> fibre); reload defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2030
2031static int
2032bnx2_set_remote_link(struct bnx2 *bp)
2033{
2034 u32 evt_code;
2035
2726d6e1 2036 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
2037 switch (evt_code) {
2038 case BNX2_FW_EVT_CODE_LINK_EVENT:
2039 bnx2_remote_phy_event(bp);
2040 break;
2041 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2042 default:
df149d70 2043 bnx2_send_heart_beat(bp);
0d8a6571
MC
2044 break;
2045 }
2046 return 0;
2047}
2048
b6016b76
MC
/* Configure a copper PHY for the requested link settings, either by
 * (re)starting autonegotiation with the proper advertisements or by
 * forcing speed/duplex via BMCR.  Called with bp->phy_lock held; may
 * drop it briefly while forcing the link down.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Preserve bits outside the speed/pause fields. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			    ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if something actually changed. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2147
2148static int
0d8a6571 2149bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2150__releases(&bp->phy_lock)
2151__acquires(&bp->phy_lock)
b6016b76
MC
2152{
2153 if (bp->loopback == MAC_LOOPBACK)
2154 return 0;
2155
583c28e5 2156 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 2157 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
2158 }
2159 else {
2160 return (bnx2_setup_copper_phy(bp));
2161 }
2162}
2163
/* One-time setup of the 5709 SerDes PHY.  The 5709 exposes its IEEE
 * registers at an offset of 0x10, so the bp->mii_* register numbers are
 * redirected first; the generic PHY helpers then use those.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Redirect the standard MII register numbers to the 5709's
	 * shifted locations. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map in the autoneg MMD via the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode; disable media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only if the PHY is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable T2 and BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing back at the IEEE registers. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2213
/* One-time setup of the 5708 SerDes PHY: fiber mode, PLL early-link
 * detect, optional 2.5G advertisement, and chip-rev / backplane TX
 * amplitude tuning driven by shared-memory HW config.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply a board-specific TX control value from shared memory,
	 * but only on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2271
/* One-time setup of the 5706 SerDes PHY.  The 0x18/0x1c writes below
 * are vendor shadow-register accesses; the values are taken as-is from
 * the original driver and depend on whether jumbo frames are in use.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2309
/* One-time setup of the copper PHY: optional CRC and early-DAC
 * workarounds (flag-gated), extended-packet-length handling for jumbo
 * MTU, and ethernet@wirespeed.  Registers 0x10/0x15/0x17/0x18/0x1c are
 * vendor shadow registers; values are taken as-is from the original
 * driver.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround sequence, applied only when the flag is set. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC by clearing bit 8 of the DSP expansion reg. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2361
2362
/* Full PHY (re)initialization: select link-ready interrupt mode, reset
 * the MII register map to the standard addresses (chip-specific init
 * functions may redirect them), read the PHY ID, run the chip-specific
 * init, then program the PHY via bnx2_setup_phy().  Called with
 * phy_lock held; the setup path may drop and reacquire it.
 * Returns 0 or a negative errno from the init/setup helpers.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register numbers; 5709 SerDes init overrides these. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* When the PHY is managed remotely (by the management firmware),
	 * skip direct PHY access entirely. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2408
2409static int
2410bnx2_set_mac_loopback(struct bnx2 *bp)
2411{
2412 u32 mac_mode;
2413
2414 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418 bp->link_up = 1;
2419 return 0;
2420}
2421
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at forced 1000/full, wait up to ~1s for the
 * link to come up, then force the MAC port mode to GMII and mark the
 * link up.  Used by the self-test paths.  Returns 0 or the error from
 * the BMCR write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	/* PHY register access requires phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link, 10 x 100ms max. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2453
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and, if @ack is set, wait for the matching sequence-
 * numbered acknowledgement.  On timeout a FW_TIMEOUT code is posted and
 * -EBUSY returned (message suppressed when @silent); a non-OK firmware
 * status returns -EIO.  Returns 0 on success.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next sequence number so the ack can
	 * be matched to this request. */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages do not require full completion checking. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2498
59b47d8a
MC
/* Initialize the 5709's host-paged context memory: kick the CTX memory
 * init, then hand the hardware the DMA address of every pre-allocated
 * context page via the page-table registers, polling each write for
 * completion.  Returns 0, -EBUSY on a hardware poll timeout, or
 * -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to 10 x 2us) for the memory init to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated elsewhere; zero them before use. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the page-table entry: low DMA bits + valid,
		 * then high DMA bits, then the write request. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2546
b6016b76
MC
/* Zero the on-chip context for all 96 connection IDs on non-5709
 * chips.  5706 A0 uses a remapped physical CID layout (the vcid & 0x8
 * special case below); other revisions map the context ID directly.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: remap every second group of 8 CIDs. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2589
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the hardware pool, remember the good ones (bit 9 clear), and
 * free only those back — permanently retiring the bad blocks.
 * Returns 0, or -ENOMEM if the temporary array cannot be allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries covers the hardware's mbuf pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2640
2641static void
5fcaed01 2642bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2643{
2644 u32 val;
b6016b76
MC
2645
2646 val = (mac_addr[0] << 8) | mac_addr[1];
2647
5fcaed01 2648 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2649
6aa20a22 2650 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2651 (mac_addr[4] << 8) | mac_addr[5];
2652
5fcaed01 2653 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2654}
2655
/* Allocate and DMA-map one page for the RX page ring at @index, and
 * fill in the corresponding buffer descriptor.  Returns 0, -ENOMEM on
 * allocation failure, or -EIO on a DMA mapping failure (the page is
 * freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	/* Descriptor carries the DMA address split into hi/lo words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2680
2681static void
bb4f98ab 2682bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2683{
bb4f98ab 2684 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2685 struct page *page = rx_pg->page;
2686
2687 if (!page)
2688 return;
2689
2690 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2691 PCI_DMA_FROMDEVICE);
2692
2693 __free_page(page);
2694 rx_pg->page = NULL;
2695}
2696
/* Allocate and DMA-map one skb for RX ring slot @index, aligning the
 * data pointer to BNX2_RX_ALIGN, filling the buffer descriptor, and
 * advancing rx_prod_bseq.  Returns 0, -ENOMEM on allocation failure,
 * or -EIO on a DMA mapping failure (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to the hardware's required RX alignment. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Byte-sequence counter the chip uses to track producer progress. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2731
da3e4fbe 2732static int
35efa7c1 2733bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2734{
43e80b89 2735 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2736 u32 new_link_state, old_link_state;
da3e4fbe 2737 int is_set = 1;
b6016b76 2738
da3e4fbe
MC
2739 new_link_state = sblk->status_attn_bits & event;
2740 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2741 if (new_link_state != old_link_state) {
da3e4fbe
MC
2742 if (new_link_state)
2743 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2744 else
2745 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2746 } else
2747 is_set = 0;
2748
2749 return is_set;
2750}
2751
/* Service PHY-related status-block attention events (link state change
 * and remote-PHY timer abort) under phy_lock.  Called from interrupt /
 * NAPI context, hence spin_lock rather than spin_lock_bh.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2765
/* Read the hardware TX consumer index from the status block.  The
 * barriers force a fresh read each call (the status block is updated
 * by DMA).  The last descriptor of each ring page is a link entry, so
 * a consumer value landing on it is skipped over.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2779
57851d84
MC
/* Reclaim completed TX descriptors for @bnapi's ring, up to @budget
 * packets: unmap each packet's head and fragments, free the skb, and
 * wake the matching netdev TX queue if it was stopped and enough
 * descriptors are now available.  Returns the number of packets
 * reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* Each bnx2_napi instance maps 1:1 to a netdev TX queue. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Only reclaim once every BD of the GSO packet
			 * has completed. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more packets
		 * completed while we were reclaiming. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		/* Re-check under the queue lock to avoid a wake race. */
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2870
/* Recycle @count pages from the RX page ring's consumer side back to
 * the producer side without unmapping them, copying the DMA address
 * and descriptor fields across.  If @skb is non-NULL its last frag
 * page is reclaimed into the ring first and the skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its mapping only when the slots
		 * actually differ. */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2926
/* Give an skb the driver could not replace back to the RX ring at
 * producer slot @prod, reusing the DMA mapping from consumer slot
 * @cons.  The partial dma_sync hands the header area (which the CPU
 * may have touched) back to the device.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: the descriptor already holds the right address. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2956
/* Finish receiving one packet into @skb: replace the ring buffer with
 * a freshly allocated skb, unmap @skb, and — when the packet was split
 * across the page ring (@hdr_len != 0) — attach the page fragments,
 * trimming the trailing 4-byte CRC.  @ring_idx packs the consumer
 * index in the high 16 bits and the producer index in the low 16.
 * On allocation failure the buffers/pages are recycled and a negative
 * errno is returned; on success returns 0 with @skb ready for the
 * stack.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill the slot: recycle the old skb (and
		 * any page-ring buffers this packet would have used). */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data is in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* raw length includes the 4-byte CRC, stripped below. */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* The last fragment holds only (part of)
				 * the CRC: drop it and trim the skb. */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3055
/* Read the hardware RX consumer index from the status block.  The
 * barriers force a fresh read (the status block is DMA-updated).  The
 * last descriptor of each ring page is a link entry, so a consumer
 * value landing on it is skipped over.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3069
b6016b76 3070static int
35efa7c1 3071bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 3072{
bb4f98ab 3073 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
3074 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3075 struct l2_fhdr *rx_hdr;
1db82f2a 3076 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 3077
35efa7c1 3078 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
3079 sw_cons = rxr->rx_cons;
3080 sw_prod = rxr->rx_prod;
b6016b76
MC
3081
3082 /* Memory barrier necessary as speculative reads of the rx
3083 * buffer can be ahead of the index in the status block
3084 */
3085 rmb();
3086 while (sw_cons != hw_cons) {
1db82f2a 3087 unsigned int len, hdr_len;
ade2bfe7 3088 u32 status;
b6016b76
MC
3089 struct sw_bd *rx_buf;
3090 struct sk_buff *skb;
236b6394 3091 dma_addr_t dma_addr;
f22828e8
MC
3092 u16 vtag = 0;
3093 int hw_vlan __maybe_unused = 0;
b6016b76
MC
3094
3095 sw_ring_cons = RX_RING_IDX(sw_cons);
3096 sw_ring_prod = RX_RING_IDX(sw_prod);
3097
bb4f98ab 3098 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 3099 skb = rx_buf->skb;
236b6394
MC
3100
3101 rx_buf->skb = NULL;
3102
3103 dma_addr = pci_unmap_addr(rx_buf, mapping);
3104
3105 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
601d3d18
BL
3106 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3107 PCI_DMA_FROMDEVICE);
b6016b76
MC
3108
3109 rx_hdr = (struct l2_fhdr *) skb->data;
1db82f2a 3110 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 3111 status = rx_hdr->l2_fhdr_status;
b6016b76 3112
1db82f2a
MC
3113 hdr_len = 0;
3114 if (status & L2_FHDR_STATUS_SPLIT) {
3115 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3116 pg_ring_used = 1;
3117 } else if (len > bp->rx_jumbo_thresh) {
3118 hdr_len = bp->rx_jumbo_thresh;
3119 pg_ring_used = 1;
3120 }
3121
990ec380
MC
3122 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3123 L2_FHDR_ERRORS_PHY_DECODE |
3124 L2_FHDR_ERRORS_ALIGNMENT |
3125 L2_FHDR_ERRORS_TOO_SHORT |
3126 L2_FHDR_ERRORS_GIANT_FRAME))) {
3127
3128 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3129 sw_ring_prod);
3130 if (pg_ring_used) {
3131 int pages;
3132
3133 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3134
3135 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3136 }
3137 goto next_rx;
3138 }
3139
1db82f2a 3140 len -= 4;
b6016b76 3141
5d5d0015 3142 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
3143 struct sk_buff *new_skb;
3144
f22828e8 3145 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 3146 if (new_skb == NULL) {
bb4f98ab 3147 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
3148 sw_ring_prod);
3149 goto next_rx;
3150 }
b6016b76
MC
3151
3152 /* aligned copy */
d89cb6af 3153 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
3154 BNX2_RX_OFFSET - 6,
3155 new_skb->data, len + 6);
3156 skb_reserve(new_skb, 6);
b6016b76 3157 skb_put(new_skb, len);
b6016b76 3158
bb4f98ab 3159 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
3160 sw_ring_cons, sw_ring_prod);
3161
3162 skb = new_skb;
bb4f98ab 3163 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 3164 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 3165 goto next_rx;
b6016b76 3166
f22828e8
MC
3167 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3168 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3169 vtag = rx_hdr->l2_fhdr_vlan_tag;
3170#ifdef BCM_VLAN
3171 if (bp->vlgrp)
3172 hw_vlan = 1;
3173 else
3174#endif
3175 {
3176 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3177 __skb_push(skb, 4);
3178
3179 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3180 ve->h_vlan_proto = htons(ETH_P_8021Q);
3181 ve->h_vlan_TCI = htons(vtag);
3182 len += 4;
3183 }
3184 }
3185
b6016b76
MC
3186 skb->protocol = eth_type_trans(skb, bp->dev);
3187
3188 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3189 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3190
745720e5 3191 dev_kfree_skb(skb);
b6016b76
MC
3192 goto next_rx;
3193
3194 }
3195
b6016b76
MC
3196 skb->ip_summed = CHECKSUM_NONE;
3197 if (bp->rx_csum &&
3198 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3199 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3200
ade2bfe7
MC
3201 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3202 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3203 skb->ip_summed = CHECKSUM_UNNECESSARY;
3204 }
3205
0c8dfc83
DM
3206 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3207
b6016b76 3208#ifdef BCM_VLAN
f22828e8
MC
3209 if (hw_vlan)
3210 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
b6016b76
MC
3211 else
3212#endif
3213 netif_receive_skb(skb);
3214
b6016b76
MC
3215 rx_pkt++;
3216
3217next_rx:
b6016b76
MC
3218 sw_cons = NEXT_RX_BD(sw_cons);
3219 sw_prod = NEXT_RX_BD(sw_prod);
3220
3221 if ((rx_pkt == budget))
3222 break;
f4e418f7
MC
3223
3224 /* Refresh hw_cons to see if there is new work */
3225 if (sw_cons == hw_cons) {
35efa7c1 3226 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3227 rmb();
3228 }
b6016b76 3229 }
bb4f98ab
MC
3230 rxr->rx_cons = sw_cons;
3231 rxr->rx_prod = sw_prod;
b6016b76 3232
1db82f2a 3233 if (pg_ring_used)
bb4f98ab 3234 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3235
bb4f98ab 3236 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3237
bb4f98ab 3238 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3239
3240 mmiowb();
3241
3242 return rx_pkt;
3243
3244}
3245
3246/* MSI ISR - The only difference between this and the INTx ISR
3247 * is that the MSI interrupt is always serviced.
3248 */
3249static irqreturn_t
7d12e780 3250bnx2_msi(int irq, void *dev_instance)
b6016b76 3251{
f0ea2e63
MC
3252 struct bnx2_napi *bnapi = dev_instance;
3253 struct bnx2 *bp = bnapi->bp;
b6016b76 3254
43e80b89 3255 prefetch(bnapi->status_blk.msi);
b6016b76
MC
3256 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3257 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3258 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3259
3260 /* Return here if interrupt is disabled. */
73eef4cd
MC
3261 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3262 return IRQ_HANDLED;
b6016b76 3263
288379f0 3264 napi_schedule(&bnapi->napi);
b6016b76 3265
73eef4cd 3266 return IRQ_HANDLED;
b6016b76
MC
3267}
3268
8e6a72c4
MC
3269static irqreturn_t
3270bnx2_msi_1shot(int irq, void *dev_instance)
3271{
f0ea2e63
MC
3272 struct bnx2_napi *bnapi = dev_instance;
3273 struct bnx2 *bp = bnapi->bp;
8e6a72c4 3274
43e80b89 3275 prefetch(bnapi->status_blk.msi);
8e6a72c4
MC
3276
3277 /* Return here if interrupt is disabled. */
3278 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3279 return IRQ_HANDLED;
3280
288379f0 3281 napi_schedule(&bnapi->napi);
8e6a72c4
MC
3282
3283 return IRQ_HANDLED;
3284}
3285
b6016b76 3286static irqreturn_t
7d12e780 3287bnx2_interrupt(int irq, void *dev_instance)
b6016b76 3288{
f0ea2e63
MC
3289 struct bnx2_napi *bnapi = dev_instance;
3290 struct bnx2 *bp = bnapi->bp;
43e80b89 3291 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76
MC
3292
3293 /* When using INTx, it is possible for the interrupt to arrive
3294 * at the CPU before the status block posted prior to the
3295 * interrupt. Reading a register will flush the status block.
3296 * When using MSI, the MSI message will always complete after
3297 * the status block write.
3298 */
35efa7c1 3299 if ((sblk->status_idx == bnapi->last_status_idx) &&
b6016b76
MC
3300 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3301 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 3302 return IRQ_NONE;
b6016b76
MC
3303
3304 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3305 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3306 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3307
b8a7ce7b
MC
3308 /* Read back to deassert IRQ immediately to avoid too many
3309 * spurious interrupts.
3310 */
3311 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3312
b6016b76 3313 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
3314 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3315 return IRQ_HANDLED;
b6016b76 3316
288379f0 3317 if (napi_schedule_prep(&bnapi->napi)) {
35efa7c1 3318 bnapi->last_status_idx = sblk->status_idx;
288379f0 3319 __napi_schedule(&bnapi->napi);
b8a7ce7b 3320 }
b6016b76 3321
73eef4cd 3322 return IRQ_HANDLED;
b6016b76
MC
3323}
3324
f4e418f7 3325static inline int
43e80b89 3326bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3327{
35e9010b 3328 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3329 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3330
bb4f98ab 3331 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3332 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3333 return 1;
43e80b89
MC
3334 return 0;
3335}
3336
3337#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3338 STATUS_ATTN_BITS_TIMER_ABORT)
3339
3340static inline int
3341bnx2_has_work(struct bnx2_napi *bnapi)
3342{
3343 struct status_block *sblk = bnapi->status_blk.msi;
3344
3345 if (bnx2_has_fast_work(bnapi))
3346 return 1;
f4e418f7 3347
4edd473f
MC
3348#ifdef BCM_CNIC
3349 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3350 return 1;
3351#endif
3352
da3e4fbe
MC
3353 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3354 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3355 return 1;
3356
3357 return 0;
3358}
3359
efba0180
MC
3360static void
3361bnx2_chk_missed_msi(struct bnx2 *bp)
3362{
3363 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3364 u32 msi_ctrl;
3365
3366 if (bnx2_has_work(bnapi)) {
3367 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3368 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3369 return;
3370
3371 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3372 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3373 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3374 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3375 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3376 }
3377 }
3378
3379 bp->idle_chk_status_idx = bnapi->last_status_idx;
3380}
3381
4edd473f
MC
3382#ifdef BCM_CNIC
3383static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3384{
3385 struct cnic_ops *c_ops;
3386
3387 if (!bnapi->cnic_present)
3388 return;
3389
3390 rcu_read_lock();
3391 c_ops = rcu_dereference(bp->cnic_ops);
3392 if (c_ops)
3393 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3394 bnapi->status_blk.msi);
3395 rcu_read_unlock();
3396}
3397#endif
3398
43e80b89 3399static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76 3400{
43e80b89 3401 struct status_block *sblk = bnapi->status_blk.msi;
da3e4fbe
MC
3402 u32 status_attn_bits = sblk->status_attn_bits;
3403 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 3404
da3e4fbe
MC
3405 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3406 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 3407
35efa7c1 3408 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
3409
3410 /* This is needed to take care of transient status
3411 * during link changes.
3412 */
3413 REG_WR(bp, BNX2_HC_COMMAND,
3414 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3415 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76 3416 }
43e80b89
MC
3417}
3418
3419static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3420 int work_done, int budget)
3421{
3422 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3423 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3424
35e9010b 3425 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3426 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3427
bb4f98ab 3428 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3429 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3430
6f535763
DM
3431 return work_done;
3432}
3433
f0ea2e63
MC
3434static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3435{
3436 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3437 struct bnx2 *bp = bnapi->bp;
3438 int work_done = 0;
3439 struct status_block_msix *sblk = bnapi->status_blk.msix;
3440
3441 while (1) {
3442 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3443 if (unlikely(work_done >= budget))
3444 break;
3445
3446 bnapi->last_status_idx = sblk->status_idx;
3447 /* status idx must be read before checking for more work. */
3448 rmb();
3449 if (likely(!bnx2_has_fast_work(bnapi))) {
3450
288379f0 3451 napi_complete(napi);
f0ea2e63
MC
3452 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3453 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3454 bnapi->last_status_idx);
3455 break;
3456 }
3457 }
3458 return work_done;
3459}
3460
6f535763
DM
3461static int bnx2_poll(struct napi_struct *napi, int budget)
3462{
35efa7c1
MC
3463 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3464 struct bnx2 *bp = bnapi->bp;
6f535763 3465 int work_done = 0;
43e80b89 3466 struct status_block *sblk = bnapi->status_blk.msi;
6f535763
DM
3467
3468 while (1) {
43e80b89
MC
3469 bnx2_poll_link(bp, bnapi);
3470
35efa7c1 3471 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
f4e418f7 3472
4edd473f
MC
3473#ifdef BCM_CNIC
3474 bnx2_poll_cnic(bp, bnapi);
3475#endif
3476
35efa7c1 3477 /* bnapi->last_status_idx is used below to tell the hw how
6dee6421
MC
3478 * much work has been processed, so we must read it before
3479 * checking for more work.
3480 */
35efa7c1 3481 bnapi->last_status_idx = sblk->status_idx;
efba0180
MC
3482
3483 if (unlikely(work_done >= budget))
3484 break;
3485
6dee6421 3486 rmb();
35efa7c1 3487 if (likely(!bnx2_has_work(bnapi))) {
288379f0 3488 napi_complete(napi);
f86e82fb 3489 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
6f535763
DM
3490 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3491 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
35efa7c1 3492 bnapi->last_status_idx);
6dee6421 3493 break;
6f535763 3494 }
1269a8a6
MC
3495 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3496 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
6f535763 3497 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
35efa7c1 3498 bnapi->last_status_idx);
1269a8a6 3499
6f535763
DM
3500 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3501 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
35efa7c1 3502 bnapi->last_status_idx);
6f535763
DM
3503 break;
3504 }
b6016b76
MC
3505 }
3506
bea3348e 3507 return work_done;
b6016b76
MC
3508}
3509
932ff279 3510/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3511 * from set_multicast.
3512 */
3513static void
3514bnx2_set_rx_mode(struct net_device *dev)
3515{
972ec0d4 3516 struct bnx2 *bp = netdev_priv(dev);
b6016b76 3517 u32 rx_mode, sort_mode;
ccffad25 3518 struct netdev_hw_addr *ha;
b6016b76 3519 int i;
b6016b76 3520
9f52b564
MC
3521 if (!netif_running(dev))
3522 return;
3523
c770a65c 3524 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3525
3526 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3527 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3528 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3529#ifdef BCM_VLAN
7c6337a1 3530 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
b6016b76 3531 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 3532#else
7c6337a1 3533 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
e29054f9 3534 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3535#endif
3536 if (dev->flags & IFF_PROMISC) {
3537 /* Promiscuous mode. */
3538 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3539 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3540 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3541 }
3542 else if (dev->flags & IFF_ALLMULTI) {
3543 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3544 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3545 0xffffffff);
3546 }
3547 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3548 }
3549 else {
3550 /* Accept one or more multicast(s). */
3551 struct dev_mc_list *mclist;
3552 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3553 u32 regidx;
3554 u32 bit;
3555 u32 crc;
3556
3557 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3558
0ddf477b 3559 netdev_for_each_mc_addr(mclist, dev) {
b6016b76
MC
3560 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3561 bit = crc & 0xff;
3562 regidx = (bit & 0xe0) >> 5;
3563 bit &= 0x1f;
3564 mc_filter[regidx] |= (1 << bit);
3565 }
3566
3567 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3568 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3569 mc_filter[i]);
3570 }
3571
3572 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3573 }
3574
32e7bfc4 3575 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
5fcaed01
BL
3576 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3577 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3578 BNX2_RPM_SORT_USER0_PROM_VLAN;
3579 } else if (!(dev->flags & IFF_PROMISC)) {
5fcaed01 3580 /* Add all entries into to the match filter list */
ccffad25 3581 i = 0;
32e7bfc4 3582 netdev_for_each_uc_addr(ha, dev) {
ccffad25 3583 bnx2_set_mac_addr(bp, ha->addr,
5fcaed01
BL
3584 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3585 sort_mode |= (1 <<
3586 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
ccffad25 3587 i++;
5fcaed01
BL
3588 }
3589
3590 }
3591
b6016b76
MC
3592 if (rx_mode != bp->rx_mode) {
3593 bp->rx_mode = rx_mode;
3594 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3595 }
3596
3597 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3598 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3599 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3600
c770a65c 3601 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3602}
3603
57579f76
MC
3604static int __devinit
3605check_fw_section(const struct firmware *fw,
3606 const struct bnx2_fw_file_section *section,
3607 u32 alignment, bool non_empty)
3608{
3609 u32 offset = be32_to_cpu(section->offset);
3610 u32 len = be32_to_cpu(section->len);
3611
3612 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3613 return -EINVAL;
3614 if ((non_empty && len == 0) || len > fw->size - offset ||
3615 len & (alignment - 1))
3616 return -EINVAL;
3617 return 0;
3618}
3619
3620static int __devinit
3621check_mips_fw_entry(const struct firmware *fw,
3622 const struct bnx2_mips_fw_file_entry *entry)
3623{
3624 if (check_fw_section(fw, &entry->text, 4, true) ||
3625 check_fw_section(fw, &entry->data, 4, false) ||
3626 check_fw_section(fw, &entry->rodata, 4, false))
3627 return -EINVAL;
3628 return 0;
3629}
3630
3631static int __devinit
3632bnx2_request_firmware(struct bnx2 *bp)
b6016b76 3633{
57579f76 3634 const char *mips_fw_file, *rv2p_fw_file;
5ee1c326
BB
3635 const struct bnx2_mips_fw_file *mips_fw;
3636 const struct bnx2_rv2p_fw_file *rv2p_fw;
57579f76
MC
3637 int rc;
3638
3639 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3640 mips_fw_file = FW_MIPS_FILE_09;
078b0735
MC
3641 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3642 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3643 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3644 else
3645 rv2p_fw_file = FW_RV2P_FILE_09;
57579f76
MC
3646 } else {
3647 mips_fw_file = FW_MIPS_FILE_06;
3648 rv2p_fw_file = FW_RV2P_FILE_06;
3649 }
3650
3651 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3652 if (rc) {
3a9c6a49 3653 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
57579f76
MC
3654 return rc;
3655 }
3656
3657 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3658 if (rc) {
3a9c6a49 3659 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
57579f76
MC
3660 return rc;
3661 }
5ee1c326
BB
3662 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3663 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3664 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3665 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3666 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3667 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3668 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3669 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3a9c6a49 3670 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
57579f76
MC
3671 return -EINVAL;
3672 }
5ee1c326
BB
3673 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3674 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3675 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3a9c6a49 3676 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
57579f76
MC
3677 return -EINVAL;
3678 }
3679
3680 return 0;
3681}
3682
3683static u32
3684rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3685{
3686 switch (idx) {
3687 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3688 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3689 rv2p_code |= RV2P_BD_PAGE_SIZE;
3690 break;
3691 }
3692 return rv2p_code;
3693}
3694
3695static int
3696load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3697 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3698{
3699 u32 rv2p_code_len, file_offset;
3700 __be32 *rv2p_code;
b6016b76 3701 int i;
57579f76
MC
3702 u32 val, cmd, addr;
3703
3704 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3705 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3706
3707 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
b6016b76 3708
57579f76
MC
3709 if (rv2p_proc == RV2P_PROC1) {
3710 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3711 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3712 } else {
3713 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3714 addr = BNX2_RV2P_PROC2_ADDR_CMD;
d25be1d3 3715 }
b6016b76
MC
3716
3717 for (i = 0; i < rv2p_code_len; i += 8) {
57579f76 3718 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
b6016b76 3719 rv2p_code++;
57579f76 3720 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
b6016b76
MC
3721 rv2p_code++;
3722
57579f76
MC
3723 val = (i / 8) | cmd;
3724 REG_WR(bp, addr, val);
3725 }
3726
3727 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3728 for (i = 0; i < 8; i++) {
3729 u32 loc, code;
3730
3731 loc = be32_to_cpu(fw_entry->fixup[i]);
3732 if (loc && ((loc * 4) < rv2p_code_len)) {
3733 code = be32_to_cpu(*(rv2p_code + loc - 1));
3734 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3735 code = be32_to_cpu(*(rv2p_code + loc));
3736 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3737 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3738
3739 val = (loc / 2) | cmd;
3740 REG_WR(bp, addr, val);
b6016b76
MC
3741 }
3742 }
3743
3744 /* Reset the processor, un-stall is done later. */
3745 if (rv2p_proc == RV2P_PROC1) {
3746 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3747 }
3748 else {
3749 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3750 }
57579f76
MC
3751
3752 return 0;
b6016b76
MC
3753}
3754
af3ee519 3755static int
57579f76
MC
3756load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3757 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3758{
57579f76
MC
3759 u32 addr, len, file_offset;
3760 __be32 *data;
b6016b76
MC
3761 u32 offset;
3762 u32 val;
3763
3764 /* Halt the CPU. */
2726d6e1 3765 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3766 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3767 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3768 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3769
3770 /* Load the Text area. */
57579f76
MC
3771 addr = be32_to_cpu(fw_entry->text.addr);
3772 len = be32_to_cpu(fw_entry->text.len);
3773 file_offset = be32_to_cpu(fw_entry->text.offset);
3774 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3775
57579f76
MC
3776 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3777 if (len) {
b6016b76
MC
3778 int j;
3779
57579f76
MC
3780 for (j = 0; j < (len / 4); j++, offset += 4)
3781 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3782 }
3783
57579f76
MC
3784 /* Load the Data area. */
3785 addr = be32_to_cpu(fw_entry->data.addr);
3786 len = be32_to_cpu(fw_entry->data.len);
3787 file_offset = be32_to_cpu(fw_entry->data.offset);
3788 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3789
57579f76
MC
3790 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3791 if (len) {
b6016b76
MC
3792 int j;
3793
57579f76
MC
3794 for (j = 0; j < (len / 4); j++, offset += 4)
3795 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3796 }
3797
3798 /* Load the Read-Only area. */
57579f76
MC
3799 addr = be32_to_cpu(fw_entry->rodata.addr);
3800 len = be32_to_cpu(fw_entry->rodata.len);
3801 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3802 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3803
3804 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3805 if (len) {
b6016b76
MC
3806 int j;
3807
57579f76
MC
3808 for (j = 0; j < (len / 4); j++, offset += 4)
3809 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3810 }
3811
3812 /* Clear the pre-fetch instruction. */
2726d6e1 3813 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3814
3815 val = be32_to_cpu(fw_entry->start_addr);
3816 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3817
3818 /* Start the CPU. */
2726d6e1 3819 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3820 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3821 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3822 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3823
3824 return 0;
b6016b76
MC
3825}
3826
fba9fe91 3827static int
b6016b76
MC
3828bnx2_init_cpus(struct bnx2 *bp)
3829{
57579f76
MC
3830 const struct bnx2_mips_fw_file *mips_fw =
3831 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3832 const struct bnx2_rv2p_fw_file *rv2p_fw =
3833 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3834 int rc;
b6016b76
MC
3835
3836 /* Initialize the RV2P processor. */
57579f76
MC
3837 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3838 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
b6016b76
MC
3839
3840 /* Initialize the RX Processor. */
57579f76 3841 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
fba9fe91
MC
3842 if (rc)
3843 goto init_cpu_err;
3844
b6016b76 3845 /* Initialize the TX Processor. */
57579f76 3846 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
fba9fe91
MC
3847 if (rc)
3848 goto init_cpu_err;
3849
b6016b76 3850 /* Initialize the TX Patch-up Processor. */
57579f76 3851 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
fba9fe91
MC
3852 if (rc)
3853 goto init_cpu_err;
3854
b6016b76 3855 /* Initialize the Completion Processor. */
57579f76 3856 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
fba9fe91
MC
3857 if (rc)
3858 goto init_cpu_err;
3859
d43584c8 3860 /* Initialize the Command Processor. */
57579f76 3861 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
b6016b76 3862
fba9fe91 3863init_cpu_err:
fba9fe91 3864 return rc;
b6016b76
MC
3865}
3866
3867static int
829ca9a3 3868bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
b6016b76
MC
3869{
3870 u16 pmcsr;
3871
3872 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3873
3874 switch (state) {
829ca9a3 3875 case PCI_D0: {
b6016b76
MC
3876 u32 val;
3877
3878 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3879 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3880 PCI_PM_CTRL_PME_STATUS);
3881
3882 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3883 /* delay required during transition out of D3hot */
3884 msleep(20);
3885
3886 val = REG_RD(bp, BNX2_EMAC_MODE);
3887 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3888 val &= ~BNX2_EMAC_MODE_MPKT;
3889 REG_WR(bp, BNX2_EMAC_MODE, val);
3890
3891 val = REG_RD(bp, BNX2_RPM_CONFIG);
3892 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3893 REG_WR(bp, BNX2_RPM_CONFIG, val);
3894 break;
3895 }
829ca9a3 3896 case PCI_D3hot: {
b6016b76
MC
3897 int i;
3898 u32 val, wol_msg;
3899
3900 if (bp->wol) {
3901 u32 advertising;
3902 u8 autoneg;
3903
3904 autoneg = bp->autoneg;
3905 advertising = bp->advertising;
3906
239cd343
MC
3907 if (bp->phy_port == PORT_TP) {
3908 bp->autoneg = AUTONEG_SPEED;
3909 bp->advertising = ADVERTISED_10baseT_Half |
3910 ADVERTISED_10baseT_Full |
3911 ADVERTISED_100baseT_Half |
3912 ADVERTISED_100baseT_Full |
3913 ADVERTISED_Autoneg;
3914 }
b6016b76 3915
239cd343
MC
3916 spin_lock_bh(&bp->phy_lock);
3917 bnx2_setup_phy(bp, bp->phy_port);
3918 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3919
3920 bp->autoneg = autoneg;
3921 bp->advertising = advertising;
3922
5fcaed01 3923 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
3924
3925 val = REG_RD(bp, BNX2_EMAC_MODE);
3926
3927 /* Enable port mode. */
3928 val &= ~BNX2_EMAC_MODE_PORT;
239cd343 3929 val |= BNX2_EMAC_MODE_MPKT_RCVD |
b6016b76 3930 BNX2_EMAC_MODE_ACPI_RCVD |
b6016b76 3931 BNX2_EMAC_MODE_MPKT;
239cd343
MC
3932 if (bp->phy_port == PORT_TP)
3933 val |= BNX2_EMAC_MODE_PORT_MII;
3934 else {
3935 val |= BNX2_EMAC_MODE_PORT_GMII;
3936 if (bp->line_speed == SPEED_2500)
3937 val |= BNX2_EMAC_MODE_25G_MODE;
3938 }
b6016b76
MC
3939
3940 REG_WR(bp, BNX2_EMAC_MODE, val);
3941
3942 /* receive all multicast */
3943 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3944 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3945 0xffffffff);
3946 }
3947 REG_WR(bp, BNX2_EMAC_RX_MODE,
3948 BNX2_EMAC_RX_MODE_SORT_MODE);
3949
3950 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3951 BNX2_RPM_SORT_USER0_MC_EN;
3952 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3953 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3954 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3955 BNX2_RPM_SORT_USER0_ENA);
3956
3957 /* Need to enable EMAC and RPM for WOL. */
3958 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3959 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3960 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3961 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3962
3963 val = REG_RD(bp, BNX2_RPM_CONFIG);
3964 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3965 REG_WR(bp, BNX2_RPM_CONFIG, val);
3966
3967 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3968 }
3969 else {
3970 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3971 }
3972
f86e82fb 3973 if (!(bp->flags & BNX2_FLAG_NO_WOL))
a2f13890
MC
3974 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3975 1, 0);
b6016b76
MC
3976
3977 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3978 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3979 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3980
3981 if (bp->wol)
3982 pmcsr |= 3;
3983 }
3984 else {
3985 pmcsr |= 3;
3986 }
3987 if (bp->wol) {
3988 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3989 }
3990 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3991 pmcsr);
3992
3993 /* No more memory access after this point until
3994 * device is brought back to D0.
3995 */
3996 udelay(50);
3997 break;
3998 }
3999 default:
4000 return -EINVAL;
4001 }
4002 return 0;
4003}
4004
4005static int
4006bnx2_acquire_nvram_lock(struct bnx2 *bp)
4007{
4008 u32 val;
4009 int j;
4010
4011 /* Request access to the flash interface. */
4012 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4013 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4014 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4015 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4016 break;
4017
4018 udelay(5);
4019 }
4020
4021 if (j >= NVRAM_TIMEOUT_COUNT)
4022 return -EBUSY;
4023
4024 return 0;
4025}
4026
4027static int
4028bnx2_release_nvram_lock(struct bnx2 *bp)
4029{
4030 int j;
4031 u32 val;
4032
4033 /* Relinquish nvram interface. */
4034 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4035
4036 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4037 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4038 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4039 break;
4040
4041 udelay(5);
4042 }
4043
4044 if (j >= NVRAM_TIMEOUT_COUNT)
4045 return -EBUSY;
4046
4047 return 0;
4048}
4049
4050
4051static int
4052bnx2_enable_nvram_write(struct bnx2 *bp)
4053{
4054 u32 val;
4055
4056 val = REG_RD(bp, BNX2_MISC_CFG);
4057 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4058
e30372c9 4059 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
4060 int j;
4061
4062 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4063 REG_WR(bp, BNX2_NVM_COMMAND,
4064 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4065
4066 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4067 udelay(5);
4068
4069 val = REG_RD(bp, BNX2_NVM_COMMAND);
4070 if (val & BNX2_NVM_COMMAND_DONE)
4071 break;
4072 }
4073
4074 if (j >= NVRAM_TIMEOUT_COUNT)
4075 return -EBUSY;
4076 }
4077 return 0;
4078}
4079
4080static void
4081bnx2_disable_nvram_write(struct bnx2 *bp)
4082{
4083 u32 val;
4084
4085 val = REG_RD(bp, BNX2_MISC_CFG);
4086 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4087}
4088
4089
4090static void
4091bnx2_enable_nvram_access(struct bnx2 *bp)
4092{
4093 u32 val;
4094
4095 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4096 /* Enable both bits, even on read. */
6aa20a22 4097 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4098 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4099}
4100
4101static void
4102bnx2_disable_nvram_access(struct bnx2 *bp)
4103{
4104 u32 val;
4105
4106 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4107 /* Disable both bits, even after read. */
6aa20a22 4108 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4109 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4110 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4111}
4112
4113static int
4114bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4115{
4116 u32 cmd;
4117 int j;
4118
e30372c9 4119 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
4120 /* Buffered flash, no erase needed */
4121 return 0;
4122
4123 /* Build an erase command */
4124 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4125 BNX2_NVM_COMMAND_DOIT;
4126
4127 /* Need to clear DONE bit separately. */
4128 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4129
4130 /* Address of the NVRAM to read from. */
4131 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4132
4133 /* Issue an erase command. */
4134 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4135
4136 /* Wait for completion. */
4137 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4138 u32 val;
4139
4140 udelay(5);
4141
4142 val = REG_RD(bp, BNX2_NVM_COMMAND);
4143 if (val & BNX2_NVM_COMMAND_DONE)
4144 break;
4145 }
4146
4147 if (j >= NVRAM_TIMEOUT_COUNT)
4148 return -EBUSY;
4149
4150 return 0;
4151}
4152
/* Read one 32-bit word of NVRAM at @offset into @ret_val.
 *
 * The word is stored to @ret_val in big-endian (raw flash byte) order.
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits so callers
 * can burst several consecutive dword reads as one flash transaction.
 * Caller must hold the NVRAM lock with access enabled.
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		/* Translate the linear byte offset into the device's
		 * page-number/page-offset addressing. */
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store in flash byte order; memcpy avoids an
			 * unaligned store through a cast pointer. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4196
4197
/* Write one 32-bit word (@val, in raw flash byte order) to NVRAM at
 * @offset.  @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits
 * for multi-word bursts.  Caller must hold the NVRAM lock with both
 * access and write access enabled.
 *
 * Returns 0 on success, -EBUSY on completion timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy avoids an unaligned load through a cast pointer. */
	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4241
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * 5709 chips have a single known flash configuration and skip straight
 * to the size query.  Older chips probe NVM_CFG1 strapping bits against
 * the flash_table entries; if the interface has not been reconfigured
 * yet, the matching entry's timing registers are programmed.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from acquiring the NVRAM lock.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	/* NOTE(review): bit 0x40000000 appears to flag "interface already
	 * reconfigured"; taken on trust from the original comments. */
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised by firmware shared memory; fall back
	 * to the table entry's total size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4324
4325static int
4326bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4327 int buf_size)
4328{
4329 int rc = 0;
4330 u32 cmd_flags, offset32, len32, extra;
4331
4332 if (buf_size == 0)
4333 return 0;
4334
4335 /* Request access to the flash interface. */
4336 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4337 return rc;
4338
4339 /* Enable access to flash interface */
4340 bnx2_enable_nvram_access(bp);
4341
4342 len32 = buf_size;
4343 offset32 = offset;
4344 extra = 0;
4345
4346 cmd_flags = 0;
4347
4348 if (offset32 & 3) {
4349 u8 buf[4];
4350 u32 pre_len;
4351
4352 offset32 &= ~3;
4353 pre_len = 4 - (offset & 3);
4354
4355 if (pre_len >= len32) {
4356 pre_len = len32;
4357 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4358 BNX2_NVM_COMMAND_LAST;
4359 }
4360 else {
4361 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4362 }
4363
4364 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4365
4366 if (rc)
4367 return rc;
4368
4369 memcpy(ret_buf, buf + (offset & 3), pre_len);
4370
4371 offset32 += 4;
4372 ret_buf += pre_len;
4373 len32 -= pre_len;
4374 }
4375 if (len32 & 3) {
4376 extra = 4 - (len32 & 3);
4377 len32 = (len32 + 4) & ~3;
4378 }
4379
4380 if (len32 == 4) {
4381 u8 buf[4];
4382
4383 if (cmd_flags)
4384 cmd_flags = BNX2_NVM_COMMAND_LAST;
4385 else
4386 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4387 BNX2_NVM_COMMAND_LAST;
4388
4389 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4390
4391 memcpy(ret_buf, buf, 4 - extra);
4392 }
4393 else if (len32 > 0) {
4394 u8 buf[4];
4395
4396 /* Read the first word. */
4397 if (cmd_flags)
4398 cmd_flags = 0;
4399 else
4400 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4401
4402 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4403
4404 /* Advance to the next dword. */
4405 offset32 += 4;
4406 ret_buf += 4;
4407 len32 -= 4;
4408
4409 while (len32 > 4 && rc == 0) {
4410 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4411
4412 /* Advance to the next dword. */
4413 offset32 += 4;
4414 ret_buf += 4;
4415 len32 -= 4;
4416 }
4417
4418 if (rc)
4419 return rc;
4420
4421 cmd_flags = BNX2_NVM_COMMAND_LAST;
4422 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4423
4424 memcpy(ret_buf, buf, 4 - extra);
4425 }
4426
4427 /* Disable access to flash interface */
4428 bnx2_disable_nvram_access(bp);
4429
4430 bnx2_release_nvram_lock(bp);
4431
4432 return rc;
4433}
4434
/* Write @buf_size bytes from @data_buf to NVRAM starting at byte
 * @offset.
 *
 * Unaligned head/tail bytes are handled by read-modify-write: the
 * surrounding dwords are read first and merged with the caller's data
 * into a dword-aligned bounce buffer (@align_buf).  For non-buffered
 * flash, each affected page is read into @flash_buffer, erased, and
 * written back with the new data spliced in; buffered parts are written
 * directly.  The NVRAM lock is acquired/released per page so other
 * agents can interleave.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Round the start down to a dword and pre-read the dword
		 * that will supply the leading bytes. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Round the length up and pre-read the dword supplying
		 * the trailing bytes. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Merge pre-read head/tail with caller data into one
		 * dword-aligned buffer. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Page bounce buffer for read-erase-writeback.  NOTE(review):
		 * 264 presumably covers the largest supported page size —
		 * confirm against flash_table entries. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4614
/* Query the bootcode's capability mailbox and negotiate optional
 * features: VLAN-keep (when ASF management firmware is not active) and
 * remote-PHY control for SerDes ports.  Acknowledges any capabilities
 * taken by writing a signature back to BNX2_DRV_ACK_CAP_MB.  Updates
 * bp->flags, bp->phy_flags and bp->phy_port as side effects.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	/* Start from a clean slate; re-derive both capabilities below. */
	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	/* Without ASF firmware we can always keep VLAN tags. */
	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	/* No capability signature => old firmware, nothing to negotiate. */
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Firmware reports which media the remote PHY is using. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	/* Only ack while the interface is up and something was taken. */
	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4654
b4b36042
MC
/* Switch the GRC window to separate-window mode and point windows 2
 * and 3 at the chip's MSI-X table and PBA, respectively, so the host
 * can access them through the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4663
b6016b76
MC
/* Perform a coordinated soft reset of the chip.
 *
 * Sequence: quiesce DMA/host-coalescing, handshake WAIT0 with the
 * bootcode (@reset_code tells firmware why), deposit a driver-reset
 * signature so firmware treats this as a soft reset, then reset the
 * core (5709 uses MISC_COMMAND; older chips use PCICFG_MISC_CONFIG and
 * poll for completion).  Afterwards verify endian mode, handshake
 * WAIT1, refresh firmware capabilities, and apply per-revision
 * workarounds (5706 A0 voltage regulator and bad-rbuf purge, MSI-X
 * table remap with large GRC timeout).
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Config-space write: the register BAR may be unusable
		 * immediately after the core reset. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* If the remote PHY moved to a different media type, re-derive
	 * the default link settings. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4773
/* Bring the (already reset) chip to an operational state.
 *
 * Configures DMA byte/word swapping and channel counts, PCI-X quirks,
 * context memory, on-chip CPU firmware, NVRAM, MAC address, MQ kernel
 * bypass window, page sizes, MTU/rbuf sizing, status/statistics block
 * DMA addresses, host-coalescing parameters (per-vector for MSI-X),
 * and finally hands off to firmware with the WAIT2/RESET sync before
 * enabling the chip's default engines.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): magic DMA config bits carried over unchanged;
	 * meaning not documented here. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the relaxed-ordering bit in the PCI-X command
		 * register. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip RX/TX processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* rbuf sizing uses at least the standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Zero the status/statistics block and reset per-vector indices. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-time value
	 * in the high 16 bits and the normal value in the low 16. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block coalescing for the extra MSI-X vectors. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
		       BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
		       BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
		       (bp->tx_quick_cons_trip_int << 16) |
		       bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
		       (bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
		       bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	/* Read back to flush the posted write before the delay. */
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4998
c76c0475
MC
4999static void
5000bnx2_clear_ring_states(struct bnx2 *bp)
5001{
5002 struct bnx2_napi *bnapi;
35e9010b 5003 struct bnx2_tx_ring_info *txr;
bb4f98ab 5004 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5005 int i;
5006
5007 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5008 bnapi = &bp->bnx2_napi[i];
35e9010b 5009 txr = &bnapi->tx_ring;
bb4f98ab 5010 rxr = &bnapi->rx_ring;
c76c0475 5011
35e9010b
MC
5012 txr->tx_cons = 0;
5013 txr->hw_tx_cons = 0;
bb4f98ab
MC
5014 rxr->rx_prod_bseq = 0;
5015 rxr->rx_prod = 0;
5016 rxr->rx_cons = 0;
5017 rxr->rx_pg_prod = 0;
5018 rxr->rx_pg_cons = 0;
c76c0475
MC
5019 }
5020}
5021
/* Program the L2 TX context for connection @cid: context type/size,
 * command type, and the high/low halves of the TX descriptor ring's
 * DMA address.  5709 (XI) chips use a different set of context
 * offsets than older parts. */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	/* NOTE(review): (8 << 16) presumably encodes the BD count/size
	 * field of the command type — carried over unchanged. */
	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
b6016b76
MC
5051
/* Initialize TX ring @ring_num: pick its connection ID (TX_CID for
 * ring 0, TX_TSS_CID+n-1 for the TSS rings), make the final BD of the
 * ring page a chain pointer back to the start, reset the software
 * producer state, cache the mailbox doorbell addresses, and program
 * the hardware TX context. */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	/* Wake the queue once half the ring has drained. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* The last entry is not a data BD; it chains the ring back to
	 * its own base address. */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Mailbox addresses used to ring the doorbell on transmit. */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
5083
5084static void
5d5d0015
MC
5085bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5086 int num_rings)
b6016b76 5087{
b6016b76 5088 int i;
5d5d0015 5089 struct rx_bd *rxbd;
6aa20a22 5090
5d5d0015 5091 for (i = 0; i < num_rings; i++) {
13daffa2 5092 int j;
b6016b76 5093
5d5d0015 5094 rxbd = &rx_ring[i][0];
13daffa2 5095 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5096 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5097 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5098 }
5d5d0015 5099 if (i == (num_rings - 1))
13daffa2
MC
5100 j = 0;
5101 else
5102 j = i + 1;
5d5d0015
MC
5103 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5104 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5105 }
5d5d0015
MC
5106}
5107
/* Initialize RX ring @ring_num: set up its BD ring pages and hardware
 * context, optionally the jumbo page ring, pre-fill both rings with
 * receive pages/skbs, and publish the initial producer indices to the
 * chip's mailbox registers. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses RX_CID; RSS rings use RX_RSS_CID + n - 1. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Zero first; overwritten below when the page ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo/page ring for split receive buffers. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill only warns because the
	 * ring can still operate with fewer buffers. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with receive skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses for publishing producer indices. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5193
/* Initialize every tx and rx ring and, when more than one rx ring is in
 * use, program the RSS hash configuration and indirection table so the
 * hardware spreads flows across the rx rings.  Called with the chip
 * quiesced during (re)initialization.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the tx rings are being set up. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* With multiple tx rings, tell the scheduler how many extra
	 * rings exist and the base CID they start at.
	 */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rx rings are being set up. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Fill the indirection table one byte per entry; entries
		 * map round-robin onto the non-default rx rings
		 * (ring 0 is excluded, hence num_rx_rings - 1).  Each
		 * group of 4 entries is flushed as one 32-bit word.
		 * NOTE(review): the word is written at byte offset i
		 * (3, 7, 11, ...), which looks unaligned — presumably the
		 * RXP scratch write aligns it internally; confirm against
		 * the firmware interface.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Enable RSS hashing for all IPv4 and IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5238
5d5d0015 5239static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5240{
5d5d0015 5241 u32 max, num_rings = 1;
13daffa2 5242
5d5d0015
MC
5243 while (ring_size > MAX_RX_DESC_CNT) {
5244 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5245 num_rings++;
5246 }
5247 /* round to next power of 2 */
5d5d0015 5248 max = max_size;
13daffa2
MC
5249 while ((max & num_rings) == 0)
5250 max >>= 1;
5251
5252 if (num_rings != max)
5253 max <<= 1;
5254
5d5d0015
MC
5255 return max;
5256}
5257
/* Derive all rx buffer/ring sizing fields from the requested ring size
 * and the current MTU.  When a full MTU frame (plus headroom and
 * skb_shared_info overhead) no longer fits in one page, switch to
 * jumbo mode: small header buffers in the normal ring plus a separate
 * page ring for the frame payload.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb allocation footprint for one rx buffer. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame; 40 is presumably the minimum
		 * header pulled into the first buffer — TODO confirm.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Normal-ring buffers now only hold the header portion. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5296
b6016b76
MC
/* Unmap and free every pending tx skb on all tx rings.  Called while
 * the hardware is quiesced (after chip reset), so the tx path is not
 * running concurrently.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: once past the head
		 * descriptor and once per fragment descriptor.
		 */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head descriptor maps the linear part of the skb. */
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Follow-on descriptors map the paged fragments;
			 * note tx_buf is re-pointed at each one.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					pci_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5340
5341static void
5342bnx2_free_rx_skbs(struct bnx2 *bp)
5343{
5344 int i;
5345
bb4f98ab
MC
5346 for (i = 0; i < bp->num_rx_rings; i++) {
5347 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5348 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5349 int j;
b6016b76 5350
bb4f98ab
MC
5351 if (rxr->rx_buf_ring == NULL)
5352 return;
b6016b76 5353
bb4f98ab
MC
5354 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5355 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5356 struct sk_buff *skb = rx_buf->skb;
b6016b76 5357
bb4f98ab
MC
5358 if (skb == NULL)
5359 continue;
b6016b76 5360
bb4f98ab
MC
5361 pci_unmap_single(bp->pdev,
5362 pci_unmap_addr(rx_buf, mapping),
5363 bp->rx_buf_use_size,
5364 PCI_DMA_FROMDEVICE);
b6016b76 5365
bb4f98ab
MC
5366 rx_buf->skb = NULL;
5367
5368 dev_kfree_skb(skb);
5369 }
5370 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5371 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5372 }
5373}
5374
/* Release all driver-owned packet buffers, tx first then rx.
 * Must only be called with the chip quiesced.
 */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5381
/* Reset the chip with the given firmware reset code, then rebuild the
 * chip state and all rings.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
{
	int rc;

	rc = bnx2_reset_chip(bp, reset_code);
	/* Buffers are freed even if the reset failed so nothing is
	 * leaked before bailing out.
	 */
	bnx2_free_skbs(bp);
	if (rc)
		return rc;

	if ((rc = bnx2_init_chip(bp)) != 0)
		return rc;

	bnx2_init_all_rings(bp);
	return 0;
}
5398
/* Full NIC (re)initialization: reset + reinit the chip and rings, then
 * bring up the PHY and link state under the phy lock.  @reset_phy is
 * forwarded to bnx2_init_phy() to request a PHY reset as well.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY accesses and link updates are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5415
74bf4ba3
MC
5416static int
5417bnx2_shutdown_chip(struct bnx2 *bp)
5418{
5419 u32 reset_code;
5420
5421 if (bp->flags & BNX2_FLAG_NO_WOL)
5422 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5423 else if (bp->wol)
5424 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5425 else
5426 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5427
5428 return bnx2_reset_chip(bp, reset_code);
5429}
5430
b6016b76
MC
/* Ethtool self-test: verify read/write and read-only register bits.
 * For each table entry, writable bits (rw_mask) must accept both 0 and
 * 1, and read-only bits (ro_mask) must keep their original value.
 * Entries flagged BNX2_FL_NOT_5709 are skipped on 5709 chips.
 * Returns 0 on success, -ENODEV on the first mismatch.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16 offset;
		u16 flags;
#define BNX2_FL_NOT_5709	1
		u32 rw_mask;
		u32 ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* Sentinel terminating the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		/* Remember the original value so it can be restored. */
		save_val = readl(bp->regview + offset);

		/* Write all-zeros: writable bits must read back 0,
		 * read-only bits must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: writable bits must read back 1,
		 * read-only bits must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure before bailing. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5601
5602static int
5603bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5604{
f71e1309 5605 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5606 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5607 int i;
5608
5609 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5610 u32 offset;
5611
5612 for (offset = 0; offset < size; offset += 4) {
5613
2726d6e1 5614 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5615
2726d6e1 5616 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5617 test_pattern[i]) {
5618 return -ENODEV;
5619 }
5620 }
5621 }
5622 return 0;
5623}
5624
5625static int
5626bnx2_test_memory(struct bnx2 *bp)
5627{
5628 int ret = 0;
5629 int i;
5bae30c9 5630 static struct mem_entry {
b6016b76
MC
5631 u32 offset;
5632 u32 len;
5bae30c9 5633 } mem_tbl_5706[] = {
b6016b76 5634 { 0x60000, 0x4000 },
5b0c76ad 5635 { 0xa0000, 0x3000 },
b6016b76
MC
5636 { 0xe0000, 0x4000 },
5637 { 0x120000, 0x4000 },
5638 { 0x1a0000, 0x4000 },
5639 { 0x160000, 0x4000 },
5640 { 0xffffffff, 0 },
5bae30c9
MC
5641 },
5642 mem_tbl_5709[] = {
5643 { 0x60000, 0x4000 },
5644 { 0xa0000, 0x3000 },
5645 { 0xe0000, 0x4000 },
5646 { 0x120000, 0x4000 },
5647 { 0x1a0000, 0x4000 },
5648 { 0xffffffff, 0 },
b6016b76 5649 };
5bae30c9
MC
5650 struct mem_entry *mem_tbl;
5651
5652 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5653 mem_tbl = mem_tbl_5709;
5654 else
5655 mem_tbl = mem_tbl_5706;
b6016b76
MC
5656
5657 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5658 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5659 mem_tbl[i].len)) != 0) {
5660 return ret;
5661 }
5662 }
6aa20a22 5663
b6016b76
MC
5664 return ret;
5665}
5666
bc5a0690
MC
#define BNX2_MAC_LOOPBACK	0
#define BNX2_PHY_LOOPBACK	1

/* Ethtool loopback self-test: transmit one self-addressed test frame
 * through ring 0 with the MAC or PHY in loopback, then verify it comes
 * back on the rx ring intact (no error flags, correct length, correct
 * payload).  Returns 0 on success, -EINVAL for a bad mode, -ENOMEM /
 * -EIO on allocation/mapping failure, -ENODEV if the frame is lost or
 * corrupted.  Caller must have quiesced normal traffic.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Both tx and rx of the test frame go through ring/vector 0. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be run when the PHY is managed
		 * remotely; report success (test skipped).
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo threshold so it arrives in a
	 * single rx buffer.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* Destination MAC = our own address; rest of the payload is a
	 * recognizable incrementing byte pattern.
	 */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a status block update so we have a stable rx consumer
	 * index to compare against after the frame loops back.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Hand-build a single tx buffer descriptor for the frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* Transmit must have completed ... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ... and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The l2_fhdr written by the chip sits at the head of the
	 * buffer, before the frame data.
	 */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check; the 4 accounts for the appended CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte for byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5801
bc5a0690
MC
#define BNX2_MAC_LOOPBACK_FAILED	1
#define BNX2_PHY_LOOPBACK_FAILED	2
#define BNX2_LOOPBACK_FAILED		(BNX2_MAC_LOOPBACK_FAILED |	\
					 BNX2_PHY_LOOPBACK_FAILED)

/* Run both the MAC and PHY loopback tests and return a bitmask of the
 * ones that failed (0 means both passed).  Requires the interface to
 * be up; resets the NIC before testing.
 */
static int
bnx2_test_loopback(struct bnx2 *bp)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2_LOOPBACK_FAILED;

	bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	/* Re-init the PHY (with reset) under the phy lock before the
	 * loopback runs.
	 */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, 1);
	spin_unlock_bh(&bp->phy_lock);
	if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
		rc |= BNX2_MAC_LOOPBACK_FAILED;
	if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
		rc |= BNX2_PHY_LOOPBACK_FAILED;
	return rc;
}
5825
b6016b76
MC
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3

/* Ethtool self-test: sanity-check the NVRAM contents.  Verifies the
 * magic word at offset 0, then checks the CRC32 residual of the two
 * 0x100-byte blocks starting at offset 0x100.  Returns 0 on success,
 * -ENODEV on a bad magic or checksum, or the nvram read error.
 */
static int
bnx2_test_nvram(struct bnx2 *bp)
{
	__be32 buf[NVRAM_SIZE / 4];
	u8 *data = (u8 *) buf;
	int rc = 0;
	u32 magic, csum;

	if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
		goto test_nvram_done;

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
		goto test_nvram_done;

	/* A block whose trailing CRC is stored in-line yields this
	 * fixed CRC32 residual when the whole block is checksummed.
	 */
	csum = ether_crc_le(0x100, data);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
		goto test_nvram_done;
	}

	csum = ether_crc_le(0x100, data + 0x100);
	if (csum != CRC32_RESIDUAL) {
		rc = -ENODEV;
	}

test_nvram_done:
	return rc;
}
5863
/* Ethtool self-test: report whether link is up.  Returns 0 when link
 * is up, -ENODEV otherwise (including when the interface is down).
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	/* With a remotely managed PHY we cannot poll BMSR ourselves;
	 * rely on the cached link state instead.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Read twice: the link-status bit is latched-low, so the first
	 * read clears a stale latch and the second reflects the
	 * current state.
	 */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5889
5890static int
5891bnx2_test_intr(struct bnx2 *bp)
5892{
5893 int i;
b6016b76
MC
5894 u16 status_idx;
5895
5896 if (!netif_running(bp->dev))
5897 return -ENODEV;
5898
5899 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5900
5901 /* This register is not touched during run-time. */
bf5295bb 5902 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5903 REG_RD(bp, BNX2_HC_COMMAND);
5904
5905 for (i = 0; i < 10; i++) {
5906 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5907 status_idx) {
5908
5909 break;
5910 }
5911
5912 msleep_interruptible(10);
5913 }
5914 if (i < 10)
5915 return 0;
5916
5917 return -ENODEV;
5918}
5919
38ea3686 5920/* Determining link for parallel detection. */
b2fadeae
MC
5921static int
5922bnx2_5706_serdes_has_link(struct bnx2 *bp)
5923{
5924 u32 mode_ctl, an_dbg, exp;
5925
38ea3686
MC
5926 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5927 return 0;
5928
b2fadeae
MC
5929 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5930 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5931
5932 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5933 return 0;
5934
5935 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5936 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5937 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5938
f3014c0c 5939 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5940 return 0;
5941
5942 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5943 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5944 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5945
5946 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5947 return 0;
5948
5949 return 1;
5950}
5951
b6016b76 5952static void
48b01e2d 5953bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5954{
b2fadeae
MC
5955 int check_link = 1;
5956
48b01e2d 5957 spin_lock(&bp->phy_lock);
b2fadeae 5958 if (bp->serdes_an_pending) {
48b01e2d 5959 bp->serdes_an_pending--;
b2fadeae
MC
5960 check_link = 0;
5961 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 5962 u32 bmcr;
b6016b76 5963
ac392abc 5964 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 5965
ca58c3af 5966 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5967
48b01e2d 5968 if (bmcr & BMCR_ANENABLE) {
b2fadeae 5969 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
5970 bmcr &= ~BMCR_ANENABLE;
5971 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5972 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 5973 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 5974 }
b6016b76 5975 }
48b01e2d
MC
5976 }
5977 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 5978 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 5979 u32 phy2;
b6016b76 5980
48b01e2d
MC
5981 bnx2_write_phy(bp, 0x17, 0x0f01);
5982 bnx2_read_phy(bp, 0x15, &phy2);
5983 if (phy2 & 0x20) {
5984 u32 bmcr;
cd339a0e 5985
ca58c3af 5986 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5987 bmcr |= BMCR_ANENABLE;
ca58c3af 5988 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5989
583c28e5 5990 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
5991 }
5992 } else
ac392abc 5993 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5994
a2724e25 5995 if (check_link) {
b2fadeae
MC
5996 u32 val;
5997
5998 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5999 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6000 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6001
a2724e25
MC
6002 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6003 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6004 bnx2_5706s_force_link_dn(bp, 1);
6005 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6006 } else
6007 bnx2_set_link(bp);
6008 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6009 bnx2_set_link(bp);
b2fadeae 6010 }
48b01e2d
MC
6011 spin_unlock(&bp->phy_lock);
6012}
b6016b76 6013
f8dd064e
MC
/* Periodic (timer-context) SerDes maintenance for the 5708: when the
 * 2.5G-capable link is down with autoneg enabled, alternate between
 * forced 2.5G mode and autoneg so either kind of link partner can be
 * brought up.  No-op for remotely managed or non-2.5G PHYs.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Give the last autoneg attempt more time. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Try forced 2.5G next, with a shorter timeout. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Fall back to autoneg and wait two ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6046
48b01e2d
MC
/* Periodic driver timer: sends the firmware heartbeat, collects the
 * firmware rx-drop counter, applies statistics and missed-MSI
 * workarounds, and drives the SerDes state machines.  Re-arms itself
 * with the (possibly updated) current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are blocked (e.g. reset in progress): skip the
	 * work but keep the timer running.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Missed-MSI check applies only to plain (non-one-shot) MSI. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6082
8e6a72c4
MC
6083static int
6084bnx2_request_irq(struct bnx2 *bp)
6085{
6d866ffc 6086 unsigned long flags;
b4b36042
MC
6087 struct bnx2_irq *irq;
6088 int rc = 0, i;
8e6a72c4 6089
f86e82fb 6090 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
6091 flags = 0;
6092 else
6093 flags = IRQF_SHARED;
b4b36042
MC
6094
6095 for (i = 0; i < bp->irq_nvecs; i++) {
6096 irq = &bp->irq_tbl[i];
c76c0475 6097 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 6098 &bp->bnx2_napi[i]);
b4b36042
MC
6099 if (rc)
6100 break;
6101 irq->requested = 1;
6102 }
8e6a72c4
MC
6103 return rc;
6104}
6105
/* Release every requested interrupt vector, disable MSI/MSI-X on the
 * PCI device, and clear the interrupt-mode flags.
 */
static void
bnx2_free_irq(struct bnx2 *bp)
{
	struct bnx2_irq *irq;
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		irq = &bp->irq_tbl[i];
		/* Only free vectors that were successfully requested. */
		if (irq->requested)
			free_irq(irq->vector, &bp->bnx2_napi[i]);
		irq->requested = 0;
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		pci_disable_msi(bp->pdev);
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		pci_disable_msix(bp->pdev);

	bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
}
6125
/* Try to switch the device to MSI-X with @msix_vecs vectors.  Programs
 * the chip's MSI-X table/PBA windows, then asks the PCI core for the
 * vectors.  On failure it returns quietly, leaving the caller in
 * INTx/MSI mode; on success it fills bp->irq_tbl with the one-shot
 * MSI handlers and per-vector names.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All hardware vectors are requested even if fewer are used. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6160
/* Choose the interrupt mode (MSI-X, MSI, or INTx) and populate
 * bp->irq_tbl accordingly; @dis_msi forces legacy INTx.  Also derives
 * the number of tx/rx rings from the vector count.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the rx ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* Default: single legacy INTx vector. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* tx queue count must be a power of two for the hardware. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6194
b6016b76
MC
/* Called with rtnl_lock */
/* ndo_open: power up the device, pick the interrupt mode, allocate
 * rings/buffers, request IRQs and initialize the NIC.  If MSI is in
 * use but the interrupt self-test fails, fall back to INTx and retry.
 * Returns 0 on success or a negative errno after full cleanup.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* Reconfigure with MSI disabled and redo the
			 * NIC init (no PHY reset this time).
			 */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything acquired above; the free routines tolerate
	 * partially-initialized state.
	 */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6270
/* Deferred reset handler scheduled from bnx2_tx_timeout().
 * Runs in process context under rtnl_lock so it cannot race with
 * open/close; bails out if the device was closed before the work ran.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	/* NOTE(review): intr_sem is set to 1 here (not 0 as in bnx2_open),
	 * presumably so interrupts stay masked until bnx2_netif_start()
	 * re-arms them — confirm against bnx2_netif_start(). */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6290
20175c57
MC
/* Dump a snapshot of interrupt/MAC/firmware state to the kernel log
 * for post-mortem debugging of tx timeouts.  Read-only; no side effects
 * other than register reads and log output.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* MCP state is behind the indirect register window. */
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6309
b6016b76
MC
/* netdev tx-timeout hook: log diagnostic state, then defer the actual
 * chip reset to process context (bnx2_reset_task) since this may be
 * called in atomic context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6320
#ifdef BCM_VLAN
/* Called with rtnl_lock.
 * Install (or clear, vlgrp == NULL) the VLAN group.  The device is
 * quiesced around the update; if it is not running, only the pointer
 * is stored and hardware programming is skipped.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (netif_running(dev))
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	/* Tell firmware whether it may keep/strip VLAN tags. */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp, false);
}
#endif
6343
932ff279 6344/* Called with netif_tx_lock.
2f8af120
MC
6345 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6346 * netif_wake_queue().
b6016b76 6347 */
61357325 6348static netdev_tx_t
b6016b76
MC
6349bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6350{
972ec0d4 6351 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6352 dma_addr_t mapping;
6353 struct tx_bd *txbd;
3d16af86 6354 struct sw_tx_bd *tx_buf;
b6016b76
MC
6355 u32 len, vlan_tag_flags, last_frag, mss;
6356 u16 prod, ring_prod;
6357 int i;
706bf240
BL
6358 struct bnx2_napi *bnapi;
6359 struct bnx2_tx_ring_info *txr;
6360 struct netdev_queue *txq;
6361
6362 /* Determine which tx ring we will be placed on */
6363 i = skb_get_queue_mapping(skb);
6364 bnapi = &bp->bnx2_napi[i];
6365 txr = &bnapi->tx_ring;
6366 txq = netdev_get_tx_queue(dev, i);
b6016b76 6367
35e9010b 6368 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6369 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6370 netif_tx_stop_queue(txq);
3a9c6a49 6371 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
b6016b76
MC
6372
6373 return NETDEV_TX_BUSY;
6374 }
6375 len = skb_headlen(skb);
35e9010b 6376 prod = txr->tx_prod;
b6016b76
MC
6377 ring_prod = TX_RING_IDX(prod);
6378
6379 vlan_tag_flags = 0;
84fa7933 6380 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6381 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6382 }
6383
729b85cd 6384#ifdef BCM_VLAN
79ea13ce 6385 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
b6016b76
MC
6386 vlan_tag_flags |=
6387 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6388 }
729b85cd 6389#endif
fde82055 6390 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6391 u32 tcp_opt_len;
eddc9ec5 6392 struct iphdr *iph;
b6016b76 6393
b6016b76
MC
6394 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6395
4666f87a
MC
6396 tcp_opt_len = tcp_optlen(skb);
6397
6398 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6399 u32 tcp_off = skb_transport_offset(skb) -
6400 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6401
4666f87a
MC
6402 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6403 TX_BD_FLAGS_SW_FLAGS;
6404 if (likely(tcp_off == 0))
6405 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6406 else {
6407 tcp_off >>= 3;
6408 vlan_tag_flags |= ((tcp_off & 0x3) <<
6409 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6410 ((tcp_off & 0x10) <<
6411 TX_BD_FLAGS_TCP6_OFF4_SHL);
6412 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6413 }
6414 } else {
4666f87a 6415 iph = ip_hdr(skb);
4666f87a
MC
6416 if (tcp_opt_len || (iph->ihl > 5)) {
6417 vlan_tag_flags |= ((iph->ihl - 5) +
6418 (tcp_opt_len >> 2)) << 8;
6419 }
b6016b76 6420 }
4666f87a 6421 } else
b6016b76 6422 mss = 0;
b6016b76 6423
e95524a7
AD
6424 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6425 if (pci_dma_mapping_error(bp->pdev, mapping)) {
3d16af86
BL
6426 dev_kfree_skb(skb);
6427 return NETDEV_TX_OK;
6428 }
6429
35e9010b 6430 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6431 tx_buf->skb = skb;
e95524a7 6432 pci_unmap_addr_set(tx_buf, mapping, mapping);
b6016b76 6433
35e9010b 6434 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6435
6436 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6437 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6438 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6439 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6440
6441 last_frag = skb_shinfo(skb)->nr_frags;
d62fda08
ED
6442 tx_buf->nr_frags = last_frag;
6443 tx_buf->is_gso = skb_is_gso(skb);
b6016b76
MC
6444
6445 for (i = 0; i < last_frag; i++) {
6446 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6447
6448 prod = NEXT_TX_BD(prod);
6449 ring_prod = TX_RING_IDX(prod);
35e9010b 6450 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6451
6452 len = frag->size;
e95524a7
AD
6453 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6454 len, PCI_DMA_TODEVICE);
6455 if (pci_dma_mapping_error(bp->pdev, mapping))
6456 goto dma_error;
6457 pci_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6458 mapping);
b6016b76
MC
6459
6460 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6461 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6462 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6463 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6464
6465 }
6466 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6467
6468 prod = NEXT_TX_BD(prod);
35e9010b 6469 txr->tx_prod_bseq += skb->len;
b6016b76 6470
35e9010b
MC
6471 REG_WR16(bp, txr->tx_bidx_addr, prod);
6472 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6473
6474 mmiowb();
6475
35e9010b 6476 txr->tx_prod = prod;
b6016b76 6477
35e9010b 6478 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6479 netif_tx_stop_queue(txq);
35e9010b 6480 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6481 netif_tx_wake_queue(txq);
b6016b76
MC
6482 }
6483
e95524a7
AD
6484 return NETDEV_TX_OK;
6485dma_error:
6486 /* save value of frag that failed */
6487 last_frag = i;
6488
6489 /* start back at beginning and unmap skb */
6490 prod = txr->tx_prod;
6491 ring_prod = TX_RING_IDX(prod);
6492 tx_buf = &txr->tx_buf_ring[ring_prod];
6493 tx_buf->skb = NULL;
6494 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6495 skb_headlen(skb), PCI_DMA_TODEVICE);
6496
6497 /* unmap remaining mapped pages */
6498 for (i = 0; i < last_frag; i++) {
6499 prod = NEXT_TX_BD(prod);
6500 ring_prod = TX_RING_IDX(prod);
6501 tx_buf = &txr->tx_buf_ring[ring_prod];
6502 pci_unmap_page(bp->pdev, pci_unmap_addr(tx_buf, mapping),
6503 skb_shinfo(skb)->frags[i].size,
6504 PCI_DMA_TODEVICE);
6505 }
6506
6507 dev_kfree_skb(skb);
b6016b76
MC
6508 return NETDEV_TX_OK;
6509}
6510
/* Called with rtnl_lock.
 * Tear down in strict reverse order of bnx2_open(): cancel the pending
 * reset work first (it also takes rtnl), quiesce interrupts and NAPI,
 * stop the timer, shut the chip down, then release IRQs and memory,
 * and finally drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6531
354fcd77
MC
6532static void
6533bnx2_save_stats(struct bnx2 *bp)
6534{
6535 u32 *hw_stats = (u32 *) bp->stats_blk;
6536 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6537 int i;
6538
6539 /* The 1st 10 counters are 64-bit counters */
6540 for (i = 0; i < 20; i += 2) {
6541 u32 hi;
6542 u64 lo;
6543
c9885fe5
PR
6544 hi = temp_stats[i] + hw_stats[i];
6545 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
354fcd77
MC
6546 if (lo > 0xffffffff)
6547 hi++;
c9885fe5
PR
6548 temp_stats[i] = hi;
6549 temp_stats[i + 1] = lo & 0xffffffff;
354fcd77
MC
6550 }
6551
6552 for ( ; i < sizeof(struct statistics_block) / 4; i++)
c9885fe5 6553 temp_stats[i] += hw_stats[i];
354fcd77
MC
6554}
6555
a4743058 6556#define GET_64BIT_NET_STATS64(ctr) \
b6016b76
MC
6557 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6558 (unsigned long) (ctr##_lo)
6559
a4743058 6560#define GET_64BIT_NET_STATS32(ctr) \
b6016b76
MC
6561 (ctr##_lo)
6562
6563#if (BITS_PER_LONG == 64)
a4743058 6564#define GET_64BIT_NET_STATS(ctr) \
354fcd77
MC
6565 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6566 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
b6016b76 6567#else
a4743058 6568#define GET_64BIT_NET_STATS(ctr) \
354fcd77
MC
6569 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6570 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
b6016b76
MC
6571#endif
6572
a4743058 6573#define GET_32BIT_NET_STATS(ctr) \
354fcd77
MC
6574 (unsigned long) (bp->stats_blk->ctr + \
6575 bp->temp_stats_blk->ctr)
a4743058 6576
b6016b76
MC
/* netdev get_stats hook: translate the chip's hardware statistics block
 * (plus the software-saved copy) into struct net_device_stats.  If the
 * stats block has not been allocated yet, the untouched dev->stats is
 * returned as-is.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct net_device_stats *net_stats = &dev->stats;

	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
		GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_64BIT_NET_STATS(stat_IfHCOutOctets);

	net_stats->multicast =
		GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

	net_stats->rx_length_errors =
		GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
		GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

	net_stats->rx_frame_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

	net_stats->rx_crc_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
		GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

	/* 5706 and 5708 A0 do not report carrier-sense errors. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
	}

	net_stats->tx_errors =
		GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
		GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
		GET_32BIT_NET_STATS(stat_FwRxDrop);

	return net_stats;
}
6650
6651/* All ethtool functions called with rtnl_lock */
6652
/* ethtool get_settings: report supported modes based on the PHY type
 * (remote-PHY-capable boards can be either serdes or copper), then the
 * current autoneg/speed/duplex state.  The phy_lock guards the fields
 * that the link state machine updates concurrently.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful with link up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6711
b6016b76
MC
/* ethtool set_settings: validate the requested port/autoneg/speed/duplex
 * combination, stage the new values in bp, and reprogram the PHY if the
 * device is running.  All validation happens under phy_lock; on any
 * invalid combination nothing is committed and -EINVAL is returned.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible on remote-PHY-capable boards. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		/* Mask the advertisement to what the chosen medium
		 * supports; an empty mask means advertise everything. */
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed: fibre allows only 1G/2.5G full duplex
		 * (2.5G only if the PHY is capable); copper disallows
		 * forcing 1G/2.5G. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6789
6790static void
6791bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6792{
972ec0d4 6793 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6794
6795 strcpy(info->driver, DRV_MODULE_NAME);
6796 strcpy(info->version, DRV_MODULE_VERSION);
6797 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6798 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6799}
6800
244ac4f4
MC
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN (32 * 1024)

/* ethtool get_regs_len: fixed-size dump buffer. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6808
6809static void
6810bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6811{
6812 u32 *p = _p, i, offset;
6813 u8 *orig_p = _p;
6814 struct bnx2 *bp = netdev_priv(dev);
6815 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6816 0x0800, 0x0880, 0x0c00, 0x0c10,
6817 0x0c30, 0x0d08, 0x1000, 0x101c,
6818 0x1040, 0x1048, 0x1080, 0x10a4,
6819 0x1400, 0x1490, 0x1498, 0x14f0,
6820 0x1500, 0x155c, 0x1580, 0x15dc,
6821 0x1600, 0x1658, 0x1680, 0x16d8,
6822 0x1800, 0x1820, 0x1840, 0x1854,
6823 0x1880, 0x1894, 0x1900, 0x1984,
6824 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6825 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6826 0x2000, 0x2030, 0x23c0, 0x2400,
6827 0x2800, 0x2820, 0x2830, 0x2850,
6828 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6829 0x3c00, 0x3c94, 0x4000, 0x4010,
6830 0x4080, 0x4090, 0x43c0, 0x4458,
6831 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6832 0x4fc0, 0x5010, 0x53c0, 0x5444,
6833 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6834 0x5fc0, 0x6000, 0x6400, 0x6428,
6835 0x6800, 0x6848, 0x684c, 0x6860,
6836 0x6888, 0x6910, 0x8000 };
6837
6838 regs->version = 0;
6839
6840 memset(p, 0, BNX2_REGDUMP_LEN);
6841
6842 if (!netif_running(bp->dev))
6843 return;
6844
6845 i = 0;
6846 offset = reg_boundaries[0];
6847 p += offset;
6848 while (offset < BNX2_REGDUMP_LEN) {
6849 *p++ = REG_RD(bp, offset);
6850 offset += 4;
6851 if (offset == reg_boundaries[i + 1]) {
6852 offset = reg_boundaries[i + 2];
6853 p = (u32 *) (orig_p + offset);
6854 i += 2;
6855 }
6856 }
6857}
6858
b6016b76
MC
6859static void
6860bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6861{
972ec0d4 6862 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6863
f86e82fb 6864 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6865 wol->supported = 0;
6866 wol->wolopts = 0;
6867 }
6868 else {
6869 wol->supported = WAKE_MAGIC;
6870 if (bp->wol)
6871 wol->wolopts = WAKE_MAGIC;
6872 else
6873 wol->wolopts = 0;
6874 }
6875 memset(&wol->sopass, 0, sizeof(wol->sopass));
6876}
6877
6878static int
6879bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6880{
972ec0d4 6881 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6882
6883 if (wol->wolopts & ~WAKE_MAGIC)
6884 return -EINVAL;
6885
6886 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6887 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6888 return -EINVAL;
6889
6890 bp->wol = 1;
6891 }
6892 else {
6893 bp->wol = 0;
6894 }
6895 return 0;
6896}
6897
/* ethtool nway_reset: restart autonegotiation.  Fails if the device is
 * down or autoneg is disabled.  Remote-PHY boards delegate to the
 * firmware; serdes PHYs are briefly forced into loopback to make the
 * link partner see link-down, and the serdes autoneg timer is re-armed.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; msleep() cannot be
		 * called under a spinlock. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6943
7959ea25
ON
6944static u32
6945bnx2_get_link(struct net_device *dev)
6946{
6947 struct bnx2 *bp = netdev_priv(dev);
6948
6949 return bp->link_up;
6950}
6951
b6016b76
MC
6952static int
6953bnx2_get_eeprom_len(struct net_device *dev)
6954{
972ec0d4 6955 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6956
1122db71 6957 if (bp->flash_info == NULL)
b6016b76
MC
6958 return 0;
6959
1122db71 6960 return (int) bp->flash_size;
b6016b76
MC
6961}
6962
6963static int
6964bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6965 u8 *eebuf)
6966{
972ec0d4 6967 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6968 int rc;
6969
9f52b564
MC
6970 if (!netif_running(dev))
6971 return -EAGAIN;
6972
1064e944 6973 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6974
6975 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6976
6977 return rc;
6978}
6979
6980static int
6981bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6982 u8 *eebuf)
6983{
972ec0d4 6984 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6985 int rc;
6986
9f52b564
MC
6987 if (!netif_running(dev))
6988 return -EAGAIN;
6989
1064e944 6990 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6991
6992 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6993
6994 return rc;
6995}
6996
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (rx/tx tick counts and frame thresholds, plus the stats
 * block update interval).  Unsupported fields stay zero from the
 * initial memset.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7018
/* ethtool set_coalesce: store new coalescing parameters, clamping each
 * to its hardware field width (ticks to 10 bits, frame counts to 8
 * bits).  Note each value is first truncated to u16, then clamped —
 * preserved as-is since changing it would alter observable behavior.
 * If the device is running, the NIC is re-initialized to apply them.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* Chips with broken stats coalescing only support 0 or 1 second. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7067
7068static void
7069bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7070{
972ec0d4 7071 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7072
13daffa2 7073 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 7074 ering->rx_mini_max_pending = 0;
47bf4246 7075 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7076
7077 ering->rx_pending = bp->rx_ring_size;
7078 ering->rx_mini_pending = 0;
47bf4246 7079 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
7080
7081 ering->tx_max_pending = MAX_TX_DESC_CNT;
7082 ering->tx_pending = bp->tx_ring_size;
7083}
7084
/* Resize the rx/tx rings.  If the device is up, the chip is quiesced
 * and reset (saving stats first, since the reset clears them), ring
 * memory is reallocated at the new sizes, and the NIC is re-initialized.
 * On allocation/init failure the device is closed and the error
 * returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI so dev_close() can tear down
			 * through the normal path. */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7124
5d5d0015
MC
7125static int
7126bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7127{
7128 struct bnx2 *bp = netdev_priv(dev);
7129 int rc;
7130
7131 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7132 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7133 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7134
7135 return -EINVAL;
7136 }
7137 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7138 return rc;
7139}
7140
b6016b76
MC
7141static void
7142bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7143{
972ec0d4 7144 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7145
7146 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7147 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7148 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7149}
7150
7151static int
7152bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7153{
972ec0d4 7154 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7155
7156 bp->req_flow_ctrl = 0;
7157 if (epause->rx_pause)
7158 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7159 if (epause->tx_pause)
7160 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7161
7162 if (epause->autoneg) {
7163 bp->autoneg |= AUTONEG_FLOW_CTRL;
7164 }
7165 else {
7166 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7167 }
7168
9f52b564
MC
7169 if (netif_running(dev)) {
7170 spin_lock_bh(&bp->phy_lock);
7171 bnx2_setup_phy(bp, bp->phy_port);
7172 spin_unlock_bh(&bp->phy_lock);
7173 }
b6016b76
MC
7174
7175 return 0;
7176}
7177
7178static u32
7179bnx2_get_rx_csum(struct net_device *dev)
7180{
972ec0d4 7181 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7182
7183 return bp->rx_csum;
7184}
7185
7186static int
7187bnx2_set_rx_csum(struct net_device *dev, u32 data)
7188{
972ec0d4 7189 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7190
7191 bp->rx_csum = data;
7192 return 0;
7193}
7194
b11d6213
MC
7195static int
7196bnx2_set_tso(struct net_device *dev, u32 data)
7197{
4666f87a
MC
7198 struct bnx2 *bp = netdev_priv(dev);
7199
7200 if (data) {
b11d6213 7201 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7202 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7203 dev->features |= NETIF_F_TSO6;
7204 } else
7205 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7206 NETIF_F_TSO_ECN);
b11d6213
MC
7207 return 0;
7208}
7209
14ab9b86 7210static struct {
b6016b76 7211 char string[ETH_GSTRING_LEN];
790dab2f 7212} bnx2_stats_str_arr[] = {
b6016b76
MC
7213 { "rx_bytes" },
7214 { "rx_error_bytes" },
7215 { "tx_bytes" },
7216 { "tx_error_bytes" },
7217 { "rx_ucast_packets" },
7218 { "rx_mcast_packets" },
7219 { "rx_bcast_packets" },
7220 { "tx_ucast_packets" },
7221 { "tx_mcast_packets" },
7222 { "tx_bcast_packets" },
7223 { "tx_mac_errors" },
7224 { "tx_carrier_errors" },
7225 { "rx_crc_errors" },
7226 { "rx_align_errors" },
7227 { "tx_single_collisions" },
7228 { "tx_multi_collisions" },
7229 { "tx_deferred" },
7230 { "tx_excess_collisions" },
7231 { "tx_late_collisions" },
7232 { "tx_total_collisions" },
7233 { "rx_fragments" },
7234 { "rx_jabbers" },
7235 { "rx_undersize_packets" },
7236 { "rx_oversize_packets" },
7237 { "rx_64_byte_packets" },
7238 { "rx_65_to_127_byte_packets" },
7239 { "rx_128_to_255_byte_packets" },
7240 { "rx_256_to_511_byte_packets" },
7241 { "rx_512_to_1023_byte_packets" },
7242 { "rx_1024_to_1522_byte_packets" },
7243 { "rx_1523_to_9022_byte_packets" },
7244 { "tx_64_byte_packets" },
7245 { "tx_65_to_127_byte_packets" },
7246 { "tx_128_to_255_byte_packets" },
7247 { "tx_256_to_511_byte_packets" },
7248 { "tx_512_to_1023_byte_packets" },
7249 { "tx_1024_to_1522_byte_packets" },
7250 { "tx_1523_to_9022_byte_packets" },
7251 { "rx_xon_frames" },
7252 { "rx_xoff_frames" },
7253 { "tx_xon_frames" },
7254 { "tx_xoff_frames" },
7255 { "rx_mac_ctrl_frames" },
7256 { "rx_filtered_packets" },
790dab2f 7257 { "rx_ftq_discards" },
b6016b76 7258 { "rx_discards" },
cea94db9 7259 { "rx_fw_discards" },
b6016b76
MC
7260};
7261
790dab2f
MC
7262#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7263 sizeof(bnx2_stats_str_arr[0]))
7264
/* Convert a statistics_block field name into a 32-bit word offset. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter inside struct statistics_block.  Indexed
 * in lockstep with bnx2_stats_str_arr[]; for 64-bit counters the offset
 * points at the _hi word and the _lo word follows at offset + 1 (see
 * bnx2_get_ethtool_stats()).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7316
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter width for 5706 A0-A2 and 5708 A0 chips, indexed in
 * lockstep with bnx2_stats_str_arr[]: 8 = 64-bit counter, 4 = 32-bit
 * counter, 0 = counter skipped (reported as zero).
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7327
/* Per-counter width for later chips (no counters skipped), indexed in
 * lockstep with bnx2_stats_str_arr[]: 8 = 64-bit, 4 = 32-bit counter.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7335
#define BNX2_NUM_TESTS 6

/* Names of the self-tests run by bnx2_self_test(), reported via
 * ethtool; index matches the buf[] result slot filled in there.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7348
7349static int
b9f2c044 7350bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7351{
b9f2c044
JG
7352 switch (sset) {
7353 case ETH_SS_TEST:
7354 return BNX2_NUM_TESTS;
7355 case ETH_SS_STATS:
7356 return BNX2_NUM_STATS;
7357 default:
7358 return -EOPNOTSUPP;
7359 }
b6016b76
MC
7360}
7361
/* ethtool self_test handler.  Runs the online tests always and, when
 * ETH_TEST_FL_OFFLINE is requested, additionally stops traffic and runs
 * the offline register/memory/loopback tests.  buf[i] is set non-zero
 * for each failed test (index matches bnx2_tests_str_arr[]).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Chip must be powered up even if the interface is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive chip access: stop the
		 * netif path, put the chip in DIAG mode and drop all
		 * queued skbs before poking at it.
		 */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback result is a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the chip: full re-init if the interface is up,
		 * otherwise shut it back down.
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up (at most 7 seconds) */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return the chip to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7420
7421static void
7422bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7423{
7424 switch (stringset) {
7425 case ETH_SS_STATS:
7426 memcpy(buf, bnx2_stats_str_arr,
7427 sizeof(bnx2_stats_str_arr));
7428 break;
7429 case ETH_SS_TEST:
7430 memcpy(buf, bnx2_tests_str_arr,
7431 sizeof(bnx2_tests_str_arr));
7432 break;
7433 }
7434}
7435
/* ethtool get_ethtool_stats handler.  Walks the counter tables and
 * builds one u64 per counter from the live hardware statistics block
 * plus the saved temp_stats_blk accumulator.  Counter widths come from
 * the chip-revision-specific len array (0 = skipped due to errata,
 * 4 = 32-bit, 8 = 64-bit split across two consecutive words).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions skip two counters because of errata; pick the
	 * matching width table.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: hi word at offset, lo word at offset + 1 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7482
/* ethtool phys_id handler: blink the port LED for @data seconds
 * (default 2) so the physical port can be identified.  Saves and
 * restores the LED mode register around the blink loop.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	/* Chip must be powered up to drive the LED registers. */
	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Two half-second phases per second: LED off (override only),
	 * then all LED override bits on.
	 */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Stop early if the user interrupted the sleep. */
		if (signal_pending(current))
			break;
	}
	/* Restore normal LED operation. */
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7522
4666f87a
MC
7523static int
7524bnx2_set_tx_csum(struct net_device *dev, u32 data)
7525{
7526 struct bnx2 *bp = netdev_priv(dev);
7527
7528 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7529 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7530 else
7531 return (ethtool_op_set_tx_csum(dev, data));
7532}
7533
/* ethtool entry points for this driver; installed on the net_device at
 * probe time.  Handlers not listed fall back to the ethtool core
 * defaults.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7564
/* Called with rtnl_lock */
/* ndo_do_ioctl handler: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  PHY reads/writes are serialized with phy_lock; access
 * is refused when the PHY is managed remotely or the interface is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* PHY owned by the management firmware — hands off. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7615
7616/* Called with rtnl_lock */
7617static int
7618bnx2_change_mac_addr(struct net_device *dev, void *p)
7619{
7620 struct sockaddr *addr = p;
972ec0d4 7621 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7622
73eef4cd
MC
7623 if (!is_valid_ether_addr(addr->sa_data))
7624 return -EINVAL;
7625
b6016b76
MC
7626 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7627 if (netif_running(dev))
5fcaed01 7628 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7629
7630 return 0;
7631}
7632
7633/* Called with rtnl_lock */
7634static int
7635bnx2_change_mtu(struct net_device *dev, int new_mtu)
7636{
972ec0d4 7637 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7638
7639 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7640 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7641 return -EINVAL;
7642
7643 dev->mtu = new_mtu;
5d5d0015 7644 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7645}
7646
#ifdef CONFIG_NET_POLL_CONTROLLER
/* ndo_poll_controller handler (netconsole etc.): invoke every IRQ
 * vector's handler by hand with its interrupt disabled, so the device
 * can be serviced without interrupts being delivered.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7663
/* Determine whether a 5709 port is copper or SerDes from the dual-media
 * control register: the bond id decides directly where it can, otherwise
 * the strap value (possibly overridden) is decoded per PCI function.
 * Sets BNX2_PHY_FLAG_SERDES for fiber ports.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		/* Copper bond — nothing to flag. */
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Bond id is ambiguous: fall back to the strap pins, which may be
	 * overridden by software.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values meaning "SerDes" differ between function 0 and
	 * the other PCI function of the dual-port device.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7701
/* Probe-time helper: decode the PCI/PCI-X bus mode, clock speed and
 * width from the chip's misc status and clock control registers, and
 * record them in bp->flags / bp->bus_speed_mhz for later reporting.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* PCI-X: the detected clock is encoded in the clock
		 * control register.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: M66EN pin distinguishes 66 MHz from 33 MHz. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7753
/* Probe-time helper: read the VPD area out of NVRAM, byte-swap it into
 * host order, and — for boards whose VPD manufacturer id is "1028"
 * (Dell) — copy the VENDOR0 version string into bp->fw_version.
 * Fails silently (fw_version left untouched) on any parse error.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* One buffer, two halves: raw NVRAM bytes go in the upper half,
	 * the byte-swapped copy is built in the lower half.
	 */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Reverse each 32-bit word from NVRAM byte order. */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	/* Locate the read-only VPD section. */
	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	/* Only proceed if the manufacturer id field is exactly "1028". */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	/* Extract the vendor-specific version string. */
	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7821
/* One-time board initialization during PCI probe: enable and map the
 * device, detect chip/bus capabilities, talk to the bootcode firmware
 * to fetch the MAC address and version strings, and set driver
 * defaults.  On error, unwinds via the goto-cleanup chain and returns a
 * negative errno.
 */
static int __devinit
bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
{
	struct bnx2 *bp;
	unsigned long mem_len;
	int rc, i, j;
	u32 reg;
	u64 dma_mask, persist_dma_mask;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->flags = 0;
	bp->phy_flags = 0;

	/* Accumulator used to preserve counters across statistics-block
	 * resets (see bnx2_get_ethtool_stats()).
	 */
	bp->temp_stats_blk =
		kzalloc(sizeof(struct statistics_block), GFP_KERNEL);

	if (bp->temp_stats_blk == NULL) {
		rc = -ENOMEM;
		goto err_out;
	}

	/* enable device (incl. PCI PM wakeup), and bus-mastering */
	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	rc = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (rc) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
		goto err_out_disable;
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	spin_lock_init(&bp->phy_lock);
	spin_lock_init(&bp->indirect_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_lock);
#endif
	INIT_WORK(&bp->reset_task, bnx2_reset_task);

	/* Map enough register space to cover all TX mailbox CIDs. */
	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
	mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
	dev->mem_end = dev->mem_start + mem_len;
	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr, mem_len);

	if (!bp->regview) {
		dev_err(&pdev->dev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Configure byte swap and enable write to the reg_window registers.
	 * Rely on CPU to do target byte swapping on big endian systems
	 * The chip's target access swapping will not swap all accesses
	 */
	pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
			       BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
			       BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);

	bnx2_set_power_state(bp, PCI_D0);

	bp->chip_id = REG_RD(bp, BNX2_MISC_ID);

	/* Bus capability detection: 5709 is PCI Express, everything else
	 * must be on a PCI-X bus.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIE capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_PCIE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
	} else {
		bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
		if (bp->pcix_cap == 0) {
			dev_err(&pdev->dev,
				"Cannot find PCIX capability, aborting\n");
			rc = -EIO;
			goto err_out_unmap;
		}
		bp->flags |= BNX2_FLAG_BROKEN_STATS;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
			bp->flags |= BNX2_FLAG_MSIX_CAP;
	}

	if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
		if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
			bp->flags |= BNX2_FLAG_MSI_CAP;
	}

	/* 5708 cannot support DMA addresses > 40-bit. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
	else
		persist_dma_mask = dma_mask = DMA_BIT_MASK(64);

	/* Configure DMA attributes. */
	if (pci_set_dma_mask(pdev, dma_mask) == 0) {
		dev->features |= NETIF_F_HIGHDMA;
		rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
		if (rc) {
			dev_err(&pdev->dev,
				"pci_set_consistent_dma_mask failed, aborting\n");
			goto err_out_unmap;
		}
	} else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
		dev_err(&pdev->dev, "System does not support DMA, aborting\n");
		goto err_out_unmap;
	}

	if (!(bp->flags & BNX2_FLAG_PCIE))
		bnx2_get_pci_speed(bp);

	/* 5706A0 may falsely detect SERR and PERR. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		reg = REG_RD(bp, PCI_COMMAND);
		reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
		REG_WR(bp, PCI_COMMAND, reg);
	}
	else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
		!(bp->flags & BNX2_FLAG_PCIX)) {

		dev_err(&pdev->dev,
			"5706 A1 can only be used in a PCIX bus, aborting\n");
		goto err_out_unmap;
	}

	bnx2_init_nvram(bp);

	/* Locate the shared memory window used to talk to the bootcode;
	 * newer firmware publishes a per-function address.
	 */
	reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);

	if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
	    BNX2_SHM_HDR_SIGNATURE_SIG) {
		u32 off = PCI_FUNC(pdev->devfn) << 2;

		bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
	} else
		bp->shmem_base = HOST_VIEW_SHMEM_BASE;

	/* Get the permanent MAC address.  First we need to make sure the
	 * firmware is actually running.
	 */
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);

	if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
	    BNX2_DEV_INFO_SIGNATURE_MAGIC) {
		dev_err(&pdev->dev, "Firmware not running, aborting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bnx2_read_vpd_fw_ver(bp);

	/* Append "bc x.y.z" (bootcode revision) to the version string. */
	j = strlen(bp->fw_version);
	reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
	for (i = 0; i < 3 && j < 24; i++) {
		u8 num, k, skip0;

		if (i == 0) {
			bp->fw_version[j++] = 'b';
			bp->fw_version[j++] = 'c';
			bp->fw_version[j++] = ' ';
		}
		num = (u8) (reg >> (24 - (i * 8)));
		/* Decimal-format one byte, suppressing leading zeros. */
		for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
			if (num >= k || !skip0 || k == 1) {
				bp->fw_version[j++] = (num / k) + '0';
				skip0 = 0;
			}
		}
		if (i != 2)
			bp->fw_version[j++] = '.';
	}
	reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
	if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
		bp->wol = 1;

	if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
		bp->flags |= BNX2_FLAG_ASF_ENABLE;

		/* Wait (up to 300 ms) for the management firmware to
		 * report a running state.
		 */
		for (i = 0; i < 30; i++) {
			reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
			if (reg & BNX2_CONDITION_MFW_RUN_MASK)
				break;
			msleep(10);
		}
	}
	/* If management firmware is running, append its version too. */
	reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
	reg &= BNX2_CONDITION_MFW_RUN_MASK;
	if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
	    reg != BNX2_CONDITION_MFW_RUN_NONE) {
		u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);

		if (j < 32)
			bp->fw_version[j++] = ' ';
		for (i = 0; i < 3 && j < 28; i++) {
			reg = bnx2_reg_rd_ind(bp, addr + i * 4);
			reg = swab32(reg);
			memcpy(&bp->fw_version[j], &reg, 4);
			j += 4;
		}
	}

	/* Permanent MAC address, packed into two shared-memory words. */
	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
	bp->mac_addr[0] = (u8) (reg >> 8);
	bp->mac_addr[1] = (u8) reg;

	reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
	bp->mac_addr[2] = (u8) (reg >> 24);
	bp->mac_addr[3] = (u8) (reg >> 16);
	bp->mac_addr[4] = (u8) (reg >> 8);
	bp->mac_addr[5] = (u8) reg;

	bp->tx_ring_size = MAX_TX_DESC_CNT;
	bnx2_set_rx_ring_size(bp, 255);

	bp->rx_csum = 1;

	/* Default interrupt coalescing parameters. */
	bp->tx_quick_cons_trip_int = 2;
	bp->tx_quick_cons_trip = 20;
	bp->tx_ticks_int = 18;
	bp->tx_ticks = 80;

	bp->rx_quick_cons_trip_int = 2;
	bp->rx_quick_cons_trip = 12;
	bp->rx_ticks_int = 18;
	bp->rx_ticks = 18;

	bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	bp->current_interval = BNX2_TIMER_INTERVAL;

	bp->phy_addr = 1;

	/* Disable WOL support if we are running on a SERDES chip. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_get_5709_media(bp);
	else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;

	bp->phy_port = PORT_TP;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bp->phy_port = PORT_FIBRE;
		reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
			bp->flags |= BNX2_FLAG_NO_WOL;
			bp->wol = 0;
		}
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* Don't do parallel detect on this board because of
			 * some board problems.  The link will not go down
			 * if we do parallel detect.
			 */
			if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
			    pdev->subsystem_device == 0x310c)
				bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
		} else {
			bp->phy_addr = 2;
			if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
				bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
		}
	} else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
		   CHIP_NUM(bp) == CHIP_NUM_5708)
		bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
	else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
		 (CHIP_REV(bp) == CHIP_REV_Ax ||
		  CHIP_REV(bp) == CHIP_REV_Bx))
		bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;

	bnx2_init_fw_cap(bp);

	/* No WOL on these revisions, or when no auxiliary power rail is
	 * present.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
	    !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
		bp->flags |= BNX2_FLAG_NO_WOL;
		bp->wol = 0;
	}

	/* 5706 A0: interrupt-mode coalescing values mirror the normal
	 * ones.
	 */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		bp->tx_quick_cons_trip_int =
			bp->tx_quick_cons_trip;
		bp->tx_ticks_int = bp->tx_ticks;
		bp->rx_quick_cons_trip_int =
			bp->rx_quick_cons_trip;
		bp->rx_ticks_int = bp->rx_ticks;
		bp->comp_prod_trip_int = bp->comp_prod_trip;
		bp->com_ticks_int = bp->com_ticks;
		bp->cmd_ticks_int = bp->cmd_ticks;
	}

	/* Disable MSI on 5706 if AMD 8132 bridge is found.
	 *
	 * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
	 * with byte enables disabled on the unused 32-bit word.  This is legal
	 * but causes problems on the AMD 8132 which will eventually stop
	 * responding after a while.
	 *
	 * AMD believes this incompatibility is unique to the 5706, and
	 * prefers to locally disable MSI rather than globally disabling it.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
		struct pci_dev *amd_8132 = NULL;

		while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
						  PCI_DEVICE_ID_AMD_8132_BRIDGE,
						  amd_8132))) {

			if (amd_8132->revision >= 0x10 &&
			    amd_8132->revision <= 0x13) {
				disable_msi = 1;
				pci_dev_put(amd_8132);
				break;
			}
		}
	}

	bnx2_set_default_link(bp);
	bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;

	init_timer(&bp->timer);
	bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
8195
883e5151
MC
8196static char * __devinit
8197bnx2_bus_string(struct bnx2 *bp, char *str)
8198{
8199 char *s = str;
8200
f86e82fb 8201 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8202 s += sprintf(s, "PCI Express");
8203 } else {
8204 s += sprintf(s, "PCI");
f86e82fb 8205 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8206 s += sprintf(s, "-X");
f86e82fb 8207 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8208 s += sprintf(s, " 32-bit");
8209 else
8210 s += sprintf(s, " 64-bit");
8211 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8212 }
8213 return str;
8214}
8215
2ba582b7 8216static void __devinit
35efa7c1
MC
8217bnx2_init_napi(struct bnx2 *bp)
8218{
b4b36042 8219 int i;
35efa7c1 8220
4327ba43 8221 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8222 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8223 int (*poll)(struct napi_struct *, int);
8224
8225 if (i == 0)
8226 poll = bnx2_poll;
8227 else
f0ea2e63 8228 poll = bnx2_poll_msix;
35e9010b
MC
8229
8230 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8231 bnapi->bp = bp;
8232 }
35efa7c1
MC
8233}
8234
/* net_device callback table shared by every bnx2 interface. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8253
/* Mirror offload feature @flags into dev->vlan_features so the same
 * offloads remain advertised on VLAN devices stacked on this netdev.
 * Compiles to a no-op when VLAN support (BCM_VLAN) is not built in.
 *
 * Note: specifier order fixed to the conventional "static inline void"
 * (was "static void inline", which is legal but unidiomatic).
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8260
/* Probe one bnx2 NIC: allocate the multiqueue net_device, initialize the
 * board via bnx2_init_board(), load firmware, advertise offload features
 * appropriate to the chip, and register with the networking core.
 * Returns 0 on success or a negative errno; on failure all resources
 * acquired here and in bnx2_init_board() are released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only once, on the first probed device. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() cleans up after itself on failure. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from NVRAM by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	/* Checksum/SG offloads; only the 5709 also handles IPv6 csum/TSO6. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo firmware loading and everything bnx2_init_board() set up. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8344
/* Device removal: unregister the netdev and release, in reverse order,
 * every resource acquired at probe time.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Let any queued slow-path work finish before tearing down. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8370
/* PM suspend hook: save PCI config state, quiesce the interface if it is
 * running, shut the chip down and drop to the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Make sure no slow-path work races with the shutdown below. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8394
/* PM resume hook: restore PCI config state and, if the interface was up
 * at suspend time, re-power and re-initialize the NIC.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8411
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	/* A permanent failure cannot be recovered; tell the core to
	 * disconnect the device.
	 */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8446
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);
	/* Re-save the state so it is available again for a later recovery. */
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8477
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8497
/* PCI AER (Advanced Error Reporting) recovery callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8503
/* PCI driver descriptor tying probe/remove, PM and AER hooks together. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8513
/* Module entry point: register the PCI driver with the PCI core. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8518
/* Module exit point: unregister the PCI driver. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8523
/* Wire the entry/exit points into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8526
8527
8528