/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h> /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
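
/* The two helpers above drive the GRC window in PCI config space: the
 * target GRC address is latched into PCICFG_GRC_ADDRESS, the data moves
 * through PCICFG_GRC_DATA, and the window is parked back on
 * PCICFG_VENDOR_ID_OFFSET so later config cycles see sane data.  A
 * read-modify-write without DMAE would therefore look like:
 *
 *	u32 v = bnx2x_reg_rd_ind(bp, addr);
 *	bnx2x_reg_wr_ind(bp, addr, v | some_bit);
 *
 * (illustration only - "some_bit" is a placeholder, not a driver symbol)
 */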

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
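
/* Each of the 16 GO registers above kicks one DMAE context: the command
 * block is copied one dword at a time into the command memory slot for
 * that context, and the final write of 1 to dmae_reg_go_c[idx] starts the
 * engine on it.
 */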

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
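
/* Completion is signalled by the engine itself: the command's comp_addr
 * points at the slowpath wb_comp word and the engine writes DMAE_COMP_VAL
 * there when the copy is done, so the loop above is a plain memory poll
 * (up to 200 iterations) rather than a register read.  A timeout only
 * logs an error; the mutex is released either way.
 */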

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
	   "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
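
/* The ack above is a single 32-bit write: the status block index plus the
 * sb/storm ids, the update flag and the interrupt mode are packed into one
 * igu_ack_register and posted to the per-port HC command register, e.g.
 *
 *	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
 *
 * as done from the MSI-X fastpath ISR further down.
 */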

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
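
/* The return value is a bitmask: bit 0 means the CSTORM status block
 * index moved, bit 1 means the USTORM index moved; a nonzero result
 * tells the caller there is new fastpath work to look at.
 */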

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
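
/* BD accounting for the walk above: the hardware's nbd field counts every
 * BD of the packet, and the local nbd drops the first BD immediately.  A
 * checksummed/LSO packet is thus laid out as first BD + parse BD (+ an
 * optional TSO split-header BD) + one BD per fragment; only the first BD
 * and the fragment BDs carry DMA mappings, which is why the parse and
 * split-header BDs are stepped over without an unmap.
 */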

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
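
/* A worked example of the arithmetic above: the NUM_TX_RINGS "next page"
 * BDs can never carry data, so they are folded into "used"; with
 * prod == cons (an empty ring) the function therefore reports
 * tx_ring_size - NUM_TX_RINGS free BDs, not tx_ring_size.
 */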

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
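
/* How the mask interacts with bnx2x_update_sge_prod() above: a bit is
 * cleared for every SGE the FW consumes in a TPA completion; once a whole
 * 64-bit mask element reaches zero, the producer may jump forward by
 * RX_SGE_MASK_ELEM_SZ entries and the element is refilled to all ones for
 * the next lap around the ring.  The two "next page" indices per page are
 * kept permanently cleared so they never hold the producer back.
 */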

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_record_rx_queue(skb, queue);

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
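
/* The three producers live as consecutive u32s in USTORM internal memory,
 * so the loop above simply copies the ustorm_eth_rx_producers struct there
 * one dword at a time; the wmb()/mmiowb() pair keeps the BD/SGE contents
 * and the per-dword writes ordered as the comment explains.
 */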

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
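/*
 * Worked example of the shift arithmetic in bnx2x_set_gpio() (assumed
 * numbers, for illustration only): for GPIO 2 on the swapped port,
 * gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT, and the resulting
 * (1 << gpio_shift) mask is then moved into the FLOAT/CLR/SET position
 * fields of MISC_REG_GPIO by the per-mode shifts above.
 */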

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
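/*
 * Worked example (assumed numbers, for illustration only): with two visible
 * functions configured for min BW values 25 and 75, vn_min_rate becomes
 * 2500 and 7500 and bnx2x_calc_vn_wsum() returns 10000.  If every visible
 * function is configured as 0, each is counted as DEF_MIN_RATE but
 * all_zero stays set, so the function returns 0 and fairness is disabled.
 */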

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode - minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
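/*
 * Worked example of the arithmetic above (illustrative; uses the 10G
 * figure stated in the t_fair comment): with port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec, so rs_threshold becomes
 * 100 * 1250 * 5/4 = 156250 bytes, and t_fair = T_FAIR_COEF / 10000 =
 * 1000 usec, i.e. the fairness window holds one millisecond's worth of
 * port bandwidth.
 */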

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
			       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
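/*
 * Worked example for vn_credit_delta (illustrative, using the bounds from
 * the comments above): with wsum = 10000 and vn_min_rate = 2500,
 * vn_min_rate * (T_FAIR_COEF / (8 * wsum)) is this vn's byte share of one
 * T_FAIR window; the max() against fair_threshold * 2 only takes over for
 * very small min rates, keeping the credit above the algorithm resolution.
 */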

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
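/*
 * Typical use of bnx2x_sp_post() (a sketch modelled on the statistics
 * query posted later in this file; bnx2x_sp_post_example() is hypothetical
 * and not referenced anywhere): callers pass the ramrod command, the CID,
 * the 64-bit data split into hi/lo halves, and 'common' to select a
 * common (non per-client) ramrod.
 */
#if 0	/* illustrative sketch only */
static int bnx2x_sp_post_example(struct bnx2x *bp, u32 data_hi, u32 data_lo)
{
	/* post a statistics query ramrod on CID 0, not a common ramrod */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			     data_hi, data_lo, 0);
}
#endif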

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
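/*
 * The return value above is a bitmask consumed by bnx2x_sp_task():
 * bit 0 - attention bits changed, bit 1 - CStorm, bit 2 - UStorm,
 * bit 3 - XStorm, bit 4 - TStorm default status block index changed.
 */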

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
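/*
 * Worked example (illustrative): if attn_bits has bit 5 set while attn_ack
 * and attn_state have it clear, bit 5 shows up in 'asserted'; once the bit
 * is acked and tracked in attn_state, its disappearance from attn_bits
 * makes it show up in 'deasserted'.  A bit on which attn_bits and attn_ack
 * agree while attn_state disagrees is the "BAD attention state" case above.
 */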

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)
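/*
 * A minimal sketch of the split-64 helpers above (example only, not driver
 * logic): the carry in ADD_64 relies on unsigned wraparound, and DIFF_64
 * borrows one from the high word on low-word underflow.
 */
#if 0	/* illustrative sketch only */
static void bnx2x_add64_example(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;

	ADD_64(s_hi, 0, s_lo, 1);	/* s_lo wraps to 0, carry: s_hi = 1 */
}
#endif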

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
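/*
 * Note on the loader pattern above (as the descriptors are wired in this
 * file): intermediate statistics DMAEs complete into a GO register rather
 * than into host memory, so each completion kicks the next transfer, and
 * only the final descriptor writes DMAE_COMP_VAL into the stats_comp word
 * that bnx2x_stats_comp() polls.
 */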

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3423
bb2a0f7a 3424static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3425{
bb2a0f7a
YG
3426 struct dmae_command *dmae = &bp->stats_dmae;
3427 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3428
bb2a0f7a
YG
3429 /* sanity */
3430 if (!bp->func_stx) {
3431 BNX2X_ERR("BUG!\n");
3432 return;
3433 }
a2fbb9ea 3434
bb2a0f7a
YG
3435 bp->executer_idx = 0;
3436 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3437
bb2a0f7a
YG
3438 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3439 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3440 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3441#ifdef __BIG_ENDIAN
3442 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3443#else
3444 DMAE_CMD_ENDIANITY_DW_SWAP |
3445#endif
3446 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3447 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3448 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3449 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3450 dmae->dst_addr_lo = bp->func_stx >> 2;
3451 dmae->dst_addr_hi = 0;
3452 dmae->len = sizeof(struct host_func_stats) >> 2;
3453 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3454 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3455 dmae->comp_val = DMAE_COMP_VAL;
3456
3457 *stats_comp = 0;
3458}
3459
3460static void bnx2x_stats_start(struct bnx2x *bp)
3461{
3462 if (bp->port.pmf)
3463 bnx2x_port_stats_init(bp);
3464
3465 else if (bp->func_stx)
3466 bnx2x_func_stats_init(bp);
3467
3468 bnx2x_hw_stats_post(bp);
3469 bnx2x_storm_stats_post(bp);
3470}
3471
3472static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3473{
3474 bnx2x_stats_comp(bp);
3475 bnx2x_stats_pmf_update(bp);
3476 bnx2x_stats_start(bp);
3477}
3478
3479static void bnx2x_stats_restart(struct bnx2x *bp)
3480{
3481 bnx2x_stats_comp(bp);
3482 bnx2x_stats_start(bp);
3483}
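/* The three helpers above drive the ENABLED column of the statistics
 * state machine: a freshly nominated PMF first completes any pending
 * DMAE and imports the port stats left by the previous PMF
 * (bnx2x_stats_pmf_update) before re-arming both the hardware (DMAE)
 * and storm (ramrod) statistics queries.
 */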
3484
3485static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3486{
3487 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3488 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3489 struct regpair diff;
3490
3491 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3492 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3493 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3494 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3495 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3496 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3497 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3498 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3499 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3500 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3501 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3502 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3503 UPDATE_STAT64(tx_stat_gt127,
3504 tx_stat_etherstatspkts65octetsto127octets);
3505 UPDATE_STAT64(tx_stat_gt255,
3506 tx_stat_etherstatspkts128octetsto255octets);
3507 UPDATE_STAT64(tx_stat_gt511,
3508 tx_stat_etherstatspkts256octetsto511octets);
3509 UPDATE_STAT64(tx_stat_gt1023,
3510 tx_stat_etherstatspkts512octetsto1023octets);
3511 UPDATE_STAT64(tx_stat_gt1518,
3512 tx_stat_etherstatspkts1024octetsto1522octets);
3513 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3514 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3515 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3516 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3517 UPDATE_STAT64(tx_stat_gterr,
3518 tx_stat_dot3statsinternalmactransmiterrors);
3519 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3520}
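/* UPDATE_STAT64() above (and UPDATE_EXTEND_STAT() below) accumulate
 * each hardware MAC counter into a 64-bit hi/lo pair in the host
 * copy; the local 'diff' regpair lets the 64-bit macros cope with
 * counter wrap-around between two readings.
 */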
3521
3522static void bnx2x_emac_stats_update(struct bnx2x *bp)
3523{
3524 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3525 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3526
3527 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3528 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3529 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3530 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3531 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3532 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3533 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3534 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3535 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3536 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3537 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3538 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3539 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3540 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3541 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3542 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3543 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3544 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3545 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3546 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3547 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3548 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3549 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3550 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3551 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3552 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3553 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3554 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3556 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3558}
3559
3560static int bnx2x_hw_stats_update(struct bnx2x *bp)
3561{
3562 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3563 struct nig_stats *old = &(bp->port.old_nig_stats);
3564 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3565 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3566 struct regpair diff;
3567
3568 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3569 bnx2x_bmac_stats_update(bp);
3570
3571 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3572 bnx2x_emac_stats_update(bp);
3573
3574 else { /* unreached */
3575 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3576 return -1;
3577 }
3578
3579 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3580 new->brb_discard - old->brb_discard);
3581 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3582 new->brb_truncate - old->brb_truncate);
3583
3584 UPDATE_STAT64_NIG(egress_mac_pkt0,
3585 etherstatspkts1024octetsto1522octets);
3586 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3587
3588 memcpy(old, new, sizeof(struct nig_stats));
3589
3590 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3591 sizeof(struct mac_stx));
3592 estats->brb_drop_hi = pstats->brb_drop_hi;
3593 estats->brb_drop_lo = pstats->brb_drop_lo;
3594
3595 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3596
3597 return 0;
3598}
3599
3600 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3601{
3602 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3603 int cl_id = BP_CL_ID(bp);
3604 struct tstorm_per_port_stats *tport =
3605 &stats->tstorm_common.port_statistics;
3606 struct tstorm_per_client_stats *tclient =
3607 &stats->tstorm_common.client_statistics[cl_id];
3608 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3609 struct xstorm_per_client_stats *xclient =
3610 &stats->xstorm_common.client_statistics[cl_id];
3611 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3612 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3613 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3614 u32 diff;
3615
3616 /* are storm stats valid? */
3617 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3618 bp->stats_counter) {
3619 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3620 " tstorm counter (%d) != stats_counter (%d)\n",
3621 tclient->stats_counter, bp->stats_counter);
3622 return -1;
3623 }
3624 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3625 bp->stats_counter) {
3626 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3627 " xstorm counter (%d) != stats_counter (%d)\n",
3628 xclient->stats_counter, bp->stats_counter);
3629 return -2;
3630 }
3631
3632 fstats->total_bytes_received_hi =
3633 fstats->valid_bytes_received_hi =
3634 le32_to_cpu(tclient->total_rcv_bytes.hi);
3635 fstats->total_bytes_received_lo =
3636 fstats->valid_bytes_received_lo =
3637 le32_to_cpu(tclient->total_rcv_bytes.lo);
3638
3639 estats->error_bytes_received_hi =
3640 le32_to_cpu(tclient->rcv_error_bytes.hi);
3641 estats->error_bytes_received_lo =
3642 le32_to_cpu(tclient->rcv_error_bytes.lo);
3643 ADD_64(estats->error_bytes_received_hi,
3644 estats->rx_stat_ifhcinbadoctets_hi,
3645 estats->error_bytes_received_lo,
3646 estats->rx_stat_ifhcinbadoctets_lo);
3647
3648 ADD_64(fstats->total_bytes_received_hi,
3649 estats->error_bytes_received_hi,
3650 fstats->total_bytes_received_lo,
3651 estats->error_bytes_received_lo);
3652
3653 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3654 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3655 total_multicast_packets_received);
3656 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3657 total_broadcast_packets_received);
3658
3659 fstats->total_bytes_transmitted_hi =
3660 le32_to_cpu(xclient->total_sent_bytes.hi);
3661 fstats->total_bytes_transmitted_lo =
3662 le32_to_cpu(xclient->total_sent_bytes.lo);
3663
3664 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3665 total_unicast_packets_transmitted);
3666 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3667 total_multicast_packets_transmitted);
3668 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3669 total_broadcast_packets_transmitted);
3670
3671 memcpy(estats, &(fstats->total_bytes_received_hi),
3672 sizeof(struct host_func_stats) - 2*sizeof(u32));
3673
3674 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3675 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3676 estats->brb_truncate_discard =
3677 le32_to_cpu(tport->brb_truncate_discard);
3678 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3679
3680 old_tclient->rcv_unicast_bytes.hi =
3681 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3682 old_tclient->rcv_unicast_bytes.lo =
3683 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3684 old_tclient->rcv_broadcast_bytes.hi =
3685 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3686 old_tclient->rcv_broadcast_bytes.lo =
3687 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3688 old_tclient->rcv_multicast_bytes.hi =
3689 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3690 old_tclient->rcv_multicast_bytes.lo =
3691 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3692 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3693
3694 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3695 old_tclient->packets_too_big_discard =
3696 le32_to_cpu(tclient->packets_too_big_discard);
3697 estats->no_buff_discard =
3698 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3699 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3700
3701 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3702 old_xclient->unicast_bytes_sent.hi =
3703 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3704 old_xclient->unicast_bytes_sent.lo =
3705 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3706 old_xclient->multicast_bytes_sent.hi =
3707 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3708 old_xclient->multicast_bytes_sent.lo =
3709 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3710 old_xclient->broadcast_bytes_sent.hi =
3711 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3712 old_xclient->broadcast_bytes_sent.lo =
3713 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3714
3715 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3716
3717 return 0;
3718}
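/* The checks at the top of bnx2x_storm_stats_update() implement a
 * simple producer/consumer handshake: a snapshot is consumed only if
 * the per-storm stats_counter is exactly one behind the counter the
 * driver sent with its last statistics ramrod; otherwise the update
 * is skipped and retried on the next timer tick.
 */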
3719
3720 static void bnx2x_net_stats_update(struct bnx2x *bp)
3721 {
3722 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3723 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3724 struct net_device_stats *nstats = &bp->dev->stats;
3725
3726 nstats->rx_packets =
3727 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3728 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3729 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3730
3731 nstats->tx_packets =
3732 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3733 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3734 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3735
3736 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3737
3738 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3739
3740 nstats->rx_dropped = old_tclient->checksum_discard +
3741 estats->mac_discard;
3742 nstats->tx_dropped = 0;
3743
3744 nstats->multicast =
3745 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3746
3747 nstats->collisions =
3748 estats->tx_stat_dot3statssinglecollisionframes_lo +
3749 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3750 estats->tx_stat_dot3statslatecollisions_lo +
3751 estats->tx_stat_dot3statsexcessivecollisions_lo;
3752
3753 estats->jabber_packets_received =
3754 old_tclient->packets_too_big_discard +
3755 estats->rx_stat_dot3statsframestoolong_lo;
3756
3757 nstats->rx_length_errors =
3758 estats->rx_stat_etherstatsundersizepkts_lo +
3759 estats->jabber_packets_received;
3760 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3761 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3762 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3763 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3764 nstats->rx_missed_errors = estats->xxoverflow_discard;
3765
3766 nstats->rx_errors = nstats->rx_length_errors +
3767 nstats->rx_over_errors +
3768 nstats->rx_crc_errors +
3769 nstats->rx_frame_errors +
3770 nstats->rx_fifo_errors +
3771 nstats->rx_missed_errors;
3772
3773 nstats->tx_aborted_errors =
3774 estats->tx_stat_dot3statslatecollisions_lo +
3775 estats->tx_stat_dot3statsexcessivecollisions_lo;
3776 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3777 nstats->tx_fifo_errors = 0;
3778 nstats->tx_heartbeat_errors = 0;
3779 nstats->tx_window_errors = 0;
3780
3781 nstats->tx_errors = nstats->tx_aborted_errors +
3782 nstats->tx_carrier_errors;
3783}
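/* bnx2x_net_stats_update() above is the only place where the
 * driver-private eth_stats/tclient counters are folded into the
 * standard struct net_device_stats reported to the stack; note that
 * rx_errors is the sum of the individual rx error categories
 * computed just before it.
 */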
3784
3785 static void bnx2x_stats_update(struct bnx2x *bp)
3786 {
3787 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3788 int update = 0;
3789
3790 if (*stats_comp != DMAE_COMP_VAL)
3791 return;
3792
3793 if (bp->port.pmf)
3794 update = (bnx2x_hw_stats_update(bp) == 0);
3795
3796 update |= (bnx2x_storm_stats_update(bp) == 0);
3797
3798 if (update)
3799 bnx2x_net_stats_update(bp);
3800
3801 else {
3802 if (bp->stats_pending) {
3803 bp->stats_pending++;
3804 if (bp->stats_pending == 3) {
3805 BNX2X_ERR("stats were not updated 3 times in a row\n");
3806 bnx2x_panic();
3807 return;
3808 }
3809 }
3810 }
3811
3812 if (bp->msglevel & NETIF_MSG_TIMER) {
3813 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3814 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3815 struct net_device_stats *nstats = &bp->dev->stats;
3816 int i;
3817
3818 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3819 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3820 " tx pkt (%lx)\n",
3821 bnx2x_tx_avail(bp->fp),
3822 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3823 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3824 " rx pkt (%lx)\n",
3825 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3826 bp->fp->rx_comp_cons),
3827 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3828 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3829 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3830 estats->driver_xoff, estats->brb_drop_lo);
3831 printk(KERN_DEBUG "tstats: checksum_discard %u "
3832 "packets_too_big_discard %u no_buff_discard %u "
3833 "mac_discard %u mac_filter_discard %u "
3834 "xxovrflow_discard %u brb_truncate_discard %u "
3835 "ttl0_discard %u\n",
3836 old_tclient->checksum_discard,
3837 old_tclient->packets_too_big_discard,
3838 old_tclient->no_buff_discard, estats->mac_discard,
3839 estats->mac_filter_discard, estats->xxoverflow_discard,
3840 estats->brb_truncate_discard,
3841 old_tclient->ttl0_discard);
3842
3843 for_each_queue(bp, i) {
3844 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3845 bnx2x_fp(bp, i, tx_pkt),
3846 bnx2x_fp(bp, i, rx_pkt),
3847 bnx2x_fp(bp, i, rx_calls));
3848 }
3849 }
3850
3851 bnx2x_hw_stats_post(bp);
3852 bnx2x_storm_stats_post(bp);
3853}
3854
3855static void bnx2x_port_stats_stop(struct bnx2x *bp)
3856{
3857 struct dmae_command *dmae;
3858 u32 opcode;
3859 int loader_idx = PMF_DMAE_C(bp);
3860 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3861
3862 bp->executer_idx = 0;
3863
3864 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3865 DMAE_CMD_C_ENABLE |
3866 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3867#ifdef __BIG_ENDIAN
3868 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3869#else
3870 DMAE_CMD_ENDIANITY_DW_SWAP |
3871#endif
3872 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3873 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3874
3875 if (bp->port.port_stx) {
3876
3877 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3878 if (bp->func_stx)
3879 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3880 else
3881 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3882 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3883 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3884 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3885 dmae->dst_addr_hi = 0;
3886 dmae->len = sizeof(struct host_port_stats) >> 2;
3887 if (bp->func_stx) {
3888 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3889 dmae->comp_addr_hi = 0;
3890 dmae->comp_val = 1;
3891 } else {
3892 dmae->comp_addr_lo =
3893 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3894 dmae->comp_addr_hi =
3895 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3896 dmae->comp_val = DMAE_COMP_VAL;
3897
3898 *stats_comp = 0;
3899 }
3900 }
3901
3902 if (bp->func_stx) {
3903
3904 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3905 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3906 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3907 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3908 dmae->dst_addr_lo = bp->func_stx >> 2;
3909 dmae->dst_addr_hi = 0;
3910 dmae->len = sizeof(struct host_func_stats) >> 2;
3911 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3912 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3913 dmae->comp_val = DMAE_COMP_VAL;
3914
3915 *stats_comp = 0;
3916 }
3917}
3918
3919static void bnx2x_stats_stop(struct bnx2x *bp)
3920{
3921 int update = 0;
3922
3923 bnx2x_stats_comp(bp);
3924
3925 if (bp->port.pmf)
3926 update = (bnx2x_hw_stats_update(bp) == 0);
3927
3928 update |= (bnx2x_storm_stats_update(bp) == 0);
3929
3930 if (update) {
3931 bnx2x_net_stats_update(bp);
3932
3933 if (bp->port.pmf)
3934 bnx2x_port_stats_stop(bp);
3935
3936 bnx2x_hw_stats_post(bp);
3937 bnx2x_stats_comp(bp);
3938 }
3939}
3940
3941static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3942{
3943}
3944
3945static const struct {
3946 void (*action)(struct bnx2x *bp);
3947 enum bnx2x_stats_state next_state;
3948} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3949/* state event */
3950{
3951/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3952/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3953/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3954/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3955},
3956{
3957/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3958/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3959/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3960/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3961}
3962};
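/* Statistics state machine: two states (DISABLED/ENABLED) by four
 * events (PMF, LINK_UP, UPDATE, STOP). Each cell names the action to
 * run and the next state, so e.g. a LINK_UP event in DISABLED starts
 * collection and moves to ENABLED, while STOP in ENABLED performs a
 * final update and disables collection.
 */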
3963
3964static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3965{
3966 enum bnx2x_stats_state state = bp->stats_state;
3967
3968 bnx2x_stats_stm[state][event].action(bp);
3969 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3970
3971 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3972 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3973 state, event, bp->stats_state);
3974}
3975
3976static void bnx2x_timer(unsigned long data)
3977{
3978 struct bnx2x *bp = (struct bnx2x *) data;
3979
3980 if (!netif_running(bp->dev))
3981 return;
3982
3983 if (atomic_read(&bp->intr_sem) != 0)
3984 goto timer_restart;
3985
3986 if (poll) {
3987 struct bnx2x_fastpath *fp = &bp->fp[0];
3988 int rc;
3989
3990 bnx2x_tx_int(fp, 1000);
3991 rc = bnx2x_rx_int(fp, 1000);
3992 }
3993
3994 if (!BP_NOMCP(bp)) {
3995 int func = BP_FUNC(bp);
3996 u32 drv_pulse;
3997 u32 mcp_pulse;
3998
3999 ++bp->fw_drv_pulse_wr_seq;
4000 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4001 /* TBD - add SYSTEM_TIME */
4002 drv_pulse = bp->fw_drv_pulse_wr_seq;
4003 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4004
4005 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4006 MCP_PULSE_SEQ_MASK);
4007 /* The delta between driver pulse and mcp response
4008 * should be 1 (before mcp response) or 0 (after mcp response)
4009 */
4010 if ((drv_pulse != mcp_pulse) &&
4011 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4012 /* someone lost a heartbeat... */
4013 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4014 drv_pulse, mcp_pulse);
4015 }
4016 }
4017
4018 if ((bp->state == BNX2X_STATE_OPEN) ||
4019 (bp->state == BNX2X_STATE_DISABLED))
4020 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4021
4022timer_restart:
4023 mod_timer(&bp->timer, jiffies + bp->current_interval);
4024}
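/* bnx2x_timer() runs every bp->current_interval jiffies and does two
 * things: it feeds the driver/MCP pulse handshake (each side advances
 * a sequence number, and a delta other than 0 or 1 means a lost
 * heartbeat) and, when the device is up, kicks a statistics update.
 */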
4025
4026/* end of Statistics */
4027
4028/* nic init */
4029
4030/*
4031 * nic init service functions
4032 */
4033
4034 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4035 {
4036 int port = BP_PORT(bp);
4037
4038 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4039 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4040 sizeof(struct ustorm_status_block)/4);
4041 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4042 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4043 sizeof(struct cstorm_status_block)/4);
4044}
4045
4046static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4047 dma_addr_t mapping, int sb_id)
4048{
4049 int port = BP_PORT(bp);
4050 int func = BP_FUNC(bp);
4051 int index;
4052 u64 section;
4053
4054 /* USTORM */
4055 section = ((u64)mapping) + offsetof(struct host_status_block,
4056 u_status_block);
4057 sb->u_status_block.status_block_id = sb_id;
4058
4059 REG_WR(bp, BAR_USTRORM_INTMEM +
4060 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4061 REG_WR(bp, BAR_USTRORM_INTMEM +
4062 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4063 U64_HI(section));
4064 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4065 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4066
4067 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4068 REG_WR16(bp, BAR_USTRORM_INTMEM +
4069 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4070
4071 /* CSTORM */
4072 section = ((u64)mapping) + offsetof(struct host_status_block,
4073 c_status_block);
4074 sb->c_status_block.status_block_id = sb_id;
4075
4076 REG_WR(bp, BAR_CSTRORM_INTMEM +
4077 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4078 REG_WR(bp, BAR_CSTRORM_INTMEM +
4079 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4080 U64_HI(section));
4081 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4082 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4083
4084 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4085 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4086 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4087
4088 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4089}
4090
4091static void bnx2x_zero_def_sb(struct bnx2x *bp)
4092{
4093 int func = BP_FUNC(bp);
4094
4095 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4096 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4097 sizeof(struct ustorm_def_status_block)/4);
4098 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4099 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4100 sizeof(struct cstorm_def_status_block)/4);
4101 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4102 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4103 sizeof(struct xstorm_def_status_block)/4);
4104 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4105 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4106 sizeof(struct tstorm_def_status_block)/4);
4107}
4108
4109static void bnx2x_init_def_sb(struct bnx2x *bp,
4110 struct host_def_status_block *def_sb,
4111 dma_addr_t mapping, int sb_id)
4112 {
4113 int port = BP_PORT(bp);
4114 int func = BP_FUNC(bp);
4115 int index, val, reg_offset;
4116 u64 section;
4117
4118 /* ATTN */
4119 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4120 atten_status_block);
4121 def_sb->atten_status_block.status_block_id = sb_id;
4122
4123 bp->attn_state = 0;
4124
4125 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4126 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4127
4128 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4129 bp->attn_group[index].sig[0] = REG_RD(bp,
4130 reg_offset + 0x10*index);
4131 bp->attn_group[index].sig[1] = REG_RD(bp,
4132 reg_offset + 0x4 + 0x10*index);
4133 bp->attn_group[index].sig[2] = REG_RD(bp,
4134 reg_offset + 0x8 + 0x10*index);
4135 bp->attn_group[index].sig[3] = REG_RD(bp,
4136 reg_offset + 0xc + 0x10*index);
4137 }
4138
4139 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4140 HC_REG_ATTN_MSG0_ADDR_L);
4141
4142 REG_WR(bp, reg_offset, U64_LO(section));
4143 REG_WR(bp, reg_offset + 4, U64_HI(section));
4144
4145 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4146
4147 val = REG_RD(bp, reg_offset);
4148 val |= sb_id;
4149 REG_WR(bp, reg_offset, val);
4150
4151 /* USTORM */
4152 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4153 u_def_status_block);
4154 def_sb->u_def_status_block.status_block_id = sb_id;
4155
4156 REG_WR(bp, BAR_USTRORM_INTMEM +
4157 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4158 REG_WR(bp, BAR_USTRORM_INTMEM +
4159 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4160 U64_HI(section));
4161 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4162 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4163
4164 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4165 REG_WR16(bp, BAR_USTRORM_INTMEM +
4166 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4167
4168 /* CSTORM */
4169 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4170 c_def_status_block);
4171 def_sb->c_def_status_block.status_block_id = sb_id;
4172
4173 REG_WR(bp, BAR_CSTRORM_INTMEM +
4174 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4175 REG_WR(bp, BAR_CSTRORM_INTMEM +
4176 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4177 U64_HI(section));
4178 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4179 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4180
4181 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4182 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4183 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4184
4185 /* TSTORM */
4186 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4187 t_def_status_block);
4188 def_sb->t_def_status_block.status_block_id = sb_id;
4189
4190 REG_WR(bp, BAR_TSTRORM_INTMEM +
4191 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4192 REG_WR(bp, BAR_TSTRORM_INTMEM +
4193 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4194 U64_HI(section));
4195 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4196 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4197
4198 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4199 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4200 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4201
4202 /* XSTORM */
4203 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4204 x_def_status_block);
4205 def_sb->x_def_status_block.status_block_id = sb_id;
4206
4207 REG_WR(bp, BAR_XSTRORM_INTMEM +
4208 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4209 REG_WR(bp, BAR_XSTRORM_INTMEM +
4210 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4211 U64_HI(section));
4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4213 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4214
4215 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4216 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4217 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4218
4219 bp->stats_pending = 0;
4220 bp->set_mac_pending = 0;
4221
4222 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4223}
4224
4225static void bnx2x_update_coalesce(struct bnx2x *bp)
4226{
4227 int port = BP_PORT(bp);
4228 int i;
4229
4230 for_each_queue(bp, i) {
4231 int sb_id = bp->fp[i].sb_id;
4232
4233 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4234 REG_WR8(bp, BAR_USTRORM_INTMEM +
4235 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4236 U_SB_ETH_RX_CQ_INDEX),
4237 bp->rx_ticks/12);
4238 REG_WR16(bp, BAR_USTRORM_INTMEM +
4239 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4240 U_SB_ETH_RX_CQ_INDEX),
4241 bp->rx_ticks ? 0 : 1);
4242
4243 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4244 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4245 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4246 C_SB_ETH_TX_CQ_INDEX),
4247 bp->tx_ticks/12);
4248 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4249 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4250 C_SB_ETH_TX_CQ_INDEX),
4251 bp->tx_ticks ? 0 : 1);
4252 }
4253}
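/* One timeout/disable pair is written per status-block index: the Rx
 * CQ index in the USTORM section and the Tx CQ index in the CSTORM
 * section. The /12 presumably converts the user-visible *_ticks value
 * (microseconds) into the coarser resolution of the HC timeout field,
 * and a value of 0 disables coalescing on that index altogether.
 */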
4254
4255static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4256 struct bnx2x_fastpath *fp, int last)
4257{
4258 int i;
4259
4260 for (i = 0; i < last; i++) {
4261 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4262 struct sk_buff *skb = rx_buf->skb;
4263
4264 if (skb == NULL) {
4265 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4266 continue;
4267 }
4268
4269 if (fp->tpa_state[i] == BNX2X_TPA_START)
4270 pci_unmap_single(bp->pdev,
4271 pci_unmap_addr(rx_buf, mapping),
4272 bp->rx_buf_size,
4273 PCI_DMA_FROMDEVICE);
4274
4275 dev_kfree_skb(skb);
4276 rx_buf->skb = NULL;
4277 }
4278}
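/* The TPA (LRO) pool holds one spare skb per aggregation queue; on
 * teardown, a bin still in BNX2X_TPA_START owns a DMA mapping that
 * must be unmapped before its skb is freed, which is what the helper
 * above just did.
 */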
4279
4280static void bnx2x_init_rx_rings(struct bnx2x *bp)
4281{
4282 int func = BP_FUNC(bp);
4283 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4284 ETH_MAX_AGGREGATION_QUEUES_E1H;
4285 u16 ring_prod, cqe_ring_prod;
4286 int i, j;
4287
4288 bp->rx_buf_size = bp->dev->mtu;
4289 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
4290 BCM_RX_ETH_PAYLOAD_ALIGN;
4291
4292 if (bp->flags & TPA_ENABLE_FLAG) {
4293 DP(NETIF_MSG_IFUP,
4294 "rx_buf_size %d effective_mtu %d\n",
4295 bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);
4296
4297 for_each_rx_queue(bp, j) {
4298 struct bnx2x_fastpath *fp = &bp->fp[j];
4299
4300 for (i = 0; i < max_agg_queues; i++) {
4301 fp->tpa_pool[i].skb =
4302 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4303 if (!fp->tpa_pool[i].skb) {
4304 BNX2X_ERR("Failed to allocate TPA "
4305 "skb pool for queue[%d] - "
4306 "disabling TPA on this "
4307 "queue!\n", j);
4308 bnx2x_free_tpa_pool(bp, fp, i);
4309 fp->disable_tpa = 1;
4310 break;
4311 }
4312 pci_unmap_addr_set((struct sw_rx_bd *)
4313 &bp->fp->tpa_pool[i],
4314 mapping, 0);
4315 fp->tpa_state[i] = BNX2X_TPA_STOP;
4316 }
4317 }
4318 }
4319
4320 for_each_rx_queue(bp, j) {
4321 struct bnx2x_fastpath *fp = &bp->fp[j];
4322
4323 fp->rx_bd_cons = 0;
4324 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4325 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4326
4327 /* "next page" elements initialization */
4328 /* SGE ring */
4329 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4330 struct eth_rx_sge *sge;
4331
4332 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4333 sge->addr_hi =
4334 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4335 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4336 sge->addr_lo =
4337 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4338 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4339 }
4340
4341 bnx2x_init_sge_ring_bit_mask(fp);
4342
4343 /* RX BD ring */
4344 for (i = 1; i <= NUM_RX_RINGS; i++) {
4345 struct eth_rx_bd *rx_bd;
4346
4347 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4348 rx_bd->addr_hi =
4349 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4350 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4351 rx_bd->addr_lo =
4352 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4353 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4354 }
4355
4356 /* CQ ring */
4357 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4358 struct eth_rx_cqe_next_page *nextpg;
4359
4360 nextpg = (struct eth_rx_cqe_next_page *)
4361 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4362 nextpg->addr_hi =
4363 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4364 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4365 nextpg->addr_lo =
4366 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4367 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4368 }
4369
4370 /* Allocate SGEs and initialize the ring elements */
4371 for (i = 0, ring_prod = 0;
4372 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4373
4374 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4375 BNX2X_ERR("was only able to allocate "
4376 "%d rx sges\n", i);
4377 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4378 /* Cleanup already allocated elements */
4379 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4380 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4381 fp->disable_tpa = 1;
4382 ring_prod = 0;
4383 break;
4384 }
4385 ring_prod = NEXT_SGE_IDX(ring_prod);
4386 }
4387 fp->rx_sge_prod = ring_prod;
4388
4389 /* Allocate BDs and initialize BD ring */
4390 fp->rx_comp_cons = 0;
4391 cqe_ring_prod = ring_prod = 0;
4392 for (i = 0; i < bp->rx_ring_size; i++) {
4393 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4394 BNX2X_ERR("was only able to allocate "
4395 "%d rx skbs\n", i);
4396 bp->eth_stats.rx_skb_alloc_failed++;
4397 break;
4398 }
4399 ring_prod = NEXT_RX_IDX(ring_prod);
4400 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4401 WARN_ON(ring_prod <= i);
4402 }
4403
4404 fp->rx_bd_prod = ring_prod;
4405 /* must not have more available CQEs than BDs */
4406 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4407 cqe_ring_prod);
4408 fp->rx_pkt = fp->rx_calls = 0;
4409
4410 /* Warning!
4411 * this will generate an interrupt (to the TSTORM)
4412 * must only be done after chip is initialized
4413 */
4414 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4415 fp->rx_sge_prod);
4416 if (j != 0)
4417 continue;
4418
4419 REG_WR(bp, BAR_USTRORM_INTMEM +
4420 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4421 U64_LO(fp->rx_comp_mapping));
4422 REG_WR(bp, BAR_USTRORM_INTMEM +
4423 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4424 U64_HI(fp->rx_comp_mapping));
4425 }
4426}
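/* Rx initialization above builds three chained rings per queue (SGE,
 * BD and CQE), pre-allocates the TPA pool and the rx skbs, and only
 * then publishes the initial producers to the chip; as the warning in
 * the loop notes, updating the producers generates an interrupt
 * towards the TSTORM, so it must come last.
 */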
4427
4428static void bnx2x_init_tx_ring(struct bnx2x *bp)
4429{
4430 int i, j;
4431
4432 for_each_tx_queue(bp, j) {
4433 struct bnx2x_fastpath *fp = &bp->fp[j];
4434
4435 for (i = 1; i <= NUM_TX_RINGS; i++) {
4436 struct eth_tx_bd *tx_bd =
4437 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4438
4439 tx_bd->addr_hi =
4440 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4441 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4442 tx_bd->addr_lo =
4443 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4444 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4445 }
4446
4447 fp->tx_pkt_prod = 0;
4448 fp->tx_pkt_cons = 0;
4449 fp->tx_bd_prod = 0;
4450 fp->tx_bd_cons = 0;
4451 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4452 fp->tx_pkt = 0;
4453 }
4454}
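/* As with the Rx rings, the last descriptor of each Tx BD page is a
 * "next page" pointer: the loop above links page i to page i+1
 * (wrapping at NUM_TX_RINGS) so the hardware can follow the chain
 * without driver intervention.
 */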
4455
4456static void bnx2x_init_sp_ring(struct bnx2x *bp)
4457{
4458 int func = BP_FUNC(bp);
4459
4460 spin_lock_init(&bp->spq_lock);
4461
4462 bp->spq_left = MAX_SPQ_PENDING;
4463 bp->spq_prod_idx = 0;
4464 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4465 bp->spq_prod_bd = bp->spq;
4466 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4467
4468 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4469 U64_LO(bp->spq_mapping));
4470 REG_WR(bp,
4471 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4472 U64_HI(bp->spq_mapping));
4473
4474 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4475 bp->spq_prod_idx);
4476}
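/* The slow-path queue (SPQ) initialized above carries ramrods
 * (configuration commands) to the firmware; the driver only tells the
 * XSTORM where the queue page lives and what the current producer
 * index is, then bumps that producer as entries are posted.
 */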
4477
4478static void bnx2x_init_context(struct bnx2x *bp)
4479{
4480 int i;
4481
4482 for_each_queue(bp, i) {
4483 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4484 struct bnx2x_fastpath *fp = &bp->fp[i];
4485 u8 sb_id = FP_SB_ID(fp);
4486
4487 context->ustorm_st_context.common.sb_index_numbers =
4488 BNX2X_RX_SB_INDEX_NUM;
4489 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4490 context->ustorm_st_context.common.status_block_id = sb_id;
4491 context->ustorm_st_context.common.flags =
4492 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
4493 context->ustorm_st_context.common.mc_alignment_log_size =
4494 6 /*BCM_RX_ETH_PAYLOAD_ALIGN*/;
4495 context->ustorm_st_context.common.bd_buff_size =
4496 bp->rx_buf_size;
4497 context->ustorm_st_context.common.bd_page_base_hi =
4498 U64_HI(fp->rx_desc_mapping);
4499 context->ustorm_st_context.common.bd_page_base_lo =
4500 U64_LO(fp->rx_desc_mapping);
4501 if (!fp->disable_tpa) {
4502 context->ustorm_st_context.common.flags |=
4503 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4504 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4505 context->ustorm_st_context.common.sge_buff_size =
4506 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4507 (u32)0xffff);
4508 context->ustorm_st_context.common.sge_page_base_hi =
4509 U64_HI(fp->rx_sge_mapping);
4510 context->ustorm_st_context.common.sge_page_base_lo =
4511 U64_LO(fp->rx_sge_mapping);
4512 }
4513
4514 context->ustorm_ag_context.cdu_usage =
4515 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4516 CDU_REGION_NUMBER_UCM_AG,
4517 ETH_CONNECTION_TYPE);
4518
4519 context->xstorm_st_context.tx_bd_page_base_hi =
4520 U64_HI(fp->tx_desc_mapping);
4521 context->xstorm_st_context.tx_bd_page_base_lo =
4522 U64_LO(fp->tx_desc_mapping);
4523 context->xstorm_st_context.db_data_addr_hi =
4524 U64_HI(fp->tx_prods_mapping);
4525 context->xstorm_st_context.db_data_addr_lo =
4526 U64_LO(fp->tx_prods_mapping);
4527 context->xstorm_st_context.statistics_data = (fp->cl_id |
4528 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4529 context->cstorm_st_context.sb_index_number =
4530 C_SB_ETH_TX_CQ_INDEX;
4531 context->cstorm_st_context.status_block_id = sb_id;
4532
4533 context->xstorm_ag_context.cdu_reserved =
4534 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4535 CDU_REGION_NUMBER_XCM_AG,
4536 ETH_CONNECTION_TYPE);
4537 }
4538}
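/* Each fastpath queue gets an eth_context describing its rings to the
 * storms: the USTORM side sees the Rx BD/SGE pages and buffer sizes,
 * the XSTORM side the Tx BD pages and doorbell location, and the CDU
 * reservation values tie the context to its connection id.
 */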
4539
4540static void bnx2x_init_ind_table(struct bnx2x *bp)
4541{
4542 int func = BP_FUNC(bp);
4543 int i;
4544
4545 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4546 return;
4547
4548 DP(NETIF_MSG_IFUP,
4549 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4550 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4551 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4552 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4553 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4554}
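/* The indirection table written above maps each of the
 * TSTORM_INDIRECTION_TABLE_SIZE RSS hash buckets onto a client id,
 * cycling through the Rx queues round-robin; with multi_mode disabled
 * the table is left untouched and a single queue carries all traffic.
 */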
4555
4556static void bnx2x_set_client_config(struct bnx2x *bp)
4557{
4558 struct tstorm_eth_client_config tstorm_client = {0};
4559 int port = BP_PORT(bp);
4560 int i;
4561
4562 tstorm_client.mtu = bp->dev->mtu;
4563 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4564 tstorm_client.config_flags =
4565 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4566#ifdef BCM_VLAN
4567 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4568 tstorm_client.config_flags |=
4569 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4570 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4571 }
4572#endif
4573
4574 if (bp->flags & TPA_ENABLE_FLAG) {
4575 tstorm_client.max_sges_for_packet =
4576 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4577 tstorm_client.max_sges_for_packet =
4578 ((tstorm_client.max_sges_for_packet +
4579 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4580 PAGES_PER_SGE_SHIFT;
4581
4582 tstorm_client.config_flags |=
4583 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4584 }
4585
4586 for_each_queue(bp, i) {
4587 REG_WR(bp, BAR_TSTRORM_INTMEM +
4588 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4589 ((u32 *)&tstorm_client)[0]);
4590 REG_WR(bp, BAR_TSTRORM_INTMEM +
4591 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4592 ((u32 *)&tstorm_client)[1]);
4593 }
4594
4595 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4596 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4597}
4598
4599static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4600{
4601 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4602 int mode = bp->rx_mode;
4603 int mask = (1 << BP_L_ID(bp));
4604 int func = BP_FUNC(bp);
4605 int i;
4606
4607 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4608
4609 switch (mode) {
4610 case BNX2X_RX_MODE_NONE: /* no Rx */
4611 tstorm_mac_filter.ucast_drop_all = mask;
4612 tstorm_mac_filter.mcast_drop_all = mask;
4613 tstorm_mac_filter.bcast_drop_all = mask;
4614 break;
4615 case BNX2X_RX_MODE_NORMAL:
4616 tstorm_mac_filter.bcast_accept_all = mask;
4617 break;
4618 case BNX2X_RX_MODE_ALLMULTI:
4619 tstorm_mac_filter.mcast_accept_all = mask;
4620 tstorm_mac_filter.bcast_accept_all = mask;
4621 break;
4622 case BNX2X_RX_MODE_PROMISC:
4623 tstorm_mac_filter.ucast_accept_all = mask;
4624 tstorm_mac_filter.mcast_accept_all = mask;
4625 tstorm_mac_filter.bcast_accept_all = mask;
4626 break;
4627 default:
4628 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4629 break;
4630 }
4631
4632 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4633 REG_WR(bp, BAR_TSTRORM_INTMEM +
4634 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4635 ((u32 *)&tstorm_mac_filter)[i]);
4636
4637/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4638 ((u32 *)&tstorm_mac_filter)[i]); */
4639 }
4640
4641 if (mode != BNX2X_RX_MODE_NONE)
4642 bnx2x_set_client_config(bp);
4643}
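/* Each function owns one bit (BP_L_ID) in the per-chip accept/drop
 * masks, so the switch above programs promiscuity per function rather
 * than per chip; client-level configuration is refreshed afterwards
 * unless all Rx is being turned off.
 */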
4644
4645static void bnx2x_init_internal_common(struct bnx2x *bp)
4646{
4647 int i;
4648
4649 if (bp->flags & TPA_ENABLE_FLAG) {
4650 struct tstorm_eth_tpa_exist tpa = {0};
4651
4652 tpa.tpa_exist = 1;
4653
4654 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4655 ((u32 *)&tpa)[0]);
4656 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4657 ((u32 *)&tpa)[1]);
4658 }
4659
4660 /* Zero this manually as its initialization is
4661 currently missing in the initTool */
4662 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4663 REG_WR(bp, BAR_USTRORM_INTMEM +
4664 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4665}
4666
4667static void bnx2x_init_internal_port(struct bnx2x *bp)
4668{
4669 int port = BP_PORT(bp);
4670
4671 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4672 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4673 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4674 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4675}
4676
4677static void bnx2x_init_internal_func(struct bnx2x *bp)
4678 {
4679 struct tstorm_eth_function_common_config tstorm_config = {0};
4680 struct stats_indication_flags stats_flags = {0};
4681 int port = BP_PORT(bp);
4682 int func = BP_FUNC(bp);
4683 int i;
4684 u16 max_agg_size;
4685
4686 if (is_multi(bp)) {
4687 tstorm_config.config_flags = MULTI_FLAGS(bp);
4688 tstorm_config.rss_result_mask = MULTI_MASK;
4689 }
4690 if (IS_E1HMF(bp))
4691 tstorm_config.config_flags |=
4692 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4693
4694 tstorm_config.leading_client_id = BP_L_ID(bp);
4695
4696 REG_WR(bp, BAR_TSTRORM_INTMEM +
4697 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4698 (*(u32 *)&tstorm_config));
4699
4700 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4701 bnx2x_set_storm_rx_mode(bp);
4702
4703 /* reset xstorm per client statistics */
4704 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4705 REG_WR(bp, BAR_XSTRORM_INTMEM +
4706 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4707 i*4, 0);
4708 }
4709 /* reset tstorm per client statistics */
4710 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4711 REG_WR(bp, BAR_TSTRORM_INTMEM +
4712 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4713 i*4, 0);
4714 }
4715
4716 /* Init statistics related context */
4717 stats_flags.collect_eth = 1;
4718
4719 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4720 ((u32 *)&stats_flags)[0]);
4721 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4722 ((u32 *)&stats_flags)[1]);
4723
4724 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4725 ((u32 *)&stats_flags)[0]);
4726 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4727 ((u32 *)&stats_flags)[1]);
4728
4729 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4730 ((u32 *)&stats_flags)[0]);
4731 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4732 ((u32 *)&stats_flags)[1]);
4733
4734 REG_WR(bp, BAR_XSTRORM_INTMEM +
4735 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4736 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4737 REG_WR(bp, BAR_XSTRORM_INTMEM +
4738 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4739 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4740
4741 REG_WR(bp, BAR_TSTRORM_INTMEM +
4742 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4743 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4744 REG_WR(bp, BAR_TSTRORM_INTMEM +
4745 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4746 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4747
4748 if (CHIP_IS_E1H(bp)) {
4749 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4750 IS_E1HMF(bp));
4751 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4752 IS_E1HMF(bp));
4753 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4754 IS_E1HMF(bp));
4755 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4756 IS_E1HMF(bp));
4757
4758 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4759 bp->e1hov);
4760 }
4761
4762 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4763 max_agg_size =
4764 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4765 SGE_PAGE_SIZE * PAGES_PER_SGE),
4766 (u32)0xffff);
4767 for_each_rx_queue(bp, i) {
4768 struct bnx2x_fastpath *fp = &bp->fp[i];
4769
4770 REG_WR(bp, BAR_USTRORM_INTMEM +
4771 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4772 U64_LO(fp->rx_comp_mapping));
4773 REG_WR(bp, BAR_USTRORM_INTMEM +
4774 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4775 U64_HI(fp->rx_comp_mapping));
4776
4777 REG_WR16(bp, BAR_USTRORM_INTMEM +
4778 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4779 max_agg_size);
4780 }
4781}
4782
4783static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4784{
4785 switch (load_code) {
4786 case FW_MSG_CODE_DRV_LOAD_COMMON:
4787 bnx2x_init_internal_common(bp);
4788 /* no break */
4789
4790 case FW_MSG_CODE_DRV_LOAD_PORT:
4791 bnx2x_init_internal_port(bp);
4792 /* no break */
4793
4794 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4795 bnx2x_init_internal_func(bp);
4796 break;
4797
4798 default:
4799 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4800 break;
4801 }
4802}
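/* The deliberate fall-through above encodes the load hierarchy: the
 * first function on the chip runs the COMMON, PORT and FUNCTION
 * stages, the first function on a port runs PORT and FUNCTION, and
 * every other function runs only the FUNCTION stage.
 */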
4803
4804static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4805{
4806 int i;
4807
4808 for_each_queue(bp, i) {
4809 struct bnx2x_fastpath *fp = &bp->fp[i];
4810
4811 fp->bp = bp;
4812 fp->state = BNX2X_FP_STATE_CLOSED;
4813 fp->index = i;
4814 fp->cl_id = BP_L_ID(bp) + i;
4815 fp->sb_id = fp->cl_id;
4816 DP(NETIF_MSG_IFUP,
4817 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4818 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4819 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4820 FP_SB_ID(fp));
4821 bnx2x_update_fpsb_idx(fp);
4822 }
4823
4824 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4825 DEF_SB_ID);
4826 bnx2x_update_dsb_idx(bp);
4827 bnx2x_update_coalesce(bp);
4828 bnx2x_init_rx_rings(bp);
4829 bnx2x_init_tx_ring(bp);
4830 bnx2x_init_sp_ring(bp);
4831 bnx2x_init_context(bp);
4832 bnx2x_init_internal(bp, load_code);
4833 bnx2x_init_ind_table(bp);
4834 bnx2x_stats_init(bp);
4835
4836 /* At this point, we are ready for interrupts */
4837 atomic_set(&bp->intr_sem, 0);
4838
4839 /* flush all before enabling interrupts */
4840 mb();
4841 mmiowb();
4842
4843 bnx2x_int_enable(bp);
4844}
4845
4846/* end of nic init */
4847
4848/*
4849 * gzip service functions
4850 */
4851
4852static int bnx2x_gunzip_init(struct bnx2x *bp)
4853{
4854 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4855 &bp->gunzip_mapping);
4856 if (bp->gunzip_buf == NULL)
4857 goto gunzip_nomem1;
4858
4859 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4860 if (bp->strm == NULL)
4861 goto gunzip_nomem2;
4862
4863 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4864 GFP_KERNEL);
4865 if (bp->strm->workspace == NULL)
4866 goto gunzip_nomem3;
4867
4868 return 0;
4869
4870gunzip_nomem3:
4871 kfree(bp->strm);
4872 bp->strm = NULL;
4873
4874gunzip_nomem2:
4875 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4876 bp->gunzip_mapping);
4877 bp->gunzip_buf = NULL;
4878
4879gunzip_nomem1:
4880 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
4881 " decompression\n", bp->dev->name);
4882 return -ENOMEM;
4883}
4884
4885static void bnx2x_gunzip_end(struct bnx2x *bp)
4886{
4887 kfree(bp->strm->workspace);
4888
4889 kfree(bp->strm);
4890 bp->strm = NULL;
4891
4892 if (bp->gunzip_buf) {
4893 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4894 bp->gunzip_mapping);
4895 bp->gunzip_buf = NULL;
4896 }
4897}
4898
4899static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4900{
4901 int n, rc;
4902
4903 /* check gzip header */
4904 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4905 return -EINVAL;
4906
4907 n = 10;
4908
4909#define FNAME 0x8
4910
4911 if (zbuf[3] & FNAME)
4912 while ((zbuf[n++] != 0) && (n < len));
4913
4914 bp->strm->next_in = zbuf + n;
4915 bp->strm->avail_in = len - n;
4916 bp->strm->next_out = bp->gunzip_buf;
4917 bp->strm->avail_out = FW_BUF_SIZE;
4918
4919 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4920 if (rc != Z_OK)
4921 return rc;
4922
4923 rc = zlib_inflate(bp->strm, Z_FINISH);
4924 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4925 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4926 bp->dev->name, bp->strm->msg);
4927
4928 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4929 if (bp->gunzip_outlen & 0x3)
4930 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4931 " gunzip_outlen (%d) not aligned\n",
4932 bp->dev->name, bp->gunzip_outlen);
4933 bp->gunzip_outlen >>= 2;
4934
4935 zlib_inflateEnd(bp->strm);
4936
4937 if (rc == Z_STREAM_END)
4938 return 0;
4939
4940 return rc;
4941}
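/* The firmware blobs are gzip files, but zlib_inflateInit2() is
 * called with -MAX_WBITS (raw deflate, no zlib/gzip header parsing),
 * so bnx2x_gunzip() skips the 10-byte gzip header, plus an optional
 * NUL-terminated file name when the FNAME flag is set, by hand before
 * inflating into the pre-allocated gunzip_buf.
 */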
4942
4943/* nic load/unload */
4944
4945/*
4946 * General service functions
4947 */
4948
4949/* send a NIG loopback debug packet */
4950static void bnx2x_lb_pckt(struct bnx2x *bp)
4951{
4952 u32 wb_write[3];
4953
4954 /* Ethernet source and destination addresses */
4955 wb_write[0] = 0x55555555;
4956 wb_write[1] = 0x55555555;
4957 wb_write[2] = 0x20; /* SOP */
4958 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4959
4960 /* NON-IP protocol */
4961 wb_write[0] = 0x09000000;
4962 wb_write[1] = 0x55555555;
4963 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
4964 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4965}
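/* The two DMAE writes above inject one minimal debug frame into the
 * NIG loopback: wb_write[2] carries the control bits (0x20 marks
 * start-of-packet, 0x10 end-of-packet), which is what lets
 * bnx2x_int_mem_test() below count a single 0x10-byte packet through
 * the BRB and the parser.
 */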
4966
4967/* some of the internal memories
4968 * are not directly readable from the driver
4969 * to test them we send debug packets
4970 */
4971static int bnx2x_int_mem_test(struct bnx2x *bp)
4972{
4973 int factor;
4974 int count, i;
4975 u32 val = 0;
4976
4977 if (CHIP_REV_IS_FPGA(bp))
4978 factor = 120;
4979 else if (CHIP_REV_IS_EMUL(bp))
4980 factor = 200;
4981 else
4982 factor = 1;
4983
4984 DP(NETIF_MSG_HW, "start part1\n");
4985
4986 /* Disable inputs of parser neighbor blocks */
4987 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4988 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4989 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4990 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4991
4992 /* Write 0 to parser credits for CFC search request */
4993 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4994
4995 /* send Ethernet packet */
4996 bnx2x_lb_pckt(bp);
4997
4998 /* TODO: should the NIG statistics be reset here? */
4999 /* Wait until NIG register shows 1 packet of size 0x10 */
5000 count = 1000 * factor;
5001 while (count) {
5002
5003 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5004 val = *bnx2x_sp(bp, wb_data[0]);
5005 if (val == 0x10)
5006 break;
5007
5008 msleep(10);
5009 count--;
5010 }
5011 if (val != 0x10) {
5012 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5013 return -1;
5014 }
5015
5016 /* Wait until PRS register shows 1 packet */
5017 count = 1000 * factor;
5018 while (count) {
5019 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5020 if (val == 1)
5021 break;
5022
5023 msleep(10);
5024 count--;
5025 }
5026 if (val != 0x1) {
5027 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5028 return -2;
5029 }
5030
5031 /* Reset and init BRB, PRS */
34f80b04 5032 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5033 msleep(50);
34f80b04 5034 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5035 msleep(50);
5036 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5037 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5038
5039 DP(NETIF_MSG_HW, "part2\n");
5040
5041 /* Disable inputs of parser neighbor blocks */
5042 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5043 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5044 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5045 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5046
5047 /* Write 0 to parser credits for CFC search request */
5048 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5049
5050 /* send 10 Ethernet packets */
5051 for (i = 0; i < 10; i++)
5052 bnx2x_lb_pckt(bp);
5053
5054 /* Wait until NIG register shows 10 + 1
5055 packets with a total size of 11*0x10 = 0xb0 */
5056 count = 1000 * factor;
5057 while (count) {
34f80b04 5058
5059 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5060 val = *bnx2x_sp(bp, wb_data[0]);
5061 if (val == 0xb0)
5062 break;
5063
5064 msleep(10);
5065 count--;
5066 }
5067 if (val != 0xb0) {
5068 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5069 return -3;
5070 }
5071
5072 /* Wait until PRS register shows 2 packets */
5073 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5074 if (val != 2)
5075 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5076
5077 /* Write 1 to parser credits for CFC search request */
5078 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5079
5080 /* Wait until PRS register shows 3 packets */
5081 msleep(10 * factor);
5082 /* Wait until NIG register shows 1 packet of size 0x10 */
5083 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5084 if (val != 3)
5085 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5086
5087 /* clear NIG EOP FIFO */
5088 for (i = 0; i < 11; i++)
5089 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5090 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5091 if (val != 1) {
5092 BNX2X_ERR("clear of NIG failed\n");
5093 return -4;
5094 }
5095
5096 /* Reset and init BRB, PRS, NIG */
5097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5098 msleep(50);
5099 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5100 msleep(50);
5101 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5102 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5103#ifndef BCM_ISCSI
5104 /* set NIC mode */
5105 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5106#endif
5107
5108 /* Enable inputs of parser neighbor blocks */
5109 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5110 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5111 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5112 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5113
5114 DP(NETIF_MSG_HW, "done\n");
5115
5116 return 0; /* OK */
5117}
5118
5119static void enable_blocks_attention(struct bnx2x *bp)
5120{
5121 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5122 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5123 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5124 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5125 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5126 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5127 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5128 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5129 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5130/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5131/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5132 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5133 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5134 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5135/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5136/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5137 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5138 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5139 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5140 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5141/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5142/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5143 if (CHIP_REV_IS_FPGA(bp))
5144 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5145 else
5146 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5147 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5148 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5149 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5150/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5151/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5152 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5153 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5154/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5155 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
5156}
5157
34f80b04 5158
5159static void bnx2x_reset_common(struct bnx2x *bp)
5160{
5161 /* reset_common */
5162 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5163 0xd3ffff7f);
5164 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5165}
5166
34f80b04 5167static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5168{
a2fbb9ea 5169 u32 val, i;
a2fbb9ea 5170
34f80b04 5171 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5172
81f75bbf 5173 bnx2x_reset_common(bp);
5174 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5175 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5176
5177 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5178 if (CHIP_IS_E1H(bp))
5179 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5180
5181 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5182 msleep(30);
5183 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5184
5185 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5186 if (CHIP_IS_E1(bp)) {
5187 /* enable HW interrupt from PXP on USDM overflow
5188 bit 16 on INT_MASK_0 */
5189 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5190 }
a2fbb9ea 5191
5192 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5193 bnx2x_init_pxp(bp);
5194
5195#ifdef __BIG_ENDIAN
5196 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5197 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5198 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5199 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5200 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5201
5202/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5203 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5204 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5205 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5206 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5207#endif
5208
34f80b04 5209 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5210#ifdef BCM_ISCSI
5211 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5212 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5213 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5214#endif
5215
5216 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5217 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5218
5219 /* let the HW do its magic ... */
5220 msleep(100);
5221 /* finish PXP init */
5222 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5223 if (val != 1) {
5224 BNX2X_ERR("PXP2 CFG failed\n");
5225 return -EBUSY;
5226 }
5227 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5228 if (val != 1) {
5229 BNX2X_ERR("PXP2 RD_INIT failed\n");
5230 return -EBUSY;
5231 }
a2fbb9ea 5232
5233 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5234 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5235
34f80b04 5236 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5237
5238 /* clean the DMAE memory */
5239 bp->dmae_ready = 1;
5240 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5241
5242 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5243 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5244 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5245 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5246
5247 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5248 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5249 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5250 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5251
5252 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5253 /* soft reset pulse */
5254 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5255 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5256
5257#ifdef BCM_ISCSI
34f80b04 5258 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5259#endif
a2fbb9ea 5260
5261 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5262 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5263 if (!CHIP_REV_IS_SLOW(bp)) {
5264 /* enable hw interrupt from doorbell Q */
5265 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5266 }
a2fbb9ea 5267
5268 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5269 if (CHIP_REV_IS_SLOW(bp)) {
5270 /* fix for emulation and FPGA for no pause */
5271 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5272 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5273 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5274 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5275 }
a2fbb9ea 5276
34f80b04 5277 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5278 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5279 /* set NIC mode */
5280 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5281 if (CHIP_IS_E1H(bp))
5282 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5283
5284 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5285 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5286 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5287 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5288
5289 if (CHIP_IS_E1H(bp)) {
5290 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5291 STORM_INTMEM_SIZE_E1H/2);
5292 bnx2x_init_fill(bp,
5293 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5294 0, STORM_INTMEM_SIZE_E1H/2);
5295 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5296 STORM_INTMEM_SIZE_E1H/2);
5297 bnx2x_init_fill(bp,
5298 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5299 0, STORM_INTMEM_SIZE_E1H/2);
5300 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5301 STORM_INTMEM_SIZE_E1H/2);
5302 bnx2x_init_fill(bp,
5303 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 0, STORM_INTMEM_SIZE_E1H/2);
5305 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1H/2);
5307 bnx2x_init_fill(bp,
5308 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 0, STORM_INTMEM_SIZE_E1H/2);
5310 } else { /* E1 */
5311 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5312 STORM_INTMEM_SIZE_E1);
5313 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5314 STORM_INTMEM_SIZE_E1);
5315 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5316 STORM_INTMEM_SIZE_E1);
5317 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5318 STORM_INTMEM_SIZE_E1);
34f80b04 5319 }
a2fbb9ea 5320
5321 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5322 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5323 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5324 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5325
5326 /* sync semi rtc */
5327 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5328 0x80000000);
5329 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5330 0x80000000);
a2fbb9ea 5331
5332 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5333 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5334 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5335
5336 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5337 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5338 REG_WR(bp, i, 0xc0cac01a);
5339 /* TODO: replace with something meaningful */
5340 }
8d9c5f34 5341 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5342 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5343
5344 if (sizeof(union cdu_context) != 1024)
5345 /* we currently assume that a context is 1024 bytes */
5346 printk(KERN_ALERT PFX "please adjust the size of"
5347 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5348
5349 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5350 val = (4 << 24) + (0 << 12) + 1024;
5351 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5352 if (CHIP_IS_E1(bp)) {
5353 /* !!! fix pxp client credit until excel update */
5354 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5355 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5356 }
a2fbb9ea 5357
5358 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5359 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5360 /* enable context validation interrupt from CFC */
5361 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5362
5363 /* set the thresholds to prevent CFC/CDU race */
5364 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5365
5366 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5367 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5368
5369 /* PXPCS COMMON comes here */
5370 /* Reset PCIE errors for debug */
5371 REG_WR(bp, 0x2814, 0xffffffff);
5372 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5373
5374 /* EMAC0 COMMON comes here */
5375 /* EMAC1 COMMON comes here */
5376 /* DBU COMMON comes here */
5377 /* DBG COMMON comes here */
5378
5379 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5380 if (CHIP_IS_E1H(bp)) {
5381 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5382 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5383 }
5384
5385 if (CHIP_REV_IS_SLOW(bp))
5386 msleep(200);
5387
5388 /* finish CFC init */
5389 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5390 if (val != 1) {
5391 BNX2X_ERR("CFC LL_INIT failed\n");
5392 return -EBUSY;
5393 }
5394 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5395 if (val != 1) {
5396 BNX2X_ERR("CFC AC_INIT failed\n");
5397 return -EBUSY;
5398 }
5399 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5400 if (val != 1) {
5401 BNX2X_ERR("CFC CAM_INIT failed\n");
5402 return -EBUSY;
5403 }
5404 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5405
5406 /* read NIG statistic
5407 to see if this is the first load since power-up */
5408 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5409 val = *bnx2x_sp(bp, wb_data[0]);
5410
5411 /* do internal memory self test */
5412 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5413 BNX2X_ERR("internal mem self test failed\n");
5414 return -EBUSY;
5415 }
5416
5417 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5418 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5419 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5420 /* Fan failure is indicated by SPIO 5 */
5421 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5422 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5423
5424 /* set to active low mode */
5425 val = REG_RD(bp, MISC_REG_SPIO_INT);
5426 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5427 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5428 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5429
5430 /* enable interrupt to signal the IGU */
5431 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5432 val |= (1 << MISC_REGISTERS_SPIO_5);
5433 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5434 break;
f1410647 5435
5436 default:
5437 break;
5438 }
f1410647 5439
5440 /* clear PXP2 attentions */
5441 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5442
34f80b04 5443 enable_blocks_attention(bp);
a2fbb9ea 5444
5445 if (!BP_NOMCP(bp)) {
5446 bnx2x_acquire_phy_lock(bp);
5447 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5448 bnx2x_release_phy_lock(bp);
5449 } else
5450 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5451
5452 return 0;
5453}
a2fbb9ea 5454
5455static int bnx2x_init_port(struct bnx2x *bp)
5456{
5457 int port = BP_PORT(bp);
5458 u32 val;
a2fbb9ea 5459
5460 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5461
5462 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5463
5464 /* Port PXP comes here */
5465 /* Port PXP2 comes here */
5466#ifdef BCM_ISCSI
5467 /* Port0 1
5468 * Port1 385 */
5469 i++;
5470 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5471 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5472 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5473 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5474
5475 /* Port0 2
5476 * Port1 386 */
5477 i++;
5478 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5479 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5480 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5481 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5482
5483 /* Port0 3
5484 * Port1 387 */
5485 i++;
5486 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5487 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5488 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5489 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5490#endif
34f80b04 5491 /* Port CMs come here */
5492 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5493 (port ? XCM_PORT1_END : XCM_PORT0_END));
5494
5495 /* Port QM comes here */
5496#ifdef BCM_ISCSI
5497 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5498 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5499
5500 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5501 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5502#endif
5503 /* Port DQ comes here */
5504 /* Port BRB1 comes here */
ad8d3948 5505 /* Port PRS comes here */
5506 /* Port TSDM comes here */
5507 /* Port CSDM comes here */
5508 /* Port USDM comes here */
5509 /* Port XSDM comes here */
5510 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5511 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5512 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5513 port ? USEM_PORT1_END : USEM_PORT0_END);
5514 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5515 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5516 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5517 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5518 /* Port UPB comes here */
5519 /* Port XPB comes here */
5520
5521 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5522 port ? PBF_PORT1_END : PBF_PORT0_END);
5523
5524 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5525 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5526
5527 /* update threshold */
34f80b04 5528 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5529 /* update init credit */
34f80b04 5530 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
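	/* Credit arithmetic, as implied by the constants above: thresholds
	 * and credits are counted in 16-byte units, so 9040/16 = 565 units
	 * covers a 9000-byte MTU plus headers; the extra 553 - 22 units of
	 * initial credit appear to be a fixed margin for in-flight data.
	 */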
5531
5532 /* probe changes */
34f80b04 5533 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5534 msleep(5);
34f80b04 5535 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5536
5537#ifdef BCM_ISCSI
5538 /* tell the searcher where the T2 table is */
5539 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5540
5541 wb_write[0] = U64_LO(bp->t2_mapping);
5542 wb_write[1] = U64_HI(bp->t2_mapping);
5543 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5544 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5545 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5546 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5547
5548 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5549 /* Port SRCH comes here */
5550#endif
5551 /* Port CDU comes here */
5552 /* Port CFC comes here */
5553
5554 if (CHIP_IS_E1(bp)) {
5555 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5556 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5557 }
5558 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5559 port ? HC_PORT1_END : HC_PORT0_END);
5560
5561 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5562 MISC_AEU_PORT0_START,
5563 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5564 /* init aeu_mask_attn_func_0/1:
5565 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5566 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5567 * bits 4-7 are used for "per vn group attention" */
5568 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5569 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5570
5571 /* Port PXPCS comes here */
5572 /* Port EMAC0 comes here */
5573 /* Port EMAC1 comes here */
5574 /* Port DBU comes here */
5575 /* Port DBG comes here */
5576 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5577 port ? NIG_PORT1_END : NIG_PORT0_END);
5578
5579 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5580
5581 if (CHIP_IS_E1H(bp)) {
5582 u32 wsum;
5583 struct cmng_struct_per_port m_cmng_port;
5584 int vn;
5585
5586 /* 0x2 disable e1hov, 0x1 enable */
5587 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5588 (IS_E1HMF(bp) ? 0x1 : 0x2));
5589
5590 /* Init RATE SHAPING and FAIRNESS contexts.
5591 Initialize as if there is 10G link. */
5592 wsum = bnx2x_calc_vn_wsum(bp);
5593 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5594 if (IS_E1HMF(bp))
5595 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5596 bnx2x_init_vn_minmax(bp, 2*vn + port,
5597 wsum, 10000, &m_cmng_port);
5598 }
5599
5600 /* Port MCP comes here */
5601 /* Port DMAE comes here */
5602
34f80b04 5603 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5604 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5605 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5606 /* add SPIO 5 to group 0 */
5607 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5608 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5609 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5610 break;
5611
5612 default:
5613 break;
5614 }
5615
c18487ee 5616 bnx2x__link_reset(bp);
a2fbb9ea 5617
5618 return 0;
5619}
5620
5621#define ILT_PER_FUNC (768/2)
5622#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5623/* the physical address is shifted right 12 bits and a
5624 1=valid bit is added as the 53rd bit;
5625 then, since this is a wide register(TM),
5626 we split it into two 32 bit writes
5627 */
5628#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5629#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
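/* A worked example with a hypothetical address (illustration only):
 * for addr = 0x123456000, (u64)addr >> 12 = 0x123456, so
 * ONCHIP_ADDR1(addr) = 0x00123456 and ONCHIP_ADDR2(addr) = 0x00100000,
 * i.e. only the valid bit (1 << 20) since addr >> 44 is zero here.
 */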
5630#define PXP_ONE_ILT(x) (((x) << 10) | x)
5631#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5632
5633#define CNIC_ILT_LINES 0
5634
5635static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5636{
5637 int reg;
5638
5639 if (CHIP_IS_E1H(bp))
5640 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5641 else /* E1 */
5642 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5643
5644 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5645}
5646
5647static int bnx2x_init_func(struct bnx2x *bp)
5648{
5649 int port = BP_PORT(bp);
5650 int func = BP_FUNC(bp);
5651 int i;
5652
5653 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5654
5655 i = FUNC_ILT_BASE(func);
5656
5657 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5658 if (CHIP_IS_E1H(bp)) {
5659 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5660 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5661 } else /* E1 */
5662 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5663 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5664
5665
5666 if (CHIP_IS_E1H(bp)) {
5667 for (i = 0; i < 9; i++)
5668 bnx2x_init_block(bp,
5669 cm_start[func][i], cm_end[func][i]);
5670
5671 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5672 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5673 }
5674
5675 /* HC init per function */
5676 if (CHIP_IS_E1H(bp)) {
5677 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5678
5679 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5680 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5681 }
5682 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5683
c14423fe 5684 /* Reset PCIE errors for debug */
5685 REG_WR(bp, 0x2114, 0xffffffff);
5686 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5687
5688 return 0;
5689}
5690
5691static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5692{
5693 int i, rc = 0;
a2fbb9ea 5694
5695 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5696 BP_FUNC(bp), load_code);
a2fbb9ea 5697
5698 bp->dmae_ready = 0;
5699 mutex_init(&bp->dmae_mutex);
5700 bnx2x_gunzip_init(bp);
a2fbb9ea 5701
5702 switch (load_code) {
5703 case FW_MSG_CODE_DRV_LOAD_COMMON:
5704 rc = bnx2x_init_common(bp);
5705 if (rc)
5706 goto init_hw_err;
5707 /* no break */
5708
5709 case FW_MSG_CODE_DRV_LOAD_PORT:
5710 bp->dmae_ready = 1;
5711 rc = bnx2x_init_port(bp);
5712 if (rc)
5713 goto init_hw_err;
5714 /* no break */
5715
5716 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5717 bp->dmae_ready = 1;
5718 rc = bnx2x_init_func(bp);
5719 if (rc)
5720 goto init_hw_err;
5721 break;
5722
5723 default:
5724 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5725 break;
5726 }
5727
5728 if (!BP_NOMCP(bp)) {
5729 int func = BP_FUNC(bp);
5730
5731 bp->fw_drv_pulse_wr_seq =
34f80b04 5732 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5733 DRV_PULSE_SEQ_MASK);
5734 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5735 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5736 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5737 } else
5738 bp->func_stx = 0;
a2fbb9ea 5739
5740 /* this needs to be done before gunzip end */
5741 bnx2x_zero_def_sb(bp);
5742 for_each_queue(bp, i)
5743 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5744
5745init_hw_err:
5746 bnx2x_gunzip_end(bp);
5747
5748 return rc;
5749}
5750
c14423fe 5751/* send the MCP a request, block until there is a reply */
5752static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5753{
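	/* Handshake sketch, derived from the code below: write
	 * (command | seq) into this function's drv_mb_header, then poll
	 * fw_mb_header until the firmware echoes the same sequence number;
	 * the low FW_MSG_CODE_MASK bits of the echo carry the reply, and
	 * a return value of 0 means the firmware never responded.
	 */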
34f80b04 5754 int func = BP_FUNC(bp);
5755 u32 seq = ++bp->fw_seq;
5756 u32 rc = 0;
5757 u32 cnt = 1;
5758 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5759
34f80b04 5760 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5761 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5762
5763 do {
5764 /* let the FW do its magic ... */
5765 msleep(delay);
a2fbb9ea 5766
19680c48 5767 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5768
5769 /* Give the FW up to 2 seconds (200*10ms, or 200*100ms on slow chips) */
5770 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5771
5772 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5773 cnt*delay, rc, seq);
5774
5775 /* is this a reply to our command? */
5776 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5777 rc &= FW_MSG_CODE_MASK;
f1410647 5778
5779 } else {
5780 /* FW BUG! */
5781 BNX2X_ERR("FW failed to respond!\n");
5782 bnx2x_fw_dump(bp);
5783 rc = 0;
5784 }
f1410647 5785
5786 return rc;
5787}
5788
5789static void bnx2x_free_mem(struct bnx2x *bp)
5790{
5791
5792#define BNX2X_PCI_FREE(x, y, size) \
5793 do { \
5794 if (x) { \
5795 pci_free_consistent(bp->pdev, size, x, y); \
5796 x = NULL; \
5797 y = 0; \
5798 } \
5799 } while (0)
5800
5801#define BNX2X_FREE(x) \
5802 do { \
5803 if (x) { \
5804 vfree(x); \
5805 x = NULL; \
5806 } \
5807 } while (0)
5808
5809 int i;
5810
5811 /* fastpath */
555f6c78 5812 /* Common */
5813 for_each_queue(bp, i) {
5814
555f6c78 5815 /* status blocks */
5816 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5817 bnx2x_fp(bp, i, status_blk_mapping),
5818 sizeof(struct host_status_block) +
5819 sizeof(struct eth_tx_db_data));
5820 }
5821 /* Rx */
5822 for_each_rx_queue(bp, i) {
a2fbb9ea 5823
555f6c78 5824 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5825 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5826 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5827 bnx2x_fp(bp, i, rx_desc_mapping),
5828 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5829
5830 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5831 bnx2x_fp(bp, i, rx_comp_mapping),
5832 sizeof(struct eth_fast_path_rx_cqe) *
5833 NUM_RCQ_BD);
a2fbb9ea 5834
7a9b2557 5835 /* SGE ring */
32626230 5836 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5837 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5838 bnx2x_fp(bp, i, rx_sge_mapping),
5839 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5840 }
5841 /* Tx */
5842 for_each_tx_queue(bp, i) {
5843
5844 /* fastpath tx rings: tx_buf tx_desc */
5845 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5846 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5847 bnx2x_fp(bp, i, tx_desc_mapping),
5848 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5849 }
5850 /* end of fastpath */
5851
5852 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5853 sizeof(struct host_def_status_block));
5854
5855 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5856 sizeof(struct bnx2x_slowpath));
5857
5858#ifdef BCM_ISCSI
5859 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5860 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5861 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5862 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5863#endif
7a9b2557 5864 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5865
5866#undef BNX2X_PCI_FREE
5867#undef BNX2X_FREE
5868}
5869
5870static int bnx2x_alloc_mem(struct bnx2x *bp)
5871{
5872
5873#define BNX2X_PCI_ALLOC(x, y, size) \
5874 do { \
5875 x = pci_alloc_consistent(bp->pdev, size, y); \
5876 if (x == NULL) \
5877 goto alloc_mem_err; \
5878 memset(x, 0, size); \
5879 } while (0)
5880
5881#define BNX2X_ALLOC(x, size) \
5882 do { \
5883 x = vmalloc(size); \
5884 if (x == NULL) \
5885 goto alloc_mem_err; \
5886 memset(x, 0, size); \
5887 } while (0)
5888
5889 int i;
5890
5891 /* fastpath */
555f6c78 5892 /* Common */
5893 for_each_queue(bp, i) {
5894 bnx2x_fp(bp, i, bp) = bp;
5895
555f6c78 5896 /* status blocks */
5897 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5898 &bnx2x_fp(bp, i, status_blk_mapping),
5899 sizeof(struct host_status_block) +
5900 sizeof(struct eth_tx_db_data));
5901 }
5902 /* Rx */
5903 for_each_rx_queue(bp, i) {
a2fbb9ea 5904
555f6c78 5905 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5906 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5907 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5908 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5909 &bnx2x_fp(bp, i, rx_desc_mapping),
5910 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5911
5912 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5913 &bnx2x_fp(bp, i, rx_comp_mapping),
5914 sizeof(struct eth_fast_path_rx_cqe) *
5915 NUM_RCQ_BD);
5916
5917 /* SGE ring */
5918 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5919 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5920 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5921 &bnx2x_fp(bp, i, rx_sge_mapping),
5922 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 5923 }
5924 /* Tx */
5925 for_each_tx_queue(bp, i) {
5926
5927 bnx2x_fp(bp, i, hw_tx_prods) =
5928 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5929
5930 bnx2x_fp(bp, i, tx_prods_mapping) =
5931 bnx2x_fp(bp, i, status_blk_mapping) +
5932 sizeof(struct host_status_block);
5933
5934 /* fastpath tx rings: tx_buf tx_desc */
5935 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5936 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5937 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5938 &bnx2x_fp(bp, i, tx_desc_mapping),
5939 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5940 }
5941 /* end of fastpath */
5942
5943 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5944 sizeof(struct host_def_status_block));
5945
5946 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5947 sizeof(struct bnx2x_slowpath));
5948
5949#ifdef BCM_ISCSI
5950 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5951
5952 /* Initialize T1 */
5953 for (i = 0; i < 64*1024; i += 64) {
5954 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5955 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5956 }
5957
5958 /* allocate searcher T2 table
5959 we allocate 1/4 of alloc num for T2
5960 (which is not entered into the ILT) */
5961 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5962
5963 /* Initialize T2 */
5964 for (i = 0; i < 16*1024; i += 64)
5965 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5966
c14423fe 5967 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5968 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5969
5970 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5971 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5972
5973 /* QM queues (128*MAX_CONN) */
5974 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5975#endif
5976
5977 /* Slow path ring */
5978 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5979
5980 return 0;
5981
5982alloc_mem_err:
5983 bnx2x_free_mem(bp);
5984 return -ENOMEM;
5985
5986#undef BNX2X_PCI_ALLOC
5987#undef BNX2X_ALLOC
5988}
5989
5990static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5991{
5992 int i;
5993
555f6c78 5994 for_each_tx_queue(bp, i) {
5995 struct bnx2x_fastpath *fp = &bp->fp[i];
5996
5997 u16 bd_cons = fp->tx_bd_cons;
5998 u16 sw_prod = fp->tx_pkt_prod;
5999 u16 sw_cons = fp->tx_pkt_cons;
6000
6001 while (sw_cons != sw_prod) {
6002 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6003 sw_cons++;
6004 }
6005 }
6006}
6007
6008static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6009{
6010 int i, j;
6011
555f6c78 6012 for_each_rx_queue(bp, j) {
6013 struct bnx2x_fastpath *fp = &bp->fp[j];
6014
6015 for (i = 0; i < NUM_RX_BD; i++) {
6016 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6017 struct sk_buff *skb = rx_buf->skb;
6018
6019 if (skb == NULL)
6020 continue;
6021
6022 pci_unmap_single(bp->pdev,
6023 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6024 bp->rx_buf_size,
6025 PCI_DMA_FROMDEVICE);
6026
6027 rx_buf->skb = NULL;
6028 dev_kfree_skb(skb);
6029 }
7a9b2557 6030 if (!fp->disable_tpa)
6031 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6032 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6033 ETH_MAX_AGGREGATION_QUEUES_E1H);
6034 }
6035}
6036
6037static void bnx2x_free_skbs(struct bnx2x *bp)
6038{
6039 bnx2x_free_tx_skbs(bp);
6040 bnx2x_free_rx_skbs(bp);
6041}
6042
6043static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6044{
34f80b04 6045 int i, offset = 1;
6046
6047 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6048 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6049 bp->msix_table[0].vector);
6050
6051 for_each_queue(bp, i) {
c14423fe 6052 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6053 "state %x\n", i, bp->msix_table[i + offset].vector,
6054 bnx2x_fp(bp, i, state));
6055
6056 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
6057 BNX2X_ERR("IRQ of fp #%d being freed while "
6058 "state != closed\n", i);
a2fbb9ea 6059
34f80b04 6060 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6061 }
6062}
6063
6064static void bnx2x_free_irq(struct bnx2x *bp)
6065{
a2fbb9ea 6066 if (bp->flags & USING_MSIX_FLAG) {
6067 bnx2x_free_msix_irqs(bp);
6068 pci_disable_msix(bp->pdev);
6069 bp->flags &= ~USING_MSIX_FLAG;
6070
6071 } else
6072 free_irq(bp->pdev->irq, bp->dev);
6073}
6074
6075static int bnx2x_enable_msix(struct bnx2x *bp)
6076{
34f80b04 6077 int i, rc, offset;
6078
6079 bp->msix_table[0].entry = 0;
6080 offset = 1;
6081 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6082
6083 for_each_queue(bp, i) {
6084 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6085
6086 bp->msix_table[i + offset].entry = igu_vec;
6087 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6088 "(fastpath #%u)\n", i + offset, igu_vec, i);
6089 }
6090
34f80b04 6091 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6092 BNX2X_NUM_QUEUES(bp) + offset);
6093 if (rc) {
6094 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6095 return -1;
6096 }
6097 bp->flags |= USING_MSIX_FLAG;
6098
6099 return 0;
6100}
6101
6102static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6103{
34f80b04 6104 int i, rc, offset = 1;
a2fbb9ea 6105
6106 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6107 bp->dev->name, bp->dev);
6108 if (rc) {
6109 BNX2X_ERR("request sp irq failed\n");
6110 return -EBUSY;
6111 }
6112
6113 for_each_queue(bp, i) {
6114 struct bnx2x_fastpath *fp = &bp->fp[i];
6115
6116 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6117 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6118 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6119 if (rc) {
555f6c78 6120 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6121 bnx2x_free_msix_irqs(bp);
6122 return -EBUSY;
6123 }
6124
555f6c78 6125 fp->state = BNX2X_FP_STATE_IRQ;
6126 }
6127
6128 i = BNX2X_NUM_QUEUES(bp);
6129 if (is_multi(bp))
6130 printk(KERN_INFO PFX
6131 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6132 bp->dev->name, bp->msix_table[0].vector,
6133 bp->msix_table[offset].vector,
6134 bp->msix_table[offset + i - 1].vector);
6135 else
6136 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6137 bp->dev->name, bp->msix_table[0].vector,
6138 bp->msix_table[offset + i - 1].vector);
6139
a2fbb9ea 6140 return 0;
6141}
6142
6143static int bnx2x_req_irq(struct bnx2x *bp)
6144{
34f80b04 6145 int rc;
a2fbb9ea 6146
6147 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6148 bp->dev->name, bp->dev);
6149 if (!rc)
6150 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6151
6152 return rc;
6153}
6154
6155static void bnx2x_napi_enable(struct bnx2x *bp)
6156{
6157 int i;
6158
555f6c78 6159 for_each_rx_queue(bp, i)
6160 napi_enable(&bnx2x_fp(bp, i, napi));
6161}
6162
6163static void bnx2x_napi_disable(struct bnx2x *bp)
6164{
6165 int i;
6166
555f6c78 6167 for_each_rx_queue(bp, i)
6168 napi_disable(&bnx2x_fp(bp, i, napi));
6169}
6170
6171static void bnx2x_netif_start(struct bnx2x *bp)
6172{
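	/* Note: intr_sem acts as a disable-nesting count - each
	 * bnx2x_netif_stop() raises it via bnx2x_int_disable_sync() -
	 * so NAPI and interrupts are re-armed only when the last
	 * stopper releases it.
	 */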
6173 if (atomic_dec_and_test(&bp->intr_sem)) {
6174 if (netif_running(bp->dev)) {
6175 bnx2x_napi_enable(bp);
6176 bnx2x_int_enable(bp);
6177 if (bp->state == BNX2X_STATE_OPEN)
6178 netif_tx_wake_all_queues(bp->dev);
6179 }
6180 }
6181}
6182
f8ef6e44 6183static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6184{
f8ef6e44 6185 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6186 bnx2x_napi_disable(bp);
65abd74d 6187 if (netif_running(bp->dev)) {
65abd74d
YG
6188 netif_tx_disable(bp->dev);
6189 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6190 }
6191}
6192
6193/*
6194 * Init service functions
6195 */
6196
3101c2bc 6197static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6198{
6199 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6200 int port = BP_PORT(bp);
6201
6202 /* CAM allocation
6203 * unicasts 0-31:port0 32-63:port1
6204 * multicast 64-127:port0 128-191:port1
6205 */
8d9c5f34 6206 config->hdr.length = 2;
af246401 6207 config->hdr.offset = port ? 32 : 0;
34f80b04 6208 config->hdr.client_id = BP_CL_ID(bp);
6209 config->hdr.reserved1 = 0;
6210
6211 /* primary MAC */
6212 config->config_table[0].cam_entry.msb_mac_addr =
6213 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6214 config->config_table[0].cam_entry.middle_mac_addr =
6215 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6216 config->config_table[0].cam_entry.lsb_mac_addr =
6217 swab16(*(u16 *)&bp->dev->dev_addr[4]);
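	/* Illustration with a hypothetical MAC on a little-endian host:
	 * for dev_addr 00:11:22:33:44:55 the u16 loads above yield
	 * 0x1100, 0x3322 and 0x5544, and swab16() produces the halfwords
	 * 0x0011, 0x2233 and 0x4455 written into the CAM entry.
	 */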
34f80b04 6218 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6219 if (set)
6220 config->config_table[0].target_table_entry.flags = 0;
6221 else
6222 CAM_INVALIDATE(config->config_table[0]);
6223 config->config_table[0].target_table_entry.client_id = 0;
6224 config->config_table[0].target_table_entry.vlan_id = 0;
6225
3101c2bc
YG
6226 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6227 (set ? "setting" : "clearing"),
6228 config->config_table[0].cam_entry.msb_mac_addr,
6229 config->config_table[0].cam_entry.middle_mac_addr,
6230 config->config_table[0].cam_entry.lsb_mac_addr);
6231
6232 /* broadcast */
6233 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6234 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6235 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6236 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6237 if (set)
6238 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6239 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6240 else
6241 CAM_INVALIDATE(config->config_table[1]);
6242 config->config_table[1].target_table_entry.client_id = 0;
6243 config->config_table[1].target_table_entry.vlan_id = 0;
6244
6245 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6246 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6247 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6248}
6249
3101c2bc 6250static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6251{
6252 struct mac_configuration_cmd_e1h *config =
6253 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6254
3101c2bc 6255 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6256 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6257 return;
6258 }
6259
6260 /* CAM allocation for E1H
6261 * unicasts: by func number
6262 * multicast: 20+FUNC*20, 20 each
6263 */
8d9c5f34 6264 config->hdr.length = 1;
6265 config->hdr.offset = BP_FUNC(bp);
6266 config->hdr.client_id = BP_CL_ID(bp);
6267 config->hdr.reserved1 = 0;
6268
6269 /* primary MAC */
6270 config->config_table[0].msb_mac_addr =
6271 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6272 config->config_table[0].middle_mac_addr =
6273 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6274 config->config_table[0].lsb_mac_addr =
6275 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6276 config->config_table[0].client_id = BP_L_ID(bp);
6277 config->config_table[0].vlan_id = 0;
6278 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6279 if (set)
6280 config->config_table[0].flags = BP_PORT(bp);
6281 else
6282 config->config_table[0].flags =
6283 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6284
6285 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6286 (set ? "setting" : "clearing"),
6287 config->config_table[0].msb_mac_addr,
6288 config->config_table[0].middle_mac_addr,
6289 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6290
6291 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6292 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6293 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6294}
6295
6296static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6297 int *state_p, int poll)
6298{
6299 /* can take a while if any port is running */
34f80b04 6300 int cnt = 500;
a2fbb9ea 6301
6302 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6303 poll ? "polling" : "waiting", state, idx);
6304
6305 might_sleep();
34f80b04 6306 while (cnt--) {
6307 if (poll) {
6308 bnx2x_rx_int(bp->fp, 10);
6309 /* if index is different from 0
6310 * the reply for some commands will
3101c2bc 6311 * be on the non default queue
6312 */
6313 if (idx)
6314 bnx2x_rx_int(&bp->fp[idx], 10);
6315 }
a2fbb9ea 6316
3101c2bc 6317 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6318 if (*state_p == state)
6319 return 0;
6320
a2fbb9ea 6321 msleep(1);
6322 }
6323
a2fbb9ea 6324 /* timeout! */
6325 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6326 poll ? "polling" : "waiting", state, idx);
6327#ifdef BNX2X_STOP_ON_ERROR
6328 bnx2x_panic();
6329#endif
a2fbb9ea 6330
49d66772 6331 return -EBUSY;
6332}
6333
6334static int bnx2x_setup_leading(struct bnx2x *bp)
6335{
34f80b04 6336 int rc;
a2fbb9ea 6337
c14423fe 6338 /* reset IGU state */
34f80b04 6339 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6340
6341 /* SETUP ramrod */
6342 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6343
6344 /* Wait for completion */
6345 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6346
34f80b04 6347 return rc;
6348}
6349
6350static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6351{
6352 struct bnx2x_fastpath *fp = &bp->fp[index];
6353
a2fbb9ea 6354 /* reset IGU state */
555f6c78 6355 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6356
228241eb 6357 /* SETUP ramrod */
6358 fp->state = BNX2X_FP_STATE_OPENING;
6359 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6360 fp->cl_id, 0);
6361
6362 /* Wait for completion */
6363 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6364 &(fp->state), 0);
6365}
6366
6367static int bnx2x_poll(struct napi_struct *napi, int budget);
6368static void bnx2x_set_rx_mode(struct net_device *dev);
6369
6370/* must be called with rtnl_lock */
6371static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6372{
228241eb 6373 u32 load_code;
2dfe0e1f 6374 int i, rc = 0;
555f6c78 6375 int num_queues;
6376#ifdef BNX2X_STOP_ON_ERROR
6377 if (unlikely(bp->panic))
6378 return -EPERM;
6379#endif
6380
6381 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6382
34f80b04 6383 if (use_inta) {
6384 num_queues = 1;
6385 bp->num_rx_queues = num_queues;
6386 bp->num_tx_queues = num_queues;
6387 DP(NETIF_MSG_IFUP,
6388 "set number of queues to %d\n", num_queues);
34f80b04 6389 } else {
6390 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6391 num_queues = min_t(u32, num_online_cpus(),
6392 BNX2X_MAX_QUEUES(bp));
34f80b04 6393 else
6394 num_queues = 1;
6395 bp->num_rx_queues = num_queues;
6396 bp->num_tx_queues = num_queues;
6397 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6398 " number of tx queues to %d\n",
6399 bp->num_rx_queues, bp->num_tx_queues);
6400 /* if we can't use MSI-X we only need one fp,
6401 * so try to enable MSI-X with the requested number of fp's
6402 * and fall back to MSI or legacy INTx with one fp
6403 */
6404 rc = bnx2x_enable_msix(bp);
6405 if (rc) {
34f80b04 6406 /* failed to enable MSI-X */
6407 num_queues = 1;
6408 bp->num_rx_queues = num_queues;
6409 bp->num_tx_queues = num_queues;
6410 if (bp->multi_mode)
6411 BNX2X_ERR("Multi requested but failed to "
6412 "enable MSI-X set number of "
6413 "queues to %d\n", num_queues);
6414 }
6415 }
555f6c78 6416 bp->dev->real_num_tx_queues = bp->num_tx_queues;
c14423fe 6417
6418 if (bnx2x_alloc_mem(bp))
6419 return -ENOMEM;
6420
555f6c78 6421 for_each_rx_queue(bp, i)
6422 bnx2x_fp(bp, i, disable_tpa) =
6423 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6424
555f6c78 6425 for_each_rx_queue(bp, i)
6426 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6427 bnx2x_poll, 128);
6428
6429#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6430 for_each_rx_queue(bp, i) {
6431 struct bnx2x_fastpath *fp = &bp->fp[i];
6432
6433 fp->poll_no_work = 0;
6434 fp->poll_calls = 0;
6435 fp->poll_max_calls = 0;
6436 fp->poll_complete = 0;
6437 fp->poll_exit = 0;
6438 }
6439#endif
6440 bnx2x_napi_enable(bp);
6441
6442 if (bp->flags & USING_MSIX_FLAG) {
6443 rc = bnx2x_req_msix_irqs(bp);
6444 if (rc) {
6445 pci_disable_msix(bp->pdev);
2dfe0e1f 6446 goto load_error1;
34f80b04 6447 }
2dfe0e1f 6448 printk(KERN_INFO PFX "%s: using MSI-X\n", bp->dev->name);
6449 } else {
6450 bnx2x_ack_int(bp);
6451 rc = bnx2x_req_irq(bp);
6452 if (rc) {
6453 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6454 goto load_error1;
6455 }
6456 }
6457
6458 /* Send LOAD_REQUEST command to the MCP.
6459 The reply is the type of LOAD command:
6460 if this is the first port to be initialized,
6461 the common blocks must be initialized as well; otherwise not.
6462 */
6463 if (!BP_NOMCP(bp)) {
6464 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6465 if (!load_code) {
6466 BNX2X_ERR("MCP response failure, aborting\n");
6467 rc = -EBUSY;
6468 goto load_error2;
6469 }
6470 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6471 rc = -EBUSY; /* other port in diagnostic mode */
6472 goto load_error2;
6473 }
6474
6475 } else {
6476 int port = BP_PORT(bp);
6477
6478 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6479 load_count[0], load_count[1], load_count[2]);
6480 load_count[0]++;
6481 load_count[1 + port]++;
6482 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6483 load_count[0], load_count[1], load_count[2]);
6484 if (load_count[0] == 1)
6485 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6486 else if (load_count[1 + port] == 1)
6487 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6488 else
6489 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6490 }
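	/* Resulting bring-up order in the no-MCP case, assuming
	 * load_count[0] counts all functions and load_count[1 + port]
	 * counts per port: the very first function runs COMMON init,
	 * the first function on each port runs PORT init, and any
	 * later function on an already-initialized port runs FUNCTION
	 * init only.
	 */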
6491
6492 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6493 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6494 bp->port.pmf = 1;
6495 else
6496 bp->port.pmf = 0;
6497 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6498
a2fbb9ea 6499 /* Initialize HW */
6500 rc = bnx2x_init_hw(bp, load_code);
6501 if (rc) {
a2fbb9ea 6502 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6503 goto load_error2;
6504 }
6505
a2fbb9ea 6506 /* Setup NIC internals and enable interrupts */
471de716 6507 bnx2x_nic_init(bp, load_code);
6508
6509 /* Send LOAD_DONE command to MCP */
34f80b04 6510 if (!BP_NOMCP(bp)) {
6511 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6512 if (!load_code) {
da5a662a 6513 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6514 rc = -EBUSY;
2dfe0e1f 6515 goto load_error3;
6516 }
6517 }
6518
6519 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6520
6521 rc = bnx2x_setup_leading(bp);
6522 if (rc) {
da5a662a 6523 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6524 goto load_error3;
34f80b04 6525 }
a2fbb9ea 6526
6527 if (CHIP_IS_E1H(bp))
6528 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6529 BNX2X_ERR("!!! mf_cfg function disabled\n");
6530 bp->state = BNX2X_STATE_DISABLED;
6531 }
a2fbb9ea 6532
6533 if (bp->state == BNX2X_STATE_OPEN)
6534 for_each_nondefault_queue(bp, i) {
6535 rc = bnx2x_setup_multi(bp, i);
6536 if (rc)
2dfe0e1f 6537 goto load_error3;
34f80b04 6538 }
a2fbb9ea 6539
34f80b04 6540 if (CHIP_IS_E1(bp))
3101c2bc 6541 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6542 else
3101c2bc 6543 bnx2x_set_mac_addr_e1h(bp, 1);
6544
6545 if (bp->port.pmf)
6546 bnx2x_initial_phy_init(bp);
6547
6548 /* Start fast path */
6549 switch (load_mode) {
6550 case LOAD_NORMAL:
6551 /* Tx queues only need to be re-enabled */
555f6c78 6552 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6553 /* Initialize the receive filter. */
6554 bnx2x_set_rx_mode(bp->dev);
6555 break;
6556
6557 case LOAD_OPEN:
555f6c78 6558 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6559 /* Initialize the receive filter. */
34f80b04 6560 bnx2x_set_rx_mode(bp->dev);
34f80b04 6561 break;
a2fbb9ea 6562
34f80b04 6563 case LOAD_DIAG:
2dfe0e1f 6564 /* Initialize the receive filter. */
a2fbb9ea 6565 bnx2x_set_rx_mode(bp->dev);
6566 bp->state = BNX2X_STATE_DIAG;
6567 break;
6568
6569 default:
6570 break;
6571 }
6572
6573 if (!bp->port.pmf)
6574 bnx2x__link_status_update(bp);
6575
6576 /* start the timer */
6577 mod_timer(&bp->timer, jiffies + bp->current_interval);
6578
34f80b04 6579
6580 return 0;
6581
6582load_error3:
6583 bnx2x_int_disable_sync(bp, 1);
6584 if (!BP_NOMCP(bp)) {
6585 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6586 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6587 }
6588 bp->port.pmf = 0;
6589 /* Free SKBs, SGEs, TPA pool and driver internals */
6590 bnx2x_free_skbs(bp);
555f6c78 6591 for_each_rx_queue(bp, i)
3196a88a 6592 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6593load_error2:
6594 /* Release IRQs */
6595 bnx2x_free_irq(bp);
6596load_error1:
6597 bnx2x_napi_disable(bp);
555f6c78 6598 for_each_rx_queue(bp, i)
7cde1c8b 6599 netif_napi_del(&bnx2x_fp(bp, i, napi));
6600 bnx2x_free_mem(bp);
6601
6602 /* TBD we really need to reset the chip
6603 if we want to recover from this */
34f80b04 6604 return rc;
6605}
6606
6607static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6608{
555f6c78 6609 struct bnx2x_fastpath *fp = &bp->fp[index];
6610 int rc;
6611
c14423fe 6612 /* halt the connection */
6613 fp->state = BNX2X_FP_STATE_HALTING;
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6615
34f80b04 6616 /* Wait for completion */
a2fbb9ea 6617 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6618 &(fp->state), 1);
c14423fe 6619 if (rc) /* timeout */
6620 return rc;
6621
6622 /* delete cfc entry */
6623 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6624
6625 /* Wait for completion */
6626 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6627 &(fp->state), 1);
34f80b04 6628 return rc;
6629}
6630
da5a662a 6631static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6632{
49d66772 6633 u16 dsb_sp_prod_idx;
c14423fe 6634 /* if the other port is handling traffic,
a2fbb9ea 6635 this can take a lot of time */
6636 int cnt = 500;
6637 int rc;
6638
6639 might_sleep();
6640
6641 /* Send HALT ramrod */
6642 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6643 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6644
6645 /* Wait for completion */
6646 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6647 &(bp->fp[0].state), 1);
6648 if (rc) /* timeout */
da5a662a 6649 return rc;
a2fbb9ea 6650
49d66772 6651 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6652
228241eb 6653 /* Send PORT_DELETE ramrod */
6654 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6655
49d66772 6656 /* Wait for completion to arrive on default status block
6657 we are going to reset the chip anyway
6658 so there is not much to do if this times out
6659 */
34f80b04 6660 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6661 if (!cnt) {
6662 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6663 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6664 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6665#ifdef BNX2X_STOP_ON_ERROR
6666 bnx2x_panic();
6667#else
6668 rc = -EBUSY;
6669#endif
6670 break;
6671 }
6672 cnt--;
da5a662a 6673 msleep(1);
5650d9d4 6674 rmb(); /* Refresh the dsb_sp_prod */
6675 }
6676 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6677 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6678
6679 return rc;
6680}
6681
6682static void bnx2x_reset_func(struct bnx2x *bp)
6683{
6684 int port = BP_PORT(bp);
6685 int func = BP_FUNC(bp);
6686 int base, i;
6687
6688 /* Configure IGU */
6689 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6690 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6691
6692 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6693
6694 /* Clear ILT */
6695 base = FUNC_ILT_BASE(func);
6696 for (i = base; i < base + ILT_PER_FUNC; i++)
6697 bnx2x_ilt_wr(bp, i, 0);
6698}
6699
6700static void bnx2x_reset_port(struct bnx2x *bp)
6701{
6702 int port = BP_PORT(bp);
6703 u32 val;
6704
6705 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6706
6707 /* Do not rcv packets to BRB */
6708 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6709 /* Do not direct rcv packets that are not for MCP to the BRB */
6710 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6711 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6712
6713 /* Configure AEU */
6714 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6715
6716 msleep(100);
6717 /* Check for BRB port occupancy */
6718 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6719 if (val)
6720 DP(NETIF_MSG_IFDOWN,
33471629 6721 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6722
6723 /* TODO: Close Doorbell port? */
6724}
6725
34f80b04
EG
6726static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6727{
6728 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6729 BP_FUNC(bp), reset_code);
6730
6731 switch (reset_code) {
6732 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6733 bnx2x_reset_port(bp);
6734 bnx2x_reset_func(bp);
6735 bnx2x_reset_common(bp);
6736 break;
6737
6738 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6739 bnx2x_reset_port(bp);
6740 bnx2x_reset_func(bp);
6741 break;
6742
6743 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6744 bnx2x_reset_func(bp);
6745 break;
49d66772 6746
34f80b04
EG
6747 default:
6748 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6749 break;
6750 }
6751}
6752
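/* Unload ordering (a summary of the function below, not new behavior):
 *   1. stop accepting rx traffic (rx_mode NONE) and quiesce NAPI/IRQs
 *   2. drain the tx fastpath rings
 *   3. invalidate the MAC/CAM configuration (or program WoL match entries)
 *   4. close all non-default connections, then the leading one
 *   5. negotiate a reset_code with the MCP (or derive it from load_count
 *      when no MCP is present) and reset the chip accordingly
 *   6. report UNLOAD_DONE and free driver resources
 */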
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
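		/* Illustrative packing (hypothetical MAC, not from the
		 * source): for 00:10:18:de:ad:be the two dwords above are
		 *   entry + 0: (0x00 << 8) | 0x10 = 0x00000010
		 *   entry + 4: (0x18 << 24) | (0xde << 16) |
		 *              (0xad << 8) | 0xbe = 0x18deadbe
		 */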

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

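/* A pre-boot UNDI driver may leave the device partially initialized.  The
 * heuristic below relies on UNDI programming the normal-doorbell CID offset
 * (DORQ_REG_NORM_CID_OFST) to 0x7; when that is seen, the driver performs an
 * unload handshake with the MCP for each port and hard-resets the chip,
 * preserving the NIG port-swap straps across the reset.
 */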
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x  board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

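/* The next two functions derive the link configuration: first a per-PHY
 * capability set is built and pruned by the NVRAM speed_cap_mask, then the
 * requested speed/duplex/flow-control is validated against that set.
 */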
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
		       "  link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
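	/* Illustrative decode (hypothetical shmem values): mac_upper 0x0010
	 * with mac_lower 0x18deadbe unpacks to 00:10:18:de:ad:be - the upper
	 * word holds bytes 0-1 and the lower dword bytes 2-5, most
	 * significant byte first.
	 */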
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) && (!use_inta)) {
		printk(KERN_ERR PFX
		       "Multi disabled since INTA is requested\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
						SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

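/* NVRAM access protocol (a summary of the helpers below): the flash sits
 * behind the MCP, so software first wins the per-port SW arbitration bit in
 * MCP_REG_MCPR_NVM_SW_ARB, then sets the access-enable bits, issues dword
 * commands and polls for DONE, and finally reverses both steps.  Timeouts
 * are multiplied by 100 on slow (emulation/FPGA) platforms.
 */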
static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

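/* Multi-dword transfers keep a single flash transaction open: the first
 * dword is issued with MCPR_NVM_COMMAND_FIRST, middle dwords with no
 * positional flag and the final dword with MCPR_NVM_COMMAND_LAST, as the
 * read loop below demonstrates.
 */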
static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

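/* Illustrative expansion: for offset 0x102, offset & 0x03 == 2, so
 * BYTE_OFFSET(0x102) == 16 and the byte occupies bits 23:16 of the dword
 * that bnx2x_nvram_write1() below read-modify-writes.
 */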
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

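/* Page-boundary handling (a summary of the loop below): a write burst must
 * not cross an NVRAM_PAGE_SIZE page, so MCPR_NVM_COMMAND_LAST is raised on
 * the dword that ends a page (or the buffer) and MCPR_NVM_COMMAND_FIRST on
 * the dword that starts the next one.
 */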
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

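/* Flow-control request encoding (a summary of the setter below):
 * BNX2X_FLOW_CTRL_AUTO is the starting value and explicit rx/tx pause
 * requests OR in their bits.  If neither bit was set the request collapses
 * to NONE; it is restored to AUTO only when pause autoneg is requested
 * while the line speed itself is auto-negotiated.
 */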
8501static int bnx2x_set_pauseparam(struct net_device *dev,
8502 struct ethtool_pauseparam *epause)
8503{
8504 struct bnx2x *bp = netdev_priv(dev);
8505
34f80b04
EG
8506 if (IS_E1HMF(bp))
8507 return 0;
8508
a2fbb9ea
ET
8509 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8510 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8511 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8512
c0700f90 8513 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8514
f1410647 8515 if (epause->rx_pause)
c0700f90 8516 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8517
f1410647 8518 if (epause->tx_pause)
c0700f90 8519 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8520
c0700f90
DM
8521 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8522 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8523
c18487ee 8524 if (epause->autoneg) {
34f80b04 8525 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8526 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
8527 return -EINVAL;
8528 }
a2fbb9ea 8529
c18487ee 8530 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8531 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8532 }
a2fbb9ea 8533
c18487ee
YR
8534 DP(NETIF_MSG_LINK,
8535 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8536
8537 if (netif_running(dev)) {
bb2a0f7a 8538 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8539 bnx2x_link_set(bp);
8540 }
a2fbb9ea
ET
8541
8542 return 0;
8543}
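
/* ETH_FLAG_LRO toggles the HW TPA (LRO) aggregation.  TPA depends on Rx
 * checksum offload: bnx2x_set_flags() enables LRO only while bp->rx_csum
 * is set, and bnx2x_set_rx_csum() force-clears LRO whenever Rx
 * checksumming is disabled.  Any change is applied by reloading the NIC,
 * since the TPA queues are set up at load time.
 */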

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" },
	{ "MC errors (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}
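
/* Register self-test: each entry in reg_tbl below is probed twice, once
 * with an all-zeros and once with an all-ones pattern.  offset1 is the
 * per-port stride added to offset0, and mask selects the implemented
 * (read-write) bits that must read back exactly as written; the original
 * value is restored after each probe.
 */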

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0, 4, 0xffffffff },
		{ HC_REG_AGG_INT_0, 4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0, 4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
		{ QM_REG_CONNNUM_0, 4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0, 40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7, 40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
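
/* Memory self-test: reads every word of the listed internal memories so
 * that any parity error is latched, then checks the *_PRTY_STS registers
 * against per-chip masks of bits that may legitimately be set (e1_mask
 * for 57710, e1h_mask for 57711/57711E).
 */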

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
		{ "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
		{ "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
		{ "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
		{ "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}
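
/* Loopback self-test: bnx2x_run_loopback() below builds a single
 * 1514-byte frame (our own MAC as DA, a counting byte pattern as
 * payload), posts it on fastpath 0 with a hand-rolled BD + doorbell
 * sequence, and then polls the Tx and Rx consumer indices to verify the
 * frame made it around the MAC or PHY (XGXS) loop intact.  Note that
 * ETH_RX_ERROR_FALGS is spelled that way in the firmware HSI headers.
 */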

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
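
/* NVRAM self-test: each nvram_tbl region is stored with its CRC32
 * appended, so running ether_crc_le() over the whole region (data plus
 * stored CRC) must yield the constant CRC32 residual 0xdebb20e3 for an
 * uncorrupted image; the magic word 0x669955aa is checked first.
 */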

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
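
/* Interrupt self-test: posts a harmless zero-length SET_MAC ramrod on
 * the slowpath queue and waits up to ~100ms for the completion event;
 * if the completion never arrives, slowpath interrupts are not working.
 */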

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
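
/* Dispatcher for 'ethtool -t'.  The buf[] slots match the order of
 * bnx2x_tests_str_arr above: 0-2 are the offline register/memory/
 * loopback tests (which require a full unload and a LOAD_DIAG reload),
 * 3-5 are the online nvram/interrupt/link tests, and 7 collects MC
 * (microcode) assertions.  Slot 6 ("idle check") is not filled in here.
 */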

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
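
/* The RCQ "next page" entries do not carry completions, so when the
 * consumer index from the status block lands on the last descriptor of
 * a page (MAX_RCQ_DESC_CNT boundary) it is bumped by one before being
 * compared with our local rx_comp_cons.
 */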

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
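
/* The hardware computes the TCP/UDP checksum starting from a fixed
 * offset that may differ from the real transport header by 'fix' bytes;
 * bnx2x_csum_fix() below folds the extra (or missing) bytes out of (or
 * into) the partial checksum and returns it byte-swapped for the
 * parsing BD.
 */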

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
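
/* The firmware can fetch at most MAX_FETCH_BD BDs per packet.  For LSO,
 * bnx2x_pkt_req_lin() below slides a window of (MAX_FETCH_BD - 3) frags
 * across the skb and flags the packet for linearization if any window
 * sums to less than one MSS, since the FW would then need more BDs than
 * it can fetch to build a single segment.
 */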

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented must
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	Please read carefully. First we use one BD which we mark as start,
	then for TSO or xsum we have a parsing info BD,
	and only then we have the rest of the TSO BDs.
	(don't forget to mark the last one as last,
	and to unmap only AFTER you write to the BD ...)
	And above all, all pdb sizes are in words - NOT DWORDS!
	*/

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p  next_idx %u  bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
#endif
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d  offset %d  fix %d  csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p  addr (%x:%x)  nbd %d"
	   "  nbytes %d  flags %x  vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d  hlen %d  total len %d  tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}
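
/* Rx filtering differs per chip: E1 (57710) programs each multicast
 * address into the MAC CAM via a SET_MAC ramrod, while E1H (57711/
 * 57711E) hashes the address with CRC32c and sets one bit out of 256 in
 * the NIG MC_HASH registers (bit index taken from the top CRC byte).
 */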

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};


static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
10347
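/*
 * Report the negotiated PCI Express link parameters as read from the
 * PCICFG_LINK_CONTROL register: link width in lanes, and link speed
 * using the encoding noted in the comment below.
 */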
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

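/*
 * Main probe entry point: allocate a multi-queue etherdev sized for
 * MAX_CONTEXT queues, run the PCI/netdev setup and driver init, then
 * register the netdev and print a one-line summary of the device.
 */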
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

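/*
 * Tear-down mirror of bnx2x_init_one(): unregister the netdev, unmap
 * both BARs, free the netdev and release the PCI resources.
 */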
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

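/*
 * Legacy PCI power-management hooks. Suspend saves PCI state under
 * rtnl_lock, detaches the interface and unloads the NIC before moving
 * to the requested power state; resume performs the inverse sequence.
 */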
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

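/*
 * Stripped-down unload used on the PCI error-recovery path: stop
 * software activity (queues, timer, stats, IRQs), invalidate the
 * cached E1 MAC/multicast configuration and free all driver memory;
 * the device itself is presumed unreachable at this point.
 */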
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

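/*
 * After a slot reset, re-discover the shared-memory base, sanity-check
 * the MCP validity signature and refresh the firmware sequence number;
 * if no valid shared memory is found, fall back to NO_MCP_FLAG mode.
 */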
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

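/*
 * The PCI error-recovery core invokes the handlers above in order:
 * error_detected() quiesces the device and asks for a slot reset,
 * slot_reset() re-enables the device and restores PCI state, and
 * resume() re-reads the shared memory and reloads the NIC.
 */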
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

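/*
 * Module init creates the dedicated bnx2x workqueue before registering
 * the PCI driver, so it exists before any device can be probed; module
 * cleanup unregisters the driver first and destroys the workqueue last.
 */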
static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);