bnx2x: System-page alignment
drivers/net/bnx2x_main.c (linux-2.6-block.git)
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
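/* Editor's note (sketch, not in the original source): the pair above
 * tunnels GRC register accesses through the PCI configuration window, so
 * it works before the BARs are usable (and when the regular window is
 * held by the MCP).  A hypothetical caller would look like:
 *
 *	u32 val = bnx2x_reg_rd_ind(bp, MCP_REG_MCPR_SCRATCH);
 *	bnx2x_reg_wr_ind(bp, MCP_REG_MCPR_SCRATCH, val);
 *
 * The trailing write of PCICFG_VENDOR_ID_OFFSET presumably parks the
 * window on a harmless address so a later config-space access is not
 * misrouted.
 */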

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
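/* Editor's note: the DMAE block exposes 16 command slots (dmae_reg_go_c
 * above holds their doorbells).  A command is copied dword by dword into
 * the slot's mailbox memory and the matching DMAE_REG_GO_Cx write kicks
 * the engine; completion is signalled by the engine writing comp_val to
 * comp_addr, which the callers below poll.
 */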

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
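/* Editor's note (sketch): a typical caller copies a CPU-built block from
 * the DMA-coherent slowpath area into chip memory through this engine,
 * e.g. via the REG_WR_DMAE() wrapper used by bnx2x_wb_wr() below.
 * dst_addr is a GRC offset in bytes while the engine takes dwords, hence
 * the ">> 2" above; before the engine is ready (bp->dmae_ready == 0) the
 * same transfer falls back to the slow indirect path.
 */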

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
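/* Editor's note: the read path lands the data in the four-dword
 * slowpath->wb_data scratch area, so a caller must consume the result
 * before the next DMAE user takes bp->dmae_mutex; bnx2x_wb_rd() below
 * shows the intended pattern.
 */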

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
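/* Editor's note (sketch): wide (64-bit) registers are written high dword
 * first, so a hypothetical caller would do
 *
 *	bnx2x_wb_wr(bp, SOME_WB_REG, U64_HI(val), U64_LO(val));
 *
 * where SOME_WB_REG is a placeholder, not a real register name.
 */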

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
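/* Editor's note: the four STORM blocks (X/T/C/U) are the on-chip firmware
 * processors; each keeps a list of assert records in its internal memory,
 * terminated by COMMON_ASM_INVALID_ASSERT_OPCODE.  The return value is
 * the total number of asserts found across the four lists.
 */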

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
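/* Editor's note: the MCP keeps a cyclic text log in its scratchpad and
 * "mark" (rounded up to a dword boundary) is the current write pointer,
 * so the two loops print the older half of the buffer first and then wrap
 * to the newer half.  The htonl() presumably undoes the little-endian
 * register read so the bytes come out in string order, and data[8] = 0
 * NUL-terminates each 32-byte chunk before it is printed as a string.
 */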

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
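/* Editor's note: bnx2x_int_disable_sync() is the quiesce helper -- it
 * bumps intr_sem so late ISRs bail out early, optionally masks the HC,
 * waits for every vector with synchronize_irq(), and finally flushes the
 * slowpath work so no driver code is left running when the caller
 * proceeds to tear down.
 */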

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
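/* Editor's note: the returned bitmask (bit 0 - CSTORM index changed,
 * bit 1 - USTORM index changed) tells the NAPI poller whether the status
 * block advanced since it last looked, i.e. whether there is new work.
 */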

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);

}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
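/* Editor's note (worked example): when prod == cons the ring is empty and
 * used is exactly NUM_TX_RINGS, so bnx2x_tx_avail() returns tx_ring_size
 * minus the "next page" link entries, which can never hold real packets.
 * SUB_S16() keeps the subtraction correct across the 16-bit wrap of the
 * producer/consumer indices.
 */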

static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
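/* Editor's note: the wake-up test is intentionally conservative -- the
 * queue is restarted only when at least MAX_SKB_FRAGS + 3 descriptors are
 * free, i.e. enough for a worst-case fragmented skb plus its parse/LSO
 * BDs, so start_xmit() never has to stop the queue halfway into a packet.
 */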


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
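/* Editor's note (sketch): each SGE entry maps one compound page of
 * PAGES_PER_SGE pages (SGE_PAGE_SIZE * PAGES_PER_SGE bytes), so refilling
 * slot i after the firmware has consumed it is just
 *
 *	if (bnx2x_alloc_rx_sge(bp, fp, i))
 *		;	// out of memory: leave the slot empty for now
 */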

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
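/* Editor's note: sge_mask is a bitmap with one bit per SGE slot; a cleared
 * bit means "still in use by the chip".  bnx2x_update_sge_prod() re-arms
 * whole 64-bit mask elements only once every bit in them has been
 * returned, which is why the SGE producer advances in steps of
 * RX_SGE_MASK_ELEM_SZ rather than one slot at a time.
 */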

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb_record_rx_queue(skb, queue);

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take the VLAN tag into account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
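/* Editor's note: the producers are written through BAR_USTRORM_INTMEM as a
 * single ustorm_eth_rx_producers image, one dword at a time; the wmb()
 * beforehand orders the BD/SGE stores against these writes on weakly
 * ordered architectures, and mmiowb() keeps the MMIO stores themselves in
 * order across CPUs.
 */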

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is the size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
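/* Editor's note: each loop iteration consumes exactly one CQE -- slowpath
 * CQEs go to bnx2x_sp_event(), TPA start/end CQEs recycle or rebuild skbs
 * through the tpa helpers, and everything else is either copied (small
 * packet with a jumbo MTU) or handed to the stack directly once a
 * replacement rx buffer has been allocated.  (ETH_RX_ERROR_FALGS is the
 * macro's actual spelling in the HSI headers and is kept as-is.)
 */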
1642
1643static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1644{
1645 struct bnx2x_fastpath *fp = fp_cookie;
1646 struct bnx2x *bp = fp->bp;
34f80b04 1647 int index = FP_IDX(fp);
a2fbb9ea 1648
da5a662a
VZ
1649 /* Return here if interrupt is disabled */
1650 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1651 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1652 return IRQ_HANDLED;
1653 }
1654
34f80b04
EG
1655 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1656 index, FP_SB_ID(fp));
1657 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1658
1659#ifdef BNX2X_STOP_ON_ERROR
1660 if (unlikely(bp->panic))
1661 return IRQ_HANDLED;
1662#endif
1663
1664 prefetch(fp->rx_cons_sb);
1665 prefetch(fp->tx_cons_sb);
1666 prefetch(&fp->status_blk->c_status_block.status_block_index);
1667 prefetch(&fp->status_blk->u_status_block.status_block_index);
1668
288379f0 1669 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1670
a2fbb9ea
ET
1671 return IRQ_HANDLED;
1672}
1673
1674static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1675{
555f6c78 1676 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1677 u16 status = bnx2x_ack_int(bp);
34f80b04 1678 u16 mask;
a2fbb9ea 1679
34f80b04 1680 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1681 if (unlikely(status == 0)) {
1682 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1683 return IRQ_NONE;
1684 }
34f80b04 1685 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1686
34f80b04 1687 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1688 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1689 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1690 return IRQ_HANDLED;
1691 }
1692
3196a88a
EG
1693#ifdef BNX2X_STOP_ON_ERROR
1694 if (unlikely(bp->panic))
1695 return IRQ_HANDLED;
1696#endif
1697
34f80b04
EG
1698 mask = 0x2 << bp->fp[0].sb_id;
1699 if (status & mask) {
a2fbb9ea
ET
1700 struct bnx2x_fastpath *fp = &bp->fp[0];
1701
1702 prefetch(fp->rx_cons_sb);
1703 prefetch(fp->tx_cons_sb);
1704 prefetch(&fp->status_blk->c_status_block.status_block_index);
1705 prefetch(&fp->status_blk->u_status_block.status_block_index);
1706
288379f0 1707 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1708
34f80b04 1709 status &= ~mask;
a2fbb9ea
ET
1710 }
1711
a2fbb9ea 1712
34f80b04 1713 if (unlikely(status & 0x1)) {
1cf167f2 1714 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1715
1716 status &= ~0x1;
1717 if (!status)
1718 return IRQ_HANDLED;
1719 }
1720
34f80b04
EG
1721 if (status)
1722 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1723 status);
a2fbb9ea 1724
c18487ee 1725 return IRQ_HANDLED;
a2fbb9ea
ET
1726}
1727
c18487ee 1728/* end of fast path */
a2fbb9ea 1729
bb2a0f7a 1730static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1731
c18487ee
YR
1732/* Link */
1733
1734/*
1735 * General service functions
1736 */
a2fbb9ea 1737
4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
 1769	/* Try for 5 seconds, every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}
a2fbb9ea 1782
4a37fb66 1783static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1784{
1785 u32 lock_status;
1786 u32 resource_bit = (1 << resource);
1787 int func = BP_FUNC(bp);
1788 u32 hw_lock_control_reg;
a2fbb9ea 1789
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 DP(NETIF_MSG_HW,
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795 return -EINVAL;
1796 }
1797
1798 if (func <= 5) {
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 } else {
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803 }
1804
c18487ee 1805 /* Validating that the resource is currently taken */
4a37fb66 1806 lock_status = REG_RD(bp, hw_lock_control_reg);
1807 if (!(lock_status & resource_bit)) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
1810 return -EFAULT;
1811 }
1812
4a37fb66 1813 REG_WR(bp, hw_lock_control_reg, resource_bit);
1814 return 0;
1815}
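/*
 * A minimal standalone sketch (not driver code) of the set-bit-to-acquire /
 * write-bit-to-release protocol implemented by the two functions above.
 * The hw_lock_reg variable and the sketch_* helpers are hypothetical
 * stand-ins for the HW_LOCK control register and REG_RD/REG_WR; only the
 * polling pattern itself is taken from the driver.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t hw_lock_reg;	/* models the per-function lock register */

/* In the real chip, writing resource_bit to (reg + 4) sets the bit only if
 * it was free; reading the register back tells us whether we won the race. */
static int sketch_acquire(uint32_t resource_bit, int max_tries)
{
	int cnt;

	for (cnt = 0; cnt < max_tries; cnt++) {
		if (!(hw_lock_reg & resource_bit)) {
			hw_lock_reg |= resource_bit; /* HW does this atomically */
			return 0;
		}
		/* the driver sleeps here: msleep(5), for up to 5 seconds */
	}
	return -1;	/* -EAGAIN in the driver */
}

static void sketch_release(uint32_t resource_bit)
{
	hw_lock_reg &= ~resource_bit;	/* plain write to reg clears the bit */
}

int main(void)
{
	if (sketch_acquire(1 << 3, 1000) == 0) {
		printf("resource 3 acquired\n");
		sketch_release(1 << 3);
	}
	return 0;
}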
1816
1817/* HW Lock for shared dual port PHYs */
4a37fb66 1818static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
1819{
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1821
34f80b04 1822 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1823
1824 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1827}
a2fbb9ea 1828
4a37fb66 1829static void bnx2x_release_phy_lock(struct bnx2x *bp)
1830{
1831 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1832
1833 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1836
34f80b04 1837 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1838}
a2fbb9ea 1839
17de50b7 1840int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1841{
1842 /* The GPIO should be swapped if swap register is set and active */
1843 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1844 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1845 int gpio_shift = gpio_num +
1846 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847 u32 gpio_mask = (1 << gpio_shift);
1848 u32 gpio_reg;
a2fbb9ea 1849
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1856 /* read GPIO and mask except the float bits */
1857 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1858
1859 switch (mode) {
1860 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862 gpio_num, gpio_shift);
1863 /* clear FLOAT and set CLR */
1864 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1866 break;
a2fbb9ea 1867
1868 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870 gpio_num, gpio_shift);
1871 /* clear FLOAT and set SET */
1872 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1874 break;
a2fbb9ea 1875
17de50b7 1876 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1877 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878 gpio_num, gpio_shift);
1879 /* set FLOAT */
1880 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1881 break;
a2fbb9ea 1882
1883 default:
1884 break;
1885 }
1886
c18487ee 1887 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1889
c18487ee 1890 return 0;
1891}
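/*
 * A standalone sketch of the gpio_shift/gpio_mask arithmetic used in
 * bnx2x_set_gpio() above: port-1 GPIOs are moved up by a fixed shift so
 * both ports share one register. SKETCH_GPIO_PORT_SHIFT = 4 is assumed
 * purely for illustration; the real MISC_REGISTERS_GPIO_PORT_SHIFT comes
 * from bnx2x_reg.h.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GPIO_PORT_SHIFT 4	/* assumed value, see bnx2x_reg.h */

int main(void)
{
	int gpio_num, gpio_port;

	for (gpio_num = 0; gpio_num <= 3; gpio_num++)
		for (gpio_port = 0; gpio_port <= 1; gpio_port++) {
			int shift = gpio_num +
				    (gpio_port ? SKETCH_GPIO_PORT_SHIFT : 0);
			uint32_t mask = 1u << shift;

			/* port-1 GPIOs land in the upper half of the field */
			printf("gpio %d port %d -> shift %d mask 0x%x\n",
			       gpio_num, gpio_port, shift, mask);
		}
	return 0;
}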
1892
c18487ee 1893static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1894{
1895 u32 spio_mask = (1 << spio_num);
1896 u32 spio_reg;
a2fbb9ea 1897
1898 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899 (spio_num > MISC_REGISTERS_SPIO_7)) {
1900 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1901 return -EINVAL;
1902 }
1903
4a37fb66 1904 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
1905 /* read SPIO and mask except the float bits */
1906 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1907
c18487ee 1908 switch (mode) {
6378c025 1909 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
1910 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911 /* clear FLOAT and set CLR */
1912 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1914 break;
a2fbb9ea 1915
6378c025 1916 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
1917 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918 /* clear FLOAT and set SET */
1919 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1921 break;
a2fbb9ea 1922
1923 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1925 /* set FLOAT */
1926 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1927 break;
a2fbb9ea 1928
1929 default:
1930 break;
1931 }
1932
c18487ee 1933 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1935
1936 return 0;
1937}
1938
c18487ee 1939static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1940{
1941 switch (bp->link_vars.ieee_fc &
1942 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1943 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1945 ADVERTISED_Pause);
1946 break;
1947 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1948 bp->port.advertising |= (ADVERTISED_Asym_Pause |
1949 ADVERTISED_Pause);
1950 break;
1951 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1952 bp->port.advertising |= ADVERTISED_Asym_Pause;
1953 break;
1954 default:
34f80b04 1955 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
1956 ADVERTISED_Pause);
1957 break;
1958 }
1959}
f1410647 1960
1961static void bnx2x_link_report(struct bnx2x *bp)
1962{
1963 if (bp->link_vars.link_up) {
1964 if (bp->state == BNX2X_STATE_OPEN)
1965 netif_carrier_on(bp->dev);
1966 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1967
c18487ee 1968 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1969
1970 if (bp->link_vars.duplex == DUPLEX_FULL)
1971 printk("full duplex");
1972 else
1973 printk("half duplex");
f1410647 1974
1975 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1977 printk(", receive ");
c0700f90 1978 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1979 printk("& transmit ");
1980 } else {
1981 printk(", transmit ");
1982 }
1983 printk("flow control ON");
1984 }
1985 printk("\n");
f1410647 1986
1987 } else { /* link_down */
1988 netif_carrier_off(bp->dev);
1989 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1990 }
1991}
1992
1993static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1994{
1995 if (!BP_NOMCP(bp)) {
1996 u8 rc;
a2fbb9ea 1997
19680c48 1998 /* Initialize link parameters structure variables */
1999 /* It is recommended to turn off RX FC for jumbo frames
2000 for better performance */
2001 if (IS_E1HMF(bp))
c0700f90 2002 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2003 else if (bp->dev->mtu > 5000)
c0700f90 2004 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2005 else
c0700f90 2006 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2007
4a37fb66 2008 bnx2x_acquire_phy_lock(bp);
19680c48 2009 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2010 bnx2x_release_phy_lock(bp);
a2fbb9ea 2011
2012 bnx2x_calc_fc_adv(bp);
2013
2014 if (bp->link_vars.link_up)
2015 bnx2x_link_report(bp);
a2fbb9ea 2016
34f80b04 2017
2018 return rc;
2019 }
2020 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2021 return -EINVAL;
2022}
2023
c18487ee 2024static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2025{
19680c48 2026 if (!BP_NOMCP(bp)) {
4a37fb66 2027 bnx2x_acquire_phy_lock(bp);
19680c48 2028 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2029 bnx2x_release_phy_lock(bp);
a2fbb9ea 2030
2031 bnx2x_calc_fc_adv(bp);
2032 } else
 2033	BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2034}
a2fbb9ea 2035
2036static void bnx2x__link_reset(struct bnx2x *bp)
2037{
19680c48 2038 if (!BP_NOMCP(bp)) {
4a37fb66 2039 bnx2x_acquire_phy_lock(bp);
19680c48 2040 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2041 bnx2x_release_phy_lock(bp);
2042 } else
 2043	BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2044}
a2fbb9ea 2045
2046static u8 bnx2x_link_test(struct bnx2x *bp)
2047{
2048 u8 rc;
a2fbb9ea 2049
4a37fb66 2050 bnx2x_acquire_phy_lock(bp);
c18487ee 2051 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2052 bnx2x_release_phy_lock(bp);
a2fbb9ea 2053
2054 return rc;
2055}
a2fbb9ea 2056
2057/* Calculates the sum of vn_min_rates.
2058 It's needed for further normalizing of the min_rates.
2059
2060 Returns:
2061 sum of vn_min_rates
2062 or
2063 0 - if all the min_rates are 0.
33471629 2064 In the latter case the fairness algorithm should be deactivated.
 2065 If not all min_rates are zero then those that are zero will
2066 be set to 1.
2067 */
2068static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
2069{
2070 int i, port = BP_PORT(bp);
2071 u32 wsum = 0;
2072 int all_zero = 1;
2073
2074 for (i = 0; i < E1HVN_MAX; i++) {
2075 u32 vn_cfg =
2076 SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
2077 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2078 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2079 if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
2080 /* If min rate is zero - set it to 1 */
2081 if (!vn_min_rate)
2082 vn_min_rate = DEF_MIN_RATE;
2083 else
2084 all_zero = 0;
2085
2086 wsum += vn_min_rate;
2087 }
2088 }
2089
2090 /* ... only if all min rates are zeros - disable FAIRNESS */
2091 if (all_zero)
2092 return 0;
2093
2094 return wsum;
2095}
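/*
 * A standalone sketch of the weighted-sum rule implemented by
 * bnx2x_calc_vn_wsum() above: zero min-rates count as DEF_MIN_RATE
 * (assumed to be 100 here for illustration) unless *all* rates are zero,
 * in which case 0 is returned and fairness is disabled. The plain array
 * stands in for the per-VN shared-memory configuration.
 */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_DEF_MIN_RATE 100	/* assumed value of DEF_MIN_RATE */

static uint32_t sketch_vn_wsum(const uint32_t *min_rates, int n)
{
	uint32_t wsum = 0;
	int i, all_zero = 1;

	for (i = 0; i < n; i++) {
		uint32_t r = min_rates[i];

		if (!r)
			r = SKETCH_DEF_MIN_RATE;	/* zero counts as the floor */
		else
			all_zero = 0;
		wsum += r;
	}
	return all_zero ? 0 : wsum;
}

int main(void)
{
	uint32_t rates[4] = { 0, 2500, 0, 7500 };

	/* -> 100 + 2500 + 100 + 7500 = 10200 */
	printf("wsum = %u\n", sketch_vn_wsum(rates, 4));
	return 0;
}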
2096
2097static void bnx2x_init_port_minmax(struct bnx2x *bp,
2098 int en_fness,
2099 u16 port_rate,
2100 struct cmng_struct_per_port *m_cmng_port)
2101{
2102 u32 r_param = port_rate / 8;
2103 int port = BP_PORT(bp);
2104 int i;
2105
2106 memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));
2107
2108 /* Enable minmax only if we are in e1hmf mode */
2109 if (IS_E1HMF(bp)) {
2110 u32 fair_periodic_timeout_usec;
2111 u32 t_fair;
2112
2113 /* Enable rate shaping and fairness */
2114 m_cmng_port->flags.cmng_vn_enable = 1;
2115 m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
2116 m_cmng_port->flags.rate_shaping_enable = 1;
2117
2118 if (!en_fness)
 2119	DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 2120	" fairness will be disabled\n");
2121
2122 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2123 m_cmng_port->rs_vars.rs_periodic_timeout =
2124 RS_PERIODIC_TIMEOUT_USEC / 4;
2125
 2126	/* this is the threshold below which no timer arming will occur;
 2127	the 1.25 coefficient makes the threshold a little bigger
 2128	than the real time, to compensate for timer inaccuracy */
2129 m_cmng_port->rs_vars.rs_threshold =
2130 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2131
2132 /* resolution of fairness timer */
2133 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2134 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2135 t_fair = T_FAIR_COEF / port_rate;
2136
2137 /* this is the threshold below which we won't arm
2138 the timer anymore */
2139 m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;
2140
 2141	/* we multiply by 1e3/8 to get bytes/msec.
 2142	We don't want the credits to exceed
 2143	T_FAIR*FAIR_MEM (the algorithm resolution) */
2144 m_cmng_port->fair_vars.upper_bound =
2145 r_param * t_fair * FAIR_MEM;
2146 /* since each tick is 4 usec */
2147 m_cmng_port->fair_vars.fairness_timeout =
2148 fair_periodic_timeout_usec / 4;
2149
2150 } else {
2151 /* Disable rate shaping and fairness */
2152 m_cmng_port->flags.cmng_vn_enable = 0;
2153 m_cmng_port->flags.fairness_enable = 0;
2154 m_cmng_port->flags.rate_shaping_enable = 0;
2155
2156 DP(NETIF_MSG_IFUP,
2157 "Single function mode minmax will be disabled\n");
2158 }
2159
2160 /* Store it to internal memory */
2161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2162 REG_WR(bp, BAR_XSTRORM_INTMEM +
2163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
2164 ((u32 *)(m_cmng_port))[i]);
2165}
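/*
 * A worked example of the rate-shaping arithmetic above for a 10G port.
 * RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF = 10000000 follow from
 * the in-line comments ("100 usec ... = 25" and "for 10G it is 1000usec");
 * QM_ARB_BYTES = 40000 is an assumed illustrative value.
 */
#include <stdio.h>

int main(void)
{
	unsigned port_rate = 10000;		/* Mbps */
	unsigned rs_timeout_usec = 100;		/* RS_PERIODIC_TIMEOUT_USEC */
	unsigned qm_arb_bytes = 40000;		/* assumed QM_ARB_BYTES */
	unsigned t_fair_coef = 10000000;	/* T_FAIR_COEF, per the comments */

	unsigned r_param = port_rate / 8;	/* bytes per usec: 1250 */

	/* threshold is 1.25x the bytes sent in one period (timer inaccuracy) */
	printf("rs_threshold = %u bytes\n", rs_timeout_usec * r_param * 5 / 4);
	/* -> 156250 */
	printf("fair timeout = %u usec\n", qm_arb_bytes / r_param);  /* -> 32 */
	printf("t_fair       = %u usec\n", t_fair_coef / port_rate); /* -> 1000 */
	return 0;
}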
2166
2167static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
2168 u32 wsum, u16 port_rate,
2169 struct cmng_struct_per_port *m_cmng_port)
2170{
2171 struct rate_shaping_vars_per_vn m_rs_vn;
2172 struct fairness_vars_per_vn m_fair_vn;
2173 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2174 u16 vn_min_rate, vn_max_rate;
2175 int i;
2176
2177 /* If function is hidden - set min and max to zeroes */
2178 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2179 vn_min_rate = 0;
2180 vn_max_rate = 0;
2181
2182 } else {
2183 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2184 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2185 /* If FAIRNESS is enabled (not all min rates are zeroes) and
2186 if current min rate is zero - set it to 1.
33471629 2187 This is a requirement of the algorithm. */
2188 if ((vn_min_rate == 0) && wsum)
2189 vn_min_rate = DEF_MIN_RATE;
2190 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2191 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2192 }
2193
2194 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
2195 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
2196
2197 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2198 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2199
2200 /* global vn counter - maximal Mbps for this vn */
2201 m_rs_vn.vn_counter.rate = vn_max_rate;
2202
2203 /* quota - number of bytes transmitted in this period */
2204 m_rs_vn.vn_counter.quota =
2205 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2206
2207#ifdef BNX2X_PER_PROT_QOS
2208 /* per protocol counter */
2209 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
2210 /* maximal Mbps for this protocol */
2211 m_rs_vn.protocol_counters[protocol].rate =
2212 protocol_max_rate[protocol];
2213 /* the quota in each timer period -
2214 number of bytes transmitted in this period */
2215 m_rs_vn.protocol_counters[protocol].quota =
2216 (u32)(rs_periodic_timeout_usec *
2217 ((double)m_rs_vn.
2218 protocol_counters[protocol].rate/8));
2219 }
2220#endif
2221
2222 if (wsum) {
2223 /* credit for each period of the fairness algorithm:
2224 number of bytes in T_FAIR (the vn share the port rate).
2225 wsum should not be larger than 10000, thus
 2226	T_FAIR_COEF / (8 * wsum) will always be greater than zero */
2227 m_fair_vn.vn_credit_delta =
2228 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
2229 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
2230 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2231 m_fair_vn.vn_credit_delta);
2232 }
2233
2234#ifdef BNX2X_PER_PROT_QOS
2235 do {
2236 u32 protocolWeightSum = 0;
2237
2238 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
2239 protocolWeightSum +=
2240 drvInit.protocol_min_rate[protocol];
2241 /* per protocol counter -
2242 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
2243 if (protocolWeightSum > 0) {
2244 for (protocol = 0;
2245 protocol < NUM_OF_PROTOCOLS; protocol++)
2246 /* credit for each period of the
2247 fairness algorithm - number of bytes in
2248 T_FAIR (the protocol share the vn rate) */
2249 m_fair_vn.protocol_credit_delta[protocol] =
2250 (u32)((vn_min_rate / 8) * t_fair *
2251 protocol_min_rate / protocolWeightSum);
2252 }
2253 } while (0);
2254#endif
2255
2256 /* Store it to internal memory */
2257 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2258 REG_WR(bp, BAR_XSTRORM_INTMEM +
2259 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2260 ((u32 *)(&m_rs_vn))[i]);
2261
2262 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2263 REG_WR(bp, BAR_XSTRORM_INTMEM +
2264 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2265 ((u32 *)(&m_fair_vn))[i]);
2266}
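/*
 * A standalone sketch of the per-VN credit computation above, reusing the
 * same illustrative constants as the previous sketch (T_FAIR_COEF =
 * 10000000, fair_threshold = QM_ARB_BYTES = 40000, both assumptions). A VN
 * with min rate 2500 out of wsum 10000 gets its fair share of the period,
 * floored at twice the fairness threshold as in m_fair_vn.vn_credit_delta.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long t_fair_coef = 10000000ULL;	/* assumed */
	unsigned long long fair_threshold = 40000ULL;	/* assumed */
	unsigned long long vn_min_rate = 2500, wsum = 10000;

	unsigned long long credit = vn_min_rate * (t_fair_coef / (8 * wsum));
	unsigned long long floor = fair_threshold * 2;

	/* the driver takes the max of the two values */
	printf("credit_delta = %llu\n", credit > floor ? credit : floor);
	/* -> 2500 * 125 = 312500, above the 80000 floor */
	return 0;
}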
2267
2268/* This function is called upon link interrupt */
2269static void bnx2x_link_attn(struct bnx2x *bp)
2270{
2271 int vn;
2272
2273 /* Make sure that we are synced with the current statistics */
2274 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2275
c18487ee 2276 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2277
2278 if (bp->link_vars.link_up) {
2279
2280 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2281 struct host_port_stats *pstats;
2282
2283 pstats = bnx2x_sp(bp, port_stats);
2284 /* reset old bmac stats */
2285 memset(&(pstats->mac_stx[0]), 0,
2286 sizeof(struct mac_stx));
2287 }
2288 if ((bp->state == BNX2X_STATE_OPEN) ||
2289 (bp->state == BNX2X_STATE_DISABLED))
2290 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2291 }
2292
2293 /* indicate link status */
2294 bnx2x_link_report(bp);
2295
2296 if (IS_E1HMF(bp)) {
2297 int func;
2298
2299 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2300 if (vn == BP_E1HVN(bp))
2301 continue;
2302
2303 func = ((vn << 1) | BP_PORT(bp));
2304
2305 /* Set the attention towards other drivers
2306 on the same port */
2307 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2308 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2309 }
2310 }
2311
2312 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
2313 struct cmng_struct_per_port m_cmng_port;
2314 u32 wsum;
2315 int port = BP_PORT(bp);
2316
2317 /* Init RATE SHAPING and FAIRNESS contexts */
2318 wsum = bnx2x_calc_vn_wsum(bp);
2319 bnx2x_init_port_minmax(bp, (int)wsum,
2320 bp->link_vars.line_speed,
2321 &m_cmng_port);
2322 if (IS_E1HMF(bp))
2323 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2324 bnx2x_init_vn_minmax(bp, 2*vn + port,
2325 wsum, bp->link_vars.line_speed,
2326 &m_cmng_port);
2327 }
c18487ee 2328}
a2fbb9ea 2329
2330static void bnx2x__link_status_update(struct bnx2x *bp)
2331{
2332 if (bp->state != BNX2X_STATE_OPEN)
2333 return;
a2fbb9ea 2334
c18487ee 2335 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2336
2337 if (bp->link_vars.link_up)
2338 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2339 else
2340 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2341
2342 /* indicate link status */
2343 bnx2x_link_report(bp);
a2fbb9ea 2344}
a2fbb9ea 2345
2346static void bnx2x_pmf_update(struct bnx2x *bp)
2347{
2348 int port = BP_PORT(bp);
2349 u32 val;
2350
2351 bp->port.pmf = 1;
2352 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2353
2354 /* enable nig attention */
2355 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2356 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2357 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2358
2359 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2360}
2361
c18487ee 2362/* end of Link */
2363
2364/* slow path */
2365
2366/*
2367 * General service functions
2368 */
2369
2370/* the slow path queue is odd since completions arrive on the fastpath ring */
2371static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2372 u32 data_hi, u32 data_lo, int common)
2373{
34f80b04 2374 int func = BP_FUNC(bp);
a2fbb9ea 2375
2376 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2377 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2378 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2379 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2380 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2381
2382#ifdef BNX2X_STOP_ON_ERROR
2383 if (unlikely(bp->panic))
2384 return -EIO;
2385#endif
2386
34f80b04 2387 spin_lock_bh(&bp->spq_lock);
2388
2389 if (!bp->spq_left) {
2390 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2391 spin_unlock_bh(&bp->spq_lock);
2392 bnx2x_panic();
2393 return -EBUSY;
2394 }
f1410647 2395
 2396	/* CID needs the port number to be encoded in it */
2397 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2398 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2399 HW_CID(bp, cid)));
2400 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2401 if (common)
2402 bp->spq_prod_bd->hdr.type |=
2403 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2404
2405 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2406 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2407
2408 bp->spq_left--;
2409
2410 if (bp->spq_prod_bd == bp->spq_last_bd) {
2411 bp->spq_prod_bd = bp->spq;
2412 bp->spq_prod_idx = 0;
2413 DP(NETIF_MSG_TIMER, "end of spq\n");
2414
2415 } else {
2416 bp->spq_prod_bd++;
2417 bp->spq_prod_idx++;
2418 }
2419
34f80b04 2420 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2421 bp->spq_prod_idx);
2422
34f80b04 2423 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2424 return 0;
2425}
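/*
 * A minimal standalone sketch (not driver code) of the producer wrap
 * handled at the end of bnx2x_sp_post() above: after an entry is written,
 * the producer either snaps back to the ring base (when it sits on the
 * last BD) or simply advances, and the producer index tracks it.
 * SKETCH_RING_SIZE is an arbitrary illustrative size.
 */
#include <stdio.h>

#define SKETCH_RING_SIZE 4

int main(void)
{
	int ring[SKETCH_RING_SIZE];
	int *prod_bd = ring, *last_bd = &ring[SKETCH_RING_SIZE - 1];
	unsigned int prod_idx = 0, post;

	for (post = 0; post < 6; post++) {
		/* ... the entry would be written at prod_bd here ... */
		if (prod_bd == last_bd) {
			prod_bd = ring;	/* wrap to the ring base */
			prod_idx = 0;	/* and restart the index */
		} else {
			prod_bd++;
			prod_idx++;
		}
		printf("post %u -> prod_idx %u\n", post, prod_idx);
	}
	return 0;
}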
2426
2427/* acquire split MCP access lock register */
4a37fb66 2428static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2429{
a2fbb9ea 2430 u32 i, j, val;
34f80b04 2431 int rc = 0;
2432
2433 might_sleep();
2434 i = 100;
2435 for (j = 0; j < i*10; j++) {
2436 val = (1UL << 31);
2437 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2438 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2439 if (val & (1L << 31))
2440 break;
2441
2442 msleep(5);
2443 }
a2fbb9ea 2444 if (!(val & (1L << 31))) {
19680c48 2445 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2446 rc = -EBUSY;
2447 }
2448
2449 return rc;
2450}
2451
2452/* release split MCP access lock register */
2453static void bnx2x_release_alr(struct bnx2x *bp)
2454{
2455 u32 val = 0;
2456
2457 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2458}
2459
2460static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2461{
2462 struct host_def_status_block *def_sb = bp->def_status_blk;
2463 u16 rc = 0;
2464
2465 barrier(); /* status block is written to by the chip */
2466 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2467 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2468 rc |= 1;
2469 }
2470 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2471 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2472 rc |= 2;
2473 }
2474 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2475 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2476 rc |= 4;
2477 }
2478 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2479 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2480 rc |= 8;
2481 }
2482 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2483 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2484 rc |= 16;
2485 }
2486 return rc;
2487}
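/*
 * A standalone sketch of the change-detection pattern in
 * bnx2x_update_dsb_idx() above: each status-block index that moved since
 * the last pass sets one bit in rc (0x1 attn, 0x2 cstorm, 0x4 ustorm,
 * 0x8 xstorm, 0x10 tstorm), so the caller sees at a glance what changed.
 * The two arrays are hypothetical stand-ins for the cached and chip-written
 * indices.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t cached[5] = { 10, 7, 7, 3, 0 };	/* last-seen indices */
	uint16_t hw[5]     = { 11, 7, 8, 3, 0 };	/* what the chip wrote */
	uint16_t rc = 0;
	int i;

	for (i = 0; i < 5; i++)
		if (cached[i] != hw[i]) {
			cached[i] = hw[i];	/* remember the new index */
			rc |= 1 << i;		/* flag this block as changed */
		}

	printf("rc = 0x%x\n", rc);	/* 0x5: attn and ustorm changed */
	return 0;
}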
2488
2489/*
2490 * slow path service functions
2491 */
2492
2493static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2494{
34f80b04 2495 int port = BP_PORT(bp);
2496 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2497 COMMAND_REG_ATTN_BITS_SET);
2498 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2499 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2500 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2501 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2502 u32 aeu_mask;
a2fbb9ea 2503
2504 if (bp->attn_state & asserted)
2505 BNX2X_ERR("IGU ERROR\n");
2506
2507 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2508 aeu_mask = REG_RD(bp, aeu_addr);
2509
a2fbb9ea 2510 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2511 aeu_mask, asserted);
2512 aeu_mask &= ~(asserted & 0xff);
2513 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2514
2515 REG_WR(bp, aeu_addr, aeu_mask);
2516 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2517
3fcaf2e5 2518 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2519 bp->attn_state |= asserted;
3fcaf2e5 2520 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2521
2522 if (asserted & ATTN_HARD_WIRED_MASK) {
2523 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2524
2525 bnx2x_acquire_phy_lock(bp);
2526
2527 /* save nig interrupt mask */
2528 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2529 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2530
c18487ee 2531 bnx2x_link_attn(bp);
2532
2533 /* handle unicore attn? */
2534 }
2535 if (asserted & ATTN_SW_TIMER_4_FUNC)
2536 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2537
2538 if (asserted & GPIO_2_FUNC)
2539 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2540
2541 if (asserted & GPIO_3_FUNC)
2542 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2543
2544 if (asserted & GPIO_4_FUNC)
2545 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2546
2547 if (port == 0) {
2548 if (asserted & ATTN_GENERAL_ATTN_1) {
2549 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2550 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2551 }
2552 if (asserted & ATTN_GENERAL_ATTN_2) {
2553 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2554 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2555 }
2556 if (asserted & ATTN_GENERAL_ATTN_3) {
2557 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2558 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2559 }
2560 } else {
2561 if (asserted & ATTN_GENERAL_ATTN_4) {
2562 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2563 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2564 }
2565 if (asserted & ATTN_GENERAL_ATTN_5) {
2566 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2567 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2568 }
2569 if (asserted & ATTN_GENERAL_ATTN_6) {
2570 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2571 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2572 }
2573 }
2574
2575 } /* if hardwired */
2576
2577 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2578 asserted, hc_addr);
2579 REG_WR(bp, hc_addr, asserted);
2580
2581 /* now set back the mask */
a5e9a7cf 2582 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2583 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2584 bnx2x_release_phy_lock(bp);
2585 }
2586}
2587
877e9aa4 2588static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2589{
34f80b04 2590 int port = BP_PORT(bp);
2591 int reg_offset;
2592 u32 val;
2593
2594 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2595 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2596
34f80b04 2597 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2598
2599 val = REG_RD(bp, reg_offset);
2600 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2601 REG_WR(bp, reg_offset, val);
2602
2603 BNX2X_ERR("SPIO5 hw attention\n");
2604
34f80b04 2605 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2606 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
2607 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2608 /* Fan failure attention */
2609
17de50b7 2610 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2611 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2612 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2613 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2614 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2615 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2616 /* mark the failure */
c18487ee 2617 bp->link_params.ext_phy_config &=
877e9aa4 2618 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2619 bp->link_params.ext_phy_config |=
2620 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2621 SHMEM_WR(bp,
2622 dev_info.port_hw_config[port].
2623 external_phy_config,
c18487ee 2624 bp->link_params.ext_phy_config);
2625 /* log the failure */
2626 printk(KERN_ERR PFX "Fan Failure on Network"
2627 " Controller %s has caused the driver to"
2628 " shutdown the card to prevent permanent"
2629 " damage. Please contact Dell Support for"
2630 " assistance\n", bp->dev->name);
2631 break;
2632
2633 default:
2634 break;
2635 }
2636 }
2637
2638 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2639
2640 val = REG_RD(bp, reg_offset);
2641 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2642 REG_WR(bp, reg_offset, val);
2643
2644 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2645 (attn & HW_INTERRUT_ASSERT_SET_0));
2646 bnx2x_panic();
2647 }
2648}
2649
2650static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2651{
2652 u32 val;
2653
2654 if (attn & BNX2X_DOORQ_ASSERT) {
2655
2656 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2657 BNX2X_ERR("DB hw attention 0x%x\n", val);
2658 /* DORQ discard attention */
2659 if (val & 0x2)
2660 BNX2X_ERR("FATAL error from DORQ\n");
2661 }
2662
2663 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2664
2665 int port = BP_PORT(bp);
2666 int reg_offset;
2667
2668 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2669 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2670
2671 val = REG_RD(bp, reg_offset);
2672 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2673 REG_WR(bp, reg_offset, val);
2674
2675 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2676 (attn & HW_INTERRUT_ASSERT_SET_1));
2677 bnx2x_panic();
2678 }
2679}
2680
2681static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2682{
2683 u32 val;
2684
2685 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2686
2687 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2688 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2689 /* CFC error attention */
2690 if (val & 0x2)
2691 BNX2X_ERR("FATAL error from CFC\n");
2692 }
2693
2694 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2695
2696 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2697 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2698 /* RQ_USDMDP_FIFO_OVERFLOW */
2699 if (val & 0x18000)
2700 BNX2X_ERR("FATAL error from PXP\n");
2701 }
2702
2703 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2704
2705 int port = BP_PORT(bp);
2706 int reg_offset;
2707
2708 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2709 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2710
2711 val = REG_RD(bp, reg_offset);
2712 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2713 REG_WR(bp, reg_offset, val);
2714
2715 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2716 (attn & HW_INTERRUT_ASSERT_SET_2));
2717 bnx2x_panic();
2718 }
2719}
2720
2721static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2722{
2723 u32 val;
2724
2725 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2726
2727 if (attn & BNX2X_PMF_LINK_ASSERT) {
2728 int func = BP_FUNC(bp);
2729
2730 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2731 bnx2x__link_status_update(bp);
2732 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2733 DRV_STATUS_PMF)
2734 bnx2x_pmf_update(bp);
2735
2736 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2737
2738 BNX2X_ERR("MC assert!\n");
2739 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2740 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2741 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2742 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2743 bnx2x_panic();
2744
2745 } else if (attn & BNX2X_MCP_ASSERT) {
2746
2747 BNX2X_ERR("MCP assert!\n");
2748 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2749 bnx2x_fw_dump(bp);
2750
2751 } else
2752 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2753 }
2754
2755 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2756 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2757 if (attn & BNX2X_GRC_TIMEOUT) {
2758 val = CHIP_IS_E1H(bp) ?
2759 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2760 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2761 }
2762 if (attn & BNX2X_GRC_RSV) {
2763 val = CHIP_IS_E1H(bp) ?
2764 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2765 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2766 }
877e9aa4 2767 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2768 }
2769}
2770
2771static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2772{
2773 struct attn_route attn;
2774 struct attn_route group_mask;
34f80b04 2775 int port = BP_PORT(bp);
877e9aa4 2776 int index;
2777 u32 reg_addr;
2778 u32 val;
3fcaf2e5 2779 u32 aeu_mask;
2780
2781 /* need to take HW lock because MCP or other port might also
2782 try to handle this event */
4a37fb66 2783 bnx2x_acquire_alr(bp);
2784
2785 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2786 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2787 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2788 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2789 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2790 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2791
2792 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2793 if (deasserted & (1 << index)) {
2794 group_mask = bp->attn_group[index];
2795
2796 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2797 index, group_mask.sig[0], group_mask.sig[1],
2798 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2799
2800 bnx2x_attn_int_deasserted3(bp,
2801 attn.sig[3] & group_mask.sig[3]);
2802 bnx2x_attn_int_deasserted1(bp,
2803 attn.sig[1] & group_mask.sig[1]);
2804 bnx2x_attn_int_deasserted2(bp,
2805 attn.sig[2] & group_mask.sig[2]);
2806 bnx2x_attn_int_deasserted0(bp,
2807 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2808
2809 if ((attn.sig[0] & group_mask.sig[0] &
2810 HW_PRTY_ASSERT_SET_0) ||
2811 (attn.sig[1] & group_mask.sig[1] &
2812 HW_PRTY_ASSERT_SET_1) ||
2813 (attn.sig[2] & group_mask.sig[2] &
2814 HW_PRTY_ASSERT_SET_2))
6378c025 2815 BNX2X_ERR("FATAL HW block parity attention\n");
2816 }
2817 }
2818
4a37fb66 2819 bnx2x_release_alr(bp);
a2fbb9ea 2820
5c862848 2821 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2822
2823 val = ~deasserted;
2824 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2825 val, reg_addr);
5c862848 2826 REG_WR(bp, reg_addr, val);
a2fbb9ea 2827
a2fbb9ea 2828 if (~bp->attn_state & deasserted)
3fcaf2e5 2829 BNX2X_ERR("IGU ERROR\n");
2830
2831 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2832 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2833
2834 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2835 aeu_mask = REG_RD(bp, reg_addr);
2836
2837 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2838 aeu_mask, deasserted);
2839 aeu_mask |= (deasserted & 0xff);
2840 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2841
2842 REG_WR(bp, reg_addr, aeu_mask);
2843 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2844
2845 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2846 bp->attn_state &= ~deasserted;
2847 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2848}
2849
2850static void bnx2x_attn_int(struct bnx2x *bp)
2851{
2852 /* read local copy of bits */
2853 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2854 attn_bits);
2855 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2856 attn_bits_ack);
2857 u32 attn_state = bp->attn_state;
2858
2859 /* look for changed bits */
2860 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2861 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2862
2863 DP(NETIF_MSG_HW,
2864 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2865 attn_bits, attn_ack, asserted, deasserted);
2866
2867 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2868 BNX2X_ERR("BAD attention state\n");
2869
2870 /* handle bits that were raised */
2871 if (asserted)
2872 bnx2x_attn_int_asserted(bp, asserted);
2873
2874 if (deasserted)
2875 bnx2x_attn_int_deasserted(bp, deasserted);
2876}
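/*
 * A worked example (standalone, not driver code) of the edge detection in
 * bnx2x_attn_int() above: a bit is newly asserted when it is raised but
 * neither acked nor already in our state; it is deasserted when it dropped
 * while still acked and recorded in our state. The sample values are
 * arbitrary.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits  = 0x5;	/* bits 0 and 2 currently raised */
	uint32_t attn_ack   = 0x6;	/* bits 1 and 2 already acked */
	uint32_t attn_state = 0x6;	/* bits 1 and 2 recorded as asserted */

	uint32_t asserted   = attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted 0x%x deasserted 0x%x\n", asserted, deasserted);
	/* bit 0 is newly asserted (0x1); bit 1 was deasserted (0x2) */
	return 0;
}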
2877
2878static void bnx2x_sp_task(struct work_struct *work)
2879{
1cf167f2 2880 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2881 u16 status;
2882
34f80b04 2883
2884 /* Return here if interrupt is disabled */
2885 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2886 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2887 return;
2888 }
2889
2890 status = bnx2x_update_dsb_idx(bp);
2891/* if (status == 0) */
2892/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2893
3196a88a 2894 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2895
2896 /* HW attentions */
2897 if (status & 0x1)
a2fbb9ea 2898 bnx2x_attn_int(bp);
a2fbb9ea 2899
2900 /* CStorm events: query_stats, port delete ramrod */
2901 if (status & 0x2)
2902 bp->stats_pending = 0;
2903
68d59484 2904 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2905 IGU_INT_NOP, 1);
2906 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2907 IGU_INT_NOP, 1);
2908 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2909 IGU_INT_NOP, 1);
2910 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2911 IGU_INT_NOP, 1);
2912 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2913 IGU_INT_ENABLE, 1);
877e9aa4 2914
2915}
2916
2917static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2918{
2919 struct net_device *dev = dev_instance;
2920 struct bnx2x *bp = netdev_priv(dev);
2921
2922 /* Return here if interrupt is disabled */
2923 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2924 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2925 return IRQ_HANDLED;
2926 }
2927
8d9c5f34 2928 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2929
2930#ifdef BNX2X_STOP_ON_ERROR
2931 if (unlikely(bp->panic))
2932 return IRQ_HANDLED;
2933#endif
2934
1cf167f2 2935 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2936
2937 return IRQ_HANDLED;
2938}
2939
2940/* end of slow path */
2941
2942/* Statistics */
2943
2944/****************************************************************************
2945* Macros
2946****************************************************************************/
2947
2948/* sum[hi:lo] += add[hi:lo] */
2949#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2950 do { \
2951 s_lo += a_lo; \
f5ba6772 2952 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2953 } while (0)
2954
2955/* difference = minuend - subtrahend */
2956#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2957 do { \
2958 if (m_lo < s_lo) { \
2959 /* underflow */ \
a2fbb9ea 2960 d_hi = m_hi - s_hi; \
bb2a0f7a 2961 if (d_hi > 0) { \
6378c025 2962 /* we can 'loan' 1 */ \
2963 d_hi--; \
2964 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2965 } else { \
6378c025 2966 /* m_hi <= s_hi */ \
2967 d_hi = 0; \
2968 d_lo = 0; \
2969 } \
2970 } else { \
2971 /* m_lo >= s_lo */ \
a2fbb9ea 2972 if (m_hi < s_hi) { \
2973 d_hi = 0; \
2974 d_lo = 0; \
2975 } else { \
6378c025 2976 /* m_hi >= s_hi */ \
2977 d_hi = m_hi - s_hi; \
2978 d_lo = m_lo - s_lo; \
2979 } \
2980 } \
2981 } while (0)
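/*
 * A standalone demonstration of the split 64-bit arithmetic in the ADD_64
 * and DIFF_64 macros above: the carry in ADD_64 is detected via the
 * unsigned wrap test (s_lo < a_lo after the add), and DIFF_64 "loans" one
 * from the high word when the low subtrahend is larger. (DIFF_64 also
 * clamps the result to zero when the minuend is smaller overall; that
 * branch is omitted here for brevity.)
 */
#include <stdint.h>
#include <stdio.h>
#include <limits.h>

int main(void)
{
	/* ADD_64: 0x1_FFFFFFFF + 0x0_00000001 = 0x2_00000000 */
	uint32_t s_hi = 1, s_lo = 0xFFFFFFFFu;
	uint32_t a_hi = 0, a_lo = 1;

	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);	/* carry detected via wrap */
	printf("sum  = 0x%x_%08x\n", s_hi, s_lo);

	/* DIFF_64 underflow branch: 0x2_00000000 - 0x0_00000001 */
	uint32_t m_hi = 2, m_lo = 0, sub_hi = 0, sub_lo = 1, d_hi, d_lo;

	if (m_lo < sub_lo) {
		d_hi = m_hi - sub_hi - 1;		/* loan 1 from hi */
		d_lo = m_lo + (UINT_MAX - sub_lo) + 1;	/* borrow into lo */
	} else {
		d_hi = m_hi - sub_hi;
		d_lo = m_lo - sub_lo;
	}
	printf("diff = 0x%x_%08x\n", d_hi, d_lo);	/* 0x1_FFFFFFFF */
	return 0;
}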
2982
bb2a0f7a 2983#define UPDATE_STAT64(s, t) \
a2fbb9ea 2984 do { \
2985 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2986 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2987 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2988 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2989 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2990 pstats->mac_stx[1].t##_lo, diff.lo); \
2991 } while (0)
2992
bb2a0f7a 2993#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2994 do { \
2995 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2996 diff.lo, new->s##_lo, old->s##_lo); \
2997 ADD_64(estats->t##_hi, diff.hi, \
2998 estats->t##_lo, diff.lo); \
2999 } while (0)
3000
3001/* sum[hi:lo] += add */
3002#define ADD_EXTEND_64(s_hi, s_lo, a) \
3003 do { \
3004 s_lo += a; \
3005 s_hi += (s_lo < a) ? 1 : 0; \
3006 } while (0)
3007
bb2a0f7a 3008#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3009 do { \
3010 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3011 pstats->mac_stx[1].s##_lo, \
3012 new->s); \
3013 } while (0)
3014
bb2a0f7a 3015#define UPDATE_EXTEND_TSTAT(s, t) \
3016 do { \
3017 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
3018 old_tclient->s = le32_to_cpu(tclient->s); \
3019 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3020 } while (0)
3021
3022#define UPDATE_EXTEND_XSTAT(s, t) \
3023 do { \
3024 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3025 old_xclient->s = le32_to_cpu(xclient->s); \
3026 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
3027 } while (0)
3028
3029/*
3030 * General service functions
3031 */
3032
3033static inline long bnx2x_hilo(u32 *hiref)
3034{
3035 u32 lo = *(hiref + 1);
3036#if (BITS_PER_LONG == 64)
3037 u32 hi = *hiref;
3038
3039 return HILO_U64(hi, lo);
3040#else
3041 return lo;
3042#endif
3043}
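/*
 * A one-line sketch of the hi/lo combination bnx2x_hilo() performs on
 * 64-bit hosts, where HILO_U64 is assumed to be the usual
 * ((u64)hi << 32) | lo construction; the pair array stands in for the
 * hi/lo word layout of the stats structures.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t pair[2] = { 0x1, 0x2345678 };	/* { hi, lo } as in the stats */
	uint64_t v = ((uint64_t)pair[0] << 32) | pair[1];

	printf("0x%llx\n", (unsigned long long)v);	/* 0x102345678 */
	return 0;
}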
3044
3045/*
3046 * Init service functions
3047 */
3048
3049static void bnx2x_storm_stats_post(struct bnx2x *bp)
3050{
3051 if (!bp->stats_pending) {
3052 struct eth_query_ramrod_data ramrod_data = {0};
3053 int rc;
3054
3055 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3056 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3057 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
3058
3059 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3060 ((u32 *)&ramrod_data)[1],
3061 ((u32 *)&ramrod_data)[0], 0);
3062 if (rc == 0) {
 3063	/* stats ramrod has its own slot on the spq */
3064 bp->spq_left++;
3065 bp->stats_pending = 1;
3066 }
3067 }
3068}
3069
3070static void bnx2x_stats_init(struct bnx2x *bp)
3071{
3072 int port = BP_PORT(bp);
3073
3074 bp->executer_idx = 0;
3075 bp->stats_counter = 0;
3076
3077 /* port stats */
3078 if (!BP_NOMCP(bp))
3079 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3080 else
3081 bp->port.port_stx = 0;
3082 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3083
3084 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3085 bp->port.old_nig_stats.brb_discard =
3086 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3087 bp->port.old_nig_stats.brb_truncate =
3088 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3089 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3090 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3091 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3092 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3093
3094 /* function stats */
3095 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3096 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
3097 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
3098 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3099
3100 bp->stats_state = STATS_STATE_DISABLED;
3101 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3102 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3103}
3104
3105static void bnx2x_hw_stats_post(struct bnx2x *bp)
3106{
3107 struct dmae_command *dmae = &bp->stats_dmae;
3108 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3109
3110 *stats_comp = DMAE_COMP_VAL;
3111
3112 /* loader */
3113 if (bp->executer_idx) {
3114 int loader_idx = PMF_DMAE_C(bp);
3115
3116 memset(dmae, 0, sizeof(struct dmae_command));
3117
3118 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3119 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3120 DMAE_CMD_DST_RESET |
3121#ifdef __BIG_ENDIAN
3122 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3123#else
3124 DMAE_CMD_ENDIANITY_DW_SWAP |
3125#endif
3126 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3127 DMAE_CMD_PORT_0) |
3128 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3129 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3130 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3131 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3132 sizeof(struct dmae_command) *
3133 (loader_idx + 1)) >> 2;
3134 dmae->dst_addr_hi = 0;
3135 dmae->len = sizeof(struct dmae_command) >> 2;
3136 if (CHIP_IS_E1(bp))
3137 dmae->len--;
3138 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3139 dmae->comp_addr_hi = 0;
3140 dmae->comp_val = 1;
3141
3142 *stats_comp = 0;
3143 bnx2x_post_dmae(bp, dmae, loader_idx);
3144
3145 } else if (bp->func_stx) {
3146 *stats_comp = 0;
3147 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3148 }
3149}
3150
3151static int bnx2x_stats_comp(struct bnx2x *bp)
3152{
3153 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3154 int cnt = 10;
3155
3156 might_sleep();
3157 while (*stats_comp != DMAE_COMP_VAL) {
3158 if (!cnt) {
3159 BNX2X_ERR("timeout waiting for stats finished\n");
3160 break;
3161 }
3162 cnt--;
12469401 3163 msleep(1);
3164 }
3165 return 1;
3166}
3167
3168/*
3169 * Statistics service functions
3170 */
3171
3172static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3173{
3174 struct dmae_command *dmae;
3175 u32 opcode;
3176 int loader_idx = PMF_DMAE_C(bp);
3177 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3178
3179 /* sanity */
3180 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3181 BNX2X_ERR("BUG!\n");
3182 return;
3183 }
3184
3185 bp->executer_idx = 0;
3186
3187 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3188 DMAE_CMD_C_ENABLE |
3189 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3190#ifdef __BIG_ENDIAN
3191 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3192#else
3193 DMAE_CMD_ENDIANITY_DW_SWAP |
3194#endif
3195 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3196 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3197
3198 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3199 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3200 dmae->src_addr_lo = bp->port.port_stx >> 2;
3201 dmae->src_addr_hi = 0;
3202 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3203 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3204 dmae->len = DMAE_LEN32_RD_MAX;
3205 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3206 dmae->comp_addr_hi = 0;
3207 dmae->comp_val = 1;
3208
3209 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3210 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3211 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3212 dmae->src_addr_hi = 0;
3213 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3214 DMAE_LEN32_RD_MAX * 4);
3215 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3216 DMAE_LEN32_RD_MAX * 4);
3217 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3218 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3219 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3220 dmae->comp_val = DMAE_COMP_VAL;
3221
3222 *stats_comp = 0;
3223 bnx2x_hw_stats_post(bp);
3224 bnx2x_stats_comp(bp);
3225}
3226
3227static void bnx2x_port_stats_init(struct bnx2x *bp)
3228{
3229 struct dmae_command *dmae;
34f80b04 3230 int port = BP_PORT(bp);
bb2a0f7a 3231 int vn = BP_E1HVN(bp);
a2fbb9ea 3232 u32 opcode;
bb2a0f7a 3233 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3234 u32 mac_addr;
3235 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3236
3237 /* sanity */
3238 if (!bp->link_vars.link_up || !bp->port.pmf) {
3239 BNX2X_ERR("BUG!\n");
3240 return;
3241 }
3242
3243 bp->executer_idx = 0;
3244
3245 /* MCP */
3246 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3247 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3248 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3249#ifdef __BIG_ENDIAN
bb2a0f7a 3250 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3251#else
bb2a0f7a 3252 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3253#endif
3254 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3255 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3256
bb2a0f7a 3257 if (bp->port.port_stx) {
3258
3259 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3260 dmae->opcode = opcode;
3261 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3262 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3263 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3264 dmae->dst_addr_hi = 0;
3265 dmae->len = sizeof(struct host_port_stats) >> 2;
3266 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3267 dmae->comp_addr_hi = 0;
3268 dmae->comp_val = 1;
3269 }
3270
3271 if (bp->func_stx) {
3272
3273 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3274 dmae->opcode = opcode;
3275 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3276 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3277 dmae->dst_addr_lo = bp->func_stx >> 2;
3278 dmae->dst_addr_hi = 0;
3279 dmae->len = sizeof(struct host_func_stats) >> 2;
3280 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3281 dmae->comp_addr_hi = 0;
3282 dmae->comp_val = 1;
3283 }
3284
bb2a0f7a 3285 /* MAC */
a2fbb9ea
ET
3286 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3287 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3288 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3289#ifdef __BIG_ENDIAN
3290 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3291#else
3292 DMAE_CMD_ENDIANITY_DW_SWAP |
3293#endif
3294 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3295 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3296
c18487ee 3297 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3298
3299 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3300 NIG_REG_INGRESS_BMAC0_MEM);
3301
3302 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3303 BIGMAC_REGISTER_TX_STAT_GTBYT */
3304 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3305 dmae->opcode = opcode;
3306 dmae->src_addr_lo = (mac_addr +
3307 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3308 dmae->src_addr_hi = 0;
3309 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3310 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3311 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3312 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3313 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3314 dmae->comp_addr_hi = 0;
3315 dmae->comp_val = 1;
3316
3317 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3318 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3319 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3320 dmae->opcode = opcode;
3321 dmae->src_addr_lo = (mac_addr +
3322 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3323 dmae->src_addr_hi = 0;
3324 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3325 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3326 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3327 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3328 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3329 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3330 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3331 dmae->comp_addr_hi = 0;
3332 dmae->comp_val = 1;
3333
c18487ee 3334 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3335
3336 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3337
3338 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3345 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3346 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3347 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3348 dmae->comp_addr_hi = 0;
3349 dmae->comp_val = 1;
3350
3351 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3352 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3353 dmae->opcode = opcode;
3354 dmae->src_addr_lo = (mac_addr +
3355 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3356 dmae->src_addr_hi = 0;
3357 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3358 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3359 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3360 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3361 dmae->len = 1;
3362 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3363 dmae->comp_addr_hi = 0;
3364 dmae->comp_val = 1;
3365
3366 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3367 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3368 dmae->opcode = opcode;
3369 dmae->src_addr_lo = (mac_addr +
3370 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3371 dmae->src_addr_hi = 0;
3372 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3373 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3374 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3375 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3376 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3377 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3378 dmae->comp_addr_hi = 0;
3379 dmae->comp_val = 1;
3380 }
3381
3382 /* NIG */
3383 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3384 dmae->opcode = opcode;
3385 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3386 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3387 dmae->src_addr_hi = 0;
3388 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3389 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3390 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3391 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3392 dmae->comp_addr_hi = 0;
3393 dmae->comp_val = 1;
3394
3395 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3396 dmae->opcode = opcode;
3397 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3398 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3399 dmae->src_addr_hi = 0;
3400 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3401 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3402 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3403 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3404 dmae->len = (2*sizeof(u32)) >> 2;
3405 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3406 dmae->comp_addr_hi = 0;
3407 dmae->comp_val = 1;
3408
3409 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3410 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3411 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3412 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3413#ifdef __BIG_ENDIAN
3414 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3415#else
3416 DMAE_CMD_ENDIANITY_DW_SWAP |
3417#endif
3418 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3419 (vn << DMAE_CMD_E1HVN_SHIFT));
3420 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3421 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3422 dmae->src_addr_hi = 0;
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3424 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3425 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3426 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3427 dmae->len = (2*sizeof(u32)) >> 2;
3428 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3429 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3430 dmae->comp_val = DMAE_COMP_VAL;
3431
3432 *stats_comp = 0;
3433}
3434
bb2a0f7a 3435static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3436{
3437 struct dmae_command *dmae = &bp->stats_dmae;
3438 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3439
3440 /* sanity */
3441 if (!bp->func_stx) {
3442 BNX2X_ERR("BUG!\n");
3443 return;
3444 }
a2fbb9ea 3445
3446 bp->executer_idx = 0;
3447 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3448
3449 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3450 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3451 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3452#ifdef __BIG_ENDIAN
3453 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3454#else
3455 DMAE_CMD_ENDIANITY_DW_SWAP |
3456#endif
3457 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3458 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3459 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3460 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3461 dmae->dst_addr_lo = bp->func_stx >> 2;
3462 dmae->dst_addr_hi = 0;
3463 dmae->len = sizeof(struct host_func_stats) >> 2;
3464 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3465 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3466 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3467
3468 *stats_comp = 0;
3469}
a2fbb9ea 3470
3471static void bnx2x_stats_start(struct bnx2x *bp)
3472{
3473 if (bp->port.pmf)
3474 bnx2x_port_stats_init(bp);
3475
3476 else if (bp->func_stx)
3477 bnx2x_func_stats_init(bp);
3478
3479 bnx2x_hw_stats_post(bp);
3480 bnx2x_storm_stats_post(bp);
3481}
3482
3483static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3484{
3485 bnx2x_stats_comp(bp);
3486 bnx2x_stats_pmf_update(bp);
3487 bnx2x_stats_start(bp);
3488}
3489
3490static void bnx2x_stats_restart(struct bnx2x *bp)
3491{
3492 bnx2x_stats_comp(bp);
3493 bnx2x_stats_start(bp);
3494}
3495
3496static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3497{
3498 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3499 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3500 struct regpair diff;
3501
3502 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3503 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3504 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3505 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3506 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3507 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3508 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3509 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3510 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3511 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3512 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3513 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3514 UPDATE_STAT64(tx_stat_gt127,
3515 tx_stat_etherstatspkts65octetsto127octets);
3516 UPDATE_STAT64(tx_stat_gt255,
3517 tx_stat_etherstatspkts128octetsto255octets);
3518 UPDATE_STAT64(tx_stat_gt511,
3519 tx_stat_etherstatspkts256octetsto511octets);
3520 UPDATE_STAT64(tx_stat_gt1023,
3521 tx_stat_etherstatspkts512octetsto1023octets);
3522 UPDATE_STAT64(tx_stat_gt1518,
3523 tx_stat_etherstatspkts1024octetsto1522octets);
3524 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3525 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3526 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3527 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3528 UPDATE_STAT64(tx_stat_gterr,
3529 tx_stat_dot3statsinternalmactransmiterrors);
3530 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3531}
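UPDATE_STAT64() is a macro defined elsewhere in the driver; the idiom it
implements is 64-bit accumulation of a hardware delta with an explicit
carry, roughly as follows (a sketch only, not the macro's exact expansion):

static inline void stat64_add(u32 *hi, u32 *lo, u32 d_hi, u32 d_lo)
{
	u32 old_lo = *lo;

	*lo += d_lo;
	*hi += d_hi + (*lo < old_lo);	/* carry when the low word wraps */
}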
3532
3533static void bnx2x_emac_stats_update(struct bnx2x *bp)
3534{
3535 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3536 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3537
3538 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3539 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3540 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3541 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3542 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3543 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3544 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3545 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3546 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3547 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3548 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3549 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3550 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3551 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3552 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3553 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3554 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3555 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3556 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3557 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3558 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3559 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3560 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3561 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3562 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3563 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3564 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3565 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3566 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3567 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3568 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3569}
3570
3571static int bnx2x_hw_stats_update(struct bnx2x *bp)
3572{
3573 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3574 struct nig_stats *old = &(bp->port.old_nig_stats);
3575 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3576 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3577 struct regpair diff;
3578
3579 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3580 bnx2x_bmac_stats_update(bp);
3581
3582 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3583 bnx2x_emac_stats_update(bp);
3584
3585 else { /* unreached */
3586 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3587 return -1;
3588 }
a2fbb9ea 3589
3590 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3591 new->brb_discard - old->brb_discard);
3592 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3593 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3594
3595 UPDATE_STAT64_NIG(egress_mac_pkt0,
3596 etherstatspkts1024octetsto1522octets);
3597 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3598
bb2a0f7a 3599 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3600
3601 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3602 sizeof(struct mac_stx));
3603 estats->brb_drop_hi = pstats->brb_drop_hi;
3604 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3605
bb2a0f7a 3606 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3607
bb2a0f7a 3608 return 0;
3609}
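The ++host_port_stats_end / host_port_stats_start pairing above stamps the
snapshot with a version number on both ends; a consumer would trust the
block only when the two match. A hypothetical reader-side check might be:

static inline int port_stats_snapshot_valid(struct host_port_stats *p)
{
	return p->host_port_stats_start == p->host_port_stats_end;
}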
3610
bb2a0f7a 3611static int bnx2x_storm_stats_update(struct bnx2x *bp)
3612{
3613 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3614 int cl_id = BP_CL_ID(bp);
3615 struct tstorm_per_port_stats *tport =
3616 &stats->tstorm_common.port_statistics;
a2fbb9ea 3617 struct tstorm_per_client_stats *tclient =
bb2a0f7a 3618 &stats->tstorm_common.client_statistics[cl_id];
a2fbb9ea 3619 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3620 struct xstorm_per_client_stats *xclient =
3621 &stats->xstorm_common.client_statistics[cl_id];
3622 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3623 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3624 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3625 u32 diff;
3626
3627 /* are storm stats valid? */
3628 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3629 bp->stats_counter) {
3630 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3631 " tstorm counter (%d) != stats_counter (%d)\n",
3632 tclient->stats_counter, bp->stats_counter);
3633 return -1;
3634 }
3635 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3636 bp->stats_counter) {
3637 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3638 " xstorm counter (%d) != stats_counter (%d)\n",
3639 xclient->stats_counter, bp->stats_counter);
3640 return -2;
3641 }
a2fbb9ea 3642
3643 fstats->total_bytes_received_hi =
3644 fstats->valid_bytes_received_hi =
a2fbb9ea 3645 le32_to_cpu(tclient->total_rcv_bytes.hi);
3646 fstats->total_bytes_received_lo =
3647 fstats->valid_bytes_received_lo =
a2fbb9ea 3648 le32_to_cpu(tclient->total_rcv_bytes.lo);
3649
3650 estats->error_bytes_received_hi =
3651 le32_to_cpu(tclient->rcv_error_bytes.hi);
3652 estats->error_bytes_received_lo =
3653 le32_to_cpu(tclient->rcv_error_bytes.lo);
3654 ADD_64(estats->error_bytes_received_hi,
3655 estats->rx_stat_ifhcinbadoctets_hi,
3656 estats->error_bytes_received_lo,
3657 estats->rx_stat_ifhcinbadoctets_lo);
3658
3659 ADD_64(fstats->total_bytes_received_hi,
3660 estats->error_bytes_received_hi,
3661 fstats->total_bytes_received_lo,
3662 estats->error_bytes_received_lo);
3663
3664 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
a2fbb9ea 3665 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
bb2a0f7a 3666 total_multicast_packets_received);
a2fbb9ea 3667 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3668 total_broadcast_packets_received);
3669
3670 fstats->total_bytes_transmitted_hi =
3671 le32_to_cpu(xclient->total_sent_bytes.hi);
3672 fstats->total_bytes_transmitted_lo =
3673 le32_to_cpu(xclient->total_sent_bytes.lo);
3674
3675 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3676 total_unicast_packets_transmitted);
3677 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3678 total_multicast_packets_transmitted);
3679 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3680 total_broadcast_packets_transmitted);
3681
3682 memcpy(estats, &(fstats->total_bytes_received_hi),
3683 sizeof(struct host_func_stats) - 2*sizeof(u32));
3684
3685 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3686 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3687 estats->brb_truncate_discard =
3688 le32_to_cpu(tport->brb_truncate_discard);
3689 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3690
3691 old_tclient->rcv_unicast_bytes.hi =
a2fbb9ea 3692 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
bb2a0f7a 3693 old_tclient->rcv_unicast_bytes.lo =
a2fbb9ea 3694 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
bb2a0f7a 3695 old_tclient->rcv_broadcast_bytes.hi =
a2fbb9ea 3696 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
bb2a0f7a 3697 old_tclient->rcv_broadcast_bytes.lo =
a2fbb9ea 3698 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
bb2a0f7a 3699 old_tclient->rcv_multicast_bytes.hi =
a2fbb9ea 3700 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
bb2a0f7a 3701 old_tclient->rcv_multicast_bytes.lo =
a2fbb9ea 3702 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
bb2a0f7a 3703 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
a2fbb9ea 3704
3705 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3706 old_tclient->packets_too_big_discard =
a2fbb9ea 3707 le32_to_cpu(tclient->packets_too_big_discard);
3708 estats->no_buff_discard =
3709 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3710 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3711
3712 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3713 old_xclient->unicast_bytes_sent.hi =
3714 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3715 old_xclient->unicast_bytes_sent.lo =
3716 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3717 old_xclient->multicast_bytes_sent.hi =
3718 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3719 old_xclient->multicast_bytes_sent.lo =
3720 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3721 old_xclient->broadcast_bytes_sent.hi =
3722 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3723 old_xclient->broadcast_bytes_sent.lo =
3724 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3725
3726 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3727
3728 return 0;
3729}
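Both validity tests at the top of this function apply the same rule: each
storm bumps its counter after publishing statistics, so a snapshot is fresh
exactly when that counter is one behind the driver's own counter (u16
wraparound intended). As a sketch, with a hypothetical helper:

static inline int storm_stats_fresh(u16 storm_counter, u16 drv_counter)
{
	return (u16)(storm_counter + 1) == drv_counter;
}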
3730
bb2a0f7a 3731static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3732{
3733 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3734 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3735 struct net_device_stats *nstats = &bp->dev->stats;
3736
3737 nstats->rx_packets =
3738 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3739 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3740 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3741
3742 nstats->tx_packets =
3743 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3744 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3745 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3746
bb2a0f7a 3747 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
a2fbb9ea 3748
0e39e645 3749 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3750
3751 nstats->rx_dropped = old_tclient->checksum_discard +
3752 estats->mac_discard;
3753 nstats->tx_dropped = 0;
3754
3755 nstats->multicast =
3756 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3757
3758 nstats->collisions =
3759 estats->tx_stat_dot3statssinglecollisionframes_lo +
3760 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3761 estats->tx_stat_dot3statslatecollisions_lo +
3762 estats->tx_stat_dot3statsexcessivecollisions_lo;
a2fbb9ea 3763
3764 estats->jabber_packets_received =
3765 old_tclient->packets_too_big_discard +
3766 estats->rx_stat_dot3statsframestoolong_lo;
3767
3768 nstats->rx_length_errors =
3769 estats->rx_stat_etherstatsundersizepkts_lo +
3770 estats->jabber_packets_received;
66e855f3 3771 nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
3772 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3773 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3774 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3775 nstats->rx_missed_errors = estats->xxoverflow_discard;
3776
3777 nstats->rx_errors = nstats->rx_length_errors +
3778 nstats->rx_over_errors +
3779 nstats->rx_crc_errors +
3780 nstats->rx_frame_errors +
3781 nstats->rx_fifo_errors +
3782 nstats->rx_missed_errors;
a2fbb9ea 3783
3784 nstats->tx_aborted_errors =
3785 estats->tx_stat_dot3statslatecollisions_lo +
3786 estats->tx_stat_dot3statsexcessivecollisions_lo;
3787 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3788 nstats->tx_fifo_errors = 0;
3789 nstats->tx_heartbeat_errors = 0;
3790 nstats->tx_window_errors = 0;
3791
3792 nstats->tx_errors = nstats->tx_aborted_errors +
3793 nstats->tx_carrier_errors;
3794}
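bnx2x_hilo() is defined elsewhere in the driver; conceptually it folds one
of the {_hi, _lo} u32 pairs used throughout these structures into a single
scalar, along these lines (a sketch; the _lo word is assumed to directly
follow the _hi word, as it does in the layouts above):

static inline u64 hilo_fold(const u32 *hi)
{
	const u32 *lo = hi + 1;

	return ((u64)(*hi) << 32) | *lo;
}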
3795
bb2a0f7a 3796static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3797{
3798 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3799 int update = 0;
a2fbb9ea 3800
3801 if (*stats_comp != DMAE_COMP_VAL)
3802 return;
3803
3804 if (bp->port.pmf)
3805 update = (bnx2x_hw_stats_update(bp) == 0);
a2fbb9ea 3806
bb2a0f7a 3807 update |= (bnx2x_storm_stats_update(bp) == 0);
a2fbb9ea 3808
3809 if (update)
3810 bnx2x_net_stats_update(bp);
a2fbb9ea 3811
3812 else {
3813 if (bp->stats_pending) {
3814 bp->stats_pending++;
3815 if (bp->stats_pending == 3) {
3816 BNX2X_ERR("stats not updated for 3 times\n");
3817 bnx2x_panic();
3818 return;
3819 }
3820 }
3821 }
3822
3823 if (bp->msglevel & NETIF_MSG_TIMER) {
3824 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3825 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3826 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3827 int i;
3828
3829 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3830 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3831 " tx pkt (%lx)\n",
3832 bnx2x_tx_avail(bp->fp),
7a9b2557 3833 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3834 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3835 " rx pkt (%lx)\n",
3836 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3837 bp->fp->rx_comp_cons),
3838 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
a2fbb9ea 3839 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
6378c025 3840 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
bb2a0f7a 3841 estats->driver_xoff, estats->brb_drop_lo);
3842 printk(KERN_DEBUG "tstats: checksum_discard %u "
3843 "packets_too_big_discard %u no_buff_discard %u "
3844 "mac_discard %u mac_filter_discard %u "
3845 "xxovrflow_discard %u brb_truncate_discard %u "
3846 "ttl0_discard %u\n",
3847 old_tclient->checksum_discard,
3848 old_tclient->packets_too_big_discard,
3849 old_tclient->no_buff_discard, estats->mac_discard,
a2fbb9ea 3850 estats->mac_filter_discard, estats->xxoverflow_discard,
3851 estats->brb_truncate_discard,
3852 old_tclient->ttl0_discard);
3853
3854 for_each_queue(bp, i) {
3855 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3856 bnx2x_fp(bp, i, tx_pkt),
3857 bnx2x_fp(bp, i, rx_pkt),
3858 bnx2x_fp(bp, i, rx_calls));
3859 }
3860 }
3861
3862 bnx2x_hw_stats_post(bp);
3863 bnx2x_storm_stats_post(bp);
3864}
a2fbb9ea 3865
3866static void bnx2x_port_stats_stop(struct bnx2x *bp)
3867{
3868 struct dmae_command *dmae;
3869 u32 opcode;
3870 int loader_idx = PMF_DMAE_C(bp);
3871 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3872
bb2a0f7a 3873 bp->executer_idx = 0;
a2fbb9ea 3874
3875 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3876 DMAE_CMD_C_ENABLE |
3877 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3878#ifdef __BIG_ENDIAN
bb2a0f7a 3879 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3880#else
bb2a0f7a 3881 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3882#endif
3883 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3884 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3885
3886 if (bp->port.port_stx) {
3887
3888 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3889 if (bp->func_stx)
3890 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3891 else
3892 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3893 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3894 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3895 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3896 dmae->dst_addr_hi = 0;
3897 dmae->len = sizeof(struct host_port_stats) >> 2;
3898 if (bp->func_stx) {
3899 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3900 dmae->comp_addr_hi = 0;
3901 dmae->comp_val = 1;
3902 } else {
3903 dmae->comp_addr_lo =
3904 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3905 dmae->comp_addr_hi =
3906 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3907 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3908
3909 *stats_comp = 0;
3910 }
3911 }
3912
3913 if (bp->func_stx) {
3914
3915 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3916 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3917 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3918 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3919 dmae->dst_addr_lo = bp->func_stx >> 2;
3920 dmae->dst_addr_hi = 0;
3921 dmae->len = sizeof(struct host_func_stats) >> 2;
3922 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3923 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3924 dmae->comp_val = DMAE_COMP_VAL;
3925
3926 *stats_comp = 0;
a2fbb9ea 3927 }
3928}
3929
3930static void bnx2x_stats_stop(struct bnx2x *bp)
3931{
3932 int update = 0;
3933
3934 bnx2x_stats_comp(bp);
3935
3936 if (bp->port.pmf)
3937 update = (bnx2x_hw_stats_update(bp) == 0);
3938
3939 update |= (bnx2x_storm_stats_update(bp) == 0);
3940
3941 if (update) {
3942 bnx2x_net_stats_update(bp);
a2fbb9ea 3943
3944 if (bp->port.pmf)
3945 bnx2x_port_stats_stop(bp);
3946
3947 bnx2x_hw_stats_post(bp);
3948 bnx2x_stats_comp(bp);
3949 }
3950}
3951
3952static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3953{
3954}
3955
3956static const struct {
3957 void (*action)(struct bnx2x *bp);
3958 enum bnx2x_stats_state next_state;
3959} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3960/* state event */
3961{
3962/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3963/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3964/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3965/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3966},
3967{
3968/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3969/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3970/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3971/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3972}
3973};
3974
3975static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3976{
3977 enum bnx2x_stats_state state = bp->stats_state;
3978
3979 bnx2x_stats_stm[state][event].action(bp);
3980 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3981
3982 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3983 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3984 state, event, bp->stats_state);
3985}
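Typical event flow through the table above, for orientation (illustrative;
the event spellings follow the table's row comments, with STATS_EVENT_UPDATE
confirmed by its use in bnx2x_timer() below):

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);	/* PMF takeover */
	bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);	/* start collection */
	bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);	/* periodic refresh */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);	/* final update, stop */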
3986
3987static void bnx2x_timer(unsigned long data)
3988{
3989 struct bnx2x *bp = (struct bnx2x *) data;
3990
3991 if (!netif_running(bp->dev))
3992 return;
3993
3994 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3995 goto timer_restart;
3996
3997 if (poll) {
3998 struct bnx2x_fastpath *fp = &bp->fp[0];
3999 int rc;
4000
4001 bnx2x_tx_int(fp, 1000);
4002 rc = bnx2x_rx_int(fp, 1000);
4003 }
4004
4005 if (!BP_NOMCP(bp)) {
4006 int func = BP_FUNC(bp);
4007 u32 drv_pulse;
4008 u32 mcp_pulse;
4009
4010 ++bp->fw_drv_pulse_wr_seq;
4011 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4012 /* TBD - add SYSTEM_TIME */
4013 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4014 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4015
34f80b04 4016 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4017 MCP_PULSE_SEQ_MASK);
4018 /* The delta between driver pulse and mcp response
4019 * should be 1 (before mcp response) or 0 (after mcp response)
4020 */
4021 if ((drv_pulse != mcp_pulse) &&
4022 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4023 /* someone lost a heartbeat... */
4024 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4025 drv_pulse, mcp_pulse);
4026 }
4027 }
4028
4029 if ((bp->state == BNX2X_STATE_OPEN) ||
4030 (bp->state == BNX2X_STATE_DISABLED))
4031 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4032
f1410647 4033timer_restart:
4034 mod_timer(&bp->timer, jiffies + bp->current_interval);
4035}
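The heartbeat check above reduces to a two-value window: the driver's
sequence number is either equal to the MCP's echo (already answered) or
exactly one ahead (answer still pending). A condensed restatement (sketch;
the helper is hypothetical):

static inline int pulse_in_sync(u32 drv_pulse, u32 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}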
4036
4037/* end of Statistics */
4038
4039/* nic init */
4040
4041/*
4042 * nic init service functions
4043 */
4044
34f80b04 4045static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4046{
4047 int port = BP_PORT(bp);
4048
4049 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4050 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4051 sizeof(struct ustorm_status_block)/4);
4052 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4053 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4054 sizeof(struct cstorm_status_block)/4);
4055}
4056
4057static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4058 dma_addr_t mapping, int sb_id)
4059{
4060 int port = BP_PORT(bp);
bb2a0f7a 4061 int func = BP_FUNC(bp);
a2fbb9ea 4062 int index;
34f80b04 4063 u64 section;
4064
4065 /* USTORM */
4066 section = ((u64)mapping) + offsetof(struct host_status_block,
4067 u_status_block);
34f80b04 4068 sb->u_status_block.status_block_id = sb_id;
4069
4070 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4071 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4072 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4073 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4074 U64_HI(section));
4075 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4076 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4077
4078 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4079 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4080 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4081
4082 /* CSTORM */
4083 section = ((u64)mapping) + offsetof(struct host_status_block,
4084 c_status_block);
34f80b04 4085 sb->c_status_block.status_block_id = sb_id;
4086
4087 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4088 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4089 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4090 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4091 U64_HI(section));
4092 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4093 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4094
4095 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4096 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4097 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4098
4099 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4100}
4101
4102static void bnx2x_zero_def_sb(struct bnx2x *bp)
4103{
4104 int func = BP_FUNC(bp);
a2fbb9ea 4105
4106 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4107 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4108 sizeof(struct ustorm_def_status_block)/4);
4109 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4110 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4111 sizeof(struct cstorm_def_status_block)/4);
4112 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4113 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4114 sizeof(struct xstorm_def_status_block)/4);
4115 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4116 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4117 sizeof(struct tstorm_def_status_block)/4);
4118}
4119
4120static void bnx2x_init_def_sb(struct bnx2x *bp,
4121 struct host_def_status_block *def_sb,
34f80b04 4122 dma_addr_t mapping, int sb_id)
a2fbb9ea 4123{
4124 int port = BP_PORT(bp);
4125 int func = BP_FUNC(bp);
4126 int index, val, reg_offset;
4127 u64 section;
4128
4129 /* ATTN */
4130 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4131 atten_status_block);
34f80b04 4132 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4133
4134 bp->attn_state = 0;
4135
4136 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4137 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4138
34f80b04 4139 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4140 bp->attn_group[index].sig[0] = REG_RD(bp,
4141 reg_offset + 0x10*index);
4142 bp->attn_group[index].sig[1] = REG_RD(bp,
4143 reg_offset + 0x4 + 0x10*index);
4144 bp->attn_group[index].sig[2] = REG_RD(bp,
4145 reg_offset + 0x8 + 0x10*index);
4146 bp->attn_group[index].sig[3] = REG_RD(bp,
4147 reg_offset + 0xc + 0x10*index);
4148 }
4149
4150 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4151 HC_REG_ATTN_MSG0_ADDR_L);
4152
4153 REG_WR(bp, reg_offset, U64_LO(section));
4154 REG_WR(bp, reg_offset + 4, U64_HI(section));
4155
4156 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4157
4158 val = REG_RD(bp, reg_offset);
34f80b04 4159 val |= sb_id;
4160 REG_WR(bp, reg_offset, val);
4161
4162 /* USTORM */
4163 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4164 u_def_status_block);
34f80b04 4165 def_sb->u_def_status_block.status_block_id = sb_id;
4166
4167 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4168 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4169 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4170 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4171 U64_HI(section));
5c862848 4172 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4173 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4174
4175 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4176 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4177 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4178
4179 /* CSTORM */
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 c_def_status_block);
34f80b04 4182 def_sb->c_def_status_block.status_block_id = sb_id;
4183
4184 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4185 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4186 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4187 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4188 U64_HI(section));
5c862848 4189 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4190 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4191
4192 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4193 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4194 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4195
4196 /* TSTORM */
4197 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4198 t_def_status_block);
34f80b04 4199 def_sb->t_def_status_block.status_block_id = sb_id;
4200
4201 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4202 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4203 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4204 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4205 U64_HI(section));
5c862848 4206 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4207 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4208
4209 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4210 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4211 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4212
4213 /* XSTORM */
4214 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4215 x_def_status_block);
34f80b04 4216 def_sb->x_def_status_block.status_block_id = sb_id;
4217
4218 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4219 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4220 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4221 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4222 U64_HI(section));
5c862848 4223 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4224 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4225
4226 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4227 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4228 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4229
bb2a0f7a 4230 bp->stats_pending = 0;
66e855f3 4231 bp->set_mac_pending = 0;
bb2a0f7a 4232
34f80b04 4233 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4234}
4235
4236static void bnx2x_update_coalesce(struct bnx2x *bp)
4237{
34f80b04 4238 int port = BP_PORT(bp);
4239 int i;
4240
4241 for_each_queue(bp, i) {
34f80b04 4242 int sb_id = bp->fp[i].sb_id;
4243
4244 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4245 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4246 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4247 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4248 bp->rx_ticks/12);
a2fbb9ea 4249 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4250 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4251 U_SB_ETH_RX_CQ_INDEX),
4252 bp->rx_ticks ? 0 : 1);
4253
4254 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4255 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4256 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4257 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4258 bp->tx_ticks/12);
a2fbb9ea 4259 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4260 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4261 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4262 bp->tx_ticks ? 0 : 1);
4263 }
4264}
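The /12 above converts the driver's coalescing parameters into the host
coalescing block's coarser timeout ticks, and a value of 0 disables the
index entirely. In pseudo-register terms (a sketch, assuming the hardware
timeout tick is 12 of the units bp->rx_ticks/bp->tx_ticks are kept in):

	timeout_reg = ticks / 12;	/* HC timeout, coarse ticks */
	disable_reg = ticks ? 0 : 1;	/* 1 = no coalescing on this index */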
4265
4266static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4267 struct bnx2x_fastpath *fp, int last)
4268{
4269 int i;
4270
4271 for (i = 0; i < last; i++) {
4272 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4273 struct sk_buff *skb = rx_buf->skb;
4274
4275 if (skb == NULL) {
4276 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4277 continue;
4278 }
4279
4280 if (fp->tpa_state[i] == BNX2X_TPA_START)
4281 pci_unmap_single(bp->pdev,
4282 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4283 bp->rx_buf_size,
4284 PCI_DMA_FROMDEVICE);
4285
4286 dev_kfree_skb(skb);
4287 rx_buf->skb = NULL;
4288 }
4289}
4290
4291static void bnx2x_init_rx_rings(struct bnx2x *bp)
4292{
7a9b2557 4293 int func = BP_FUNC(bp);
4294 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4295 ETH_MAX_AGGREGATION_QUEUES_E1H;
4296 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4297 int i, j;
a2fbb9ea 4298
4299 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4300 DP(NETIF_MSG_IFUP,
4301 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4302
7a9b2557 4303 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4304
555f6c78 4305 for_each_rx_queue(bp, j) {
32626230 4306 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4307
32626230 4308 for (i = 0; i < max_agg_queues; i++) {
4309 fp->tpa_pool[i].skb =
4310 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4311 if (!fp->tpa_pool[i].skb) {
4312 BNX2X_ERR("Failed to allocate TPA "
4313 "skb pool for queue[%d] - "
4314 "disabling TPA on this "
4315 "queue!\n", j);
4316 bnx2x_free_tpa_pool(bp, fp, i);
4317 fp->disable_tpa = 1;
4318 break;
4319 }
4320 pci_unmap_addr_set((struct sw_rx_bd *)
4321 &bp->fp->tpa_pool[i],
4322 mapping, 0);
4323 fp->tpa_state[i] = BNX2X_TPA_STOP;
4324 }
4325 }
4326 }
4327
555f6c78 4328 for_each_rx_queue(bp, j) {
4329 struct bnx2x_fastpath *fp = &bp->fp[j];
4330
4331 fp->rx_bd_cons = 0;
4332 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4333 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4334
4335 /* "next page" elements initialization */
4336 /* SGE ring */
4337 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4338 struct eth_rx_sge *sge;
4339
4340 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4341 sge->addr_hi =
4342 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4343 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4344 sge->addr_lo =
4345 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4346 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4347 }
4348
4349 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4350
7a9b2557 4351 /* RX BD ring */
4352 for (i = 1; i <= NUM_RX_RINGS; i++) {
4353 struct eth_rx_bd *rx_bd;
4354
4355 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4356 rx_bd->addr_hi =
4357 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4358 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4359 rx_bd->addr_lo =
4360 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4361 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4362 }
4363
34f80b04 4364 /* CQ ring */
4365 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4366 struct eth_rx_cqe_next_page *nextpg;
4367
4368 nextpg = (struct eth_rx_cqe_next_page *)
4369 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4370 nextpg->addr_hi =
4371 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4372 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4373 nextpg->addr_lo =
4374 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4375 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4376 }
4377
4378 /* Allocate SGEs and initialize the ring elements */
4379 for (i = 0, ring_prod = 0;
4380 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4381
4382 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4383 BNX2X_ERR("was only able to allocate "
4384 "%d rx sges\n", i);
4385 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4386 /* Cleanup already allocated elements */
4387 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4388 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4389 fp->disable_tpa = 1;
4390 ring_prod = 0;
4391 break;
4392 }
4393 ring_prod = NEXT_SGE_IDX(ring_prod);
4394 }
4395 fp->rx_sge_prod = ring_prod;
4396
4397 /* Allocate BDs and initialize BD ring */
66e855f3 4398 fp->rx_comp_cons = 0;
7a9b2557 4399 cqe_ring_prod = ring_prod = 0;
4400 for (i = 0; i < bp->rx_ring_size; i++) {
4401 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4402 BNX2X_ERR("was only able to allocate "
4403 "%d rx skbs\n", i);
66e855f3 4404 bp->eth_stats.rx_skb_alloc_failed++;
4405 break;
4406 }
4407 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4408 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4409 WARN_ON(ring_prod <= i);
4410 }
4411
4412 fp->rx_bd_prod = ring_prod;
4413 /* must not have more available CQEs than BDs */
4414 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4415 cqe_ring_prod);
4416 fp->rx_pkt = fp->rx_calls = 0;
4417
 4418                 /* Warning!
 4419                  * this will generate an interrupt (to the TSTORM);
 4420                  * it must only be done after the chip is initialized
 4421                  */
4422 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4423 fp->rx_sge_prod);
4424 if (j != 0)
4425 continue;
4426
4427 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4428 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4429 U64_LO(fp->rx_comp_mapping));
4430 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4431 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4432 U64_HI(fp->rx_comp_mapping));
4433 }
4434}
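All three rings initialized above (SGE, RX BD, RCQ) reserve the tail of
each page for a "next page" link, and the modulo arithmetic makes the last
page point back at the first, closing the ring. The link address computed
in each loop, in isolation (sketch; the helper is hypothetical):

static inline dma_addr_t ring_next_page(dma_addr_t base, int i, int npages)
{
	/* i runs from 1 to npages, so the last page links to page 0 */
	return base + BCM_PAGE_SIZE * (i % npages);
}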
4435
4436static void bnx2x_init_tx_ring(struct bnx2x *bp)
4437{
4438 int i, j;
4439
555f6c78 4440 for_each_tx_queue(bp, j) {
4441 struct bnx2x_fastpath *fp = &bp->fp[j];
4442
4443 for (i = 1; i <= NUM_TX_RINGS; i++) {
4444 struct eth_tx_bd *tx_bd =
4445 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4446
4447 tx_bd->addr_hi =
4448 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4449 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4450 tx_bd->addr_lo =
4451 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4452 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4453 }
4454
4455 fp->tx_pkt_prod = 0;
4456 fp->tx_pkt_cons = 0;
4457 fp->tx_bd_prod = 0;
4458 fp->tx_bd_cons = 0;
4459 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4460 fp->tx_pkt = 0;
4461 }
4462}
4463
4464static void bnx2x_init_sp_ring(struct bnx2x *bp)
4465{
34f80b04 4466 int func = BP_FUNC(bp);
4467
4468 spin_lock_init(&bp->spq_lock);
4469
4470 bp->spq_left = MAX_SPQ_PENDING;
4471 bp->spq_prod_idx = 0;
4472 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4473 bp->spq_prod_bd = bp->spq;
4474 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4475
34f80b04 4476 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4477 U64_LO(bp->spq_mapping));
4478 REG_WR(bp,
4479 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4480 U64_HI(bp->spq_mapping));
4481
34f80b04 4482 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4483 bp->spq_prod_idx);
4484}
4485
4486static void bnx2x_init_context(struct bnx2x *bp)
4487{
4488 int i;
4489
4490 for_each_queue(bp, i) {
4491 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4492 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 4493 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4494
4495 context->ustorm_st_context.common.sb_index_numbers =
4496 BNX2X_RX_SB_INDEX_NUM;
4497 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4498 context->ustorm_st_context.common.status_block_id = sb_id;
4499 context->ustorm_st_context.common.flags =
4500 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
8d9c5f34 4501 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4502 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4503 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4504 bp->rx_buf_size;
34f80b04 4505 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4506 U64_HI(fp->rx_desc_mapping);
34f80b04 4507 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4508 U64_LO(fp->rx_desc_mapping);
4509 if (!fp->disable_tpa) {
4510 context->ustorm_st_context.common.flags |=
4511 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4512 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4513 context->ustorm_st_context.common.sge_buff_size =
4514 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4515 (u32)0xffff);
4516 context->ustorm_st_context.common.sge_page_base_hi =
4517 U64_HI(fp->rx_sge_mapping);
4518 context->ustorm_st_context.common.sge_page_base_lo =
4519 U64_LO(fp->rx_sge_mapping);
4520 }
4521
4522 context->ustorm_ag_context.cdu_usage =
4523 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4524 CDU_REGION_NUMBER_UCM_AG,
4525 ETH_CONNECTION_TYPE);
4526
4527 context->xstorm_st_context.tx_bd_page_base_hi =
4528 U64_HI(fp->tx_desc_mapping);
4529 context->xstorm_st_context.tx_bd_page_base_lo =
4530 U64_LO(fp->tx_desc_mapping);
4531 context->xstorm_st_context.db_data_addr_hi =
4532 U64_HI(fp->tx_prods_mapping);
4533 context->xstorm_st_context.db_data_addr_lo =
4534 U64_LO(fp->tx_prods_mapping);
4535 context->xstorm_st_context.statistics_data = (fp->cl_id |
4536 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4537 context->cstorm_st_context.sb_index_number =
5c862848 4538 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4539 context->cstorm_st_context.status_block_id = sb_id;
4540
4541 context->xstorm_ag_context.cdu_reserved =
4542 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4543 CDU_REGION_NUMBER_XCM_AG,
4544 ETH_CONNECTION_TYPE);
4545 }
4546}
4547
4548static void bnx2x_init_ind_table(struct bnx2x *bp)
4549{
26c8fa4d 4550 int func = BP_FUNC(bp);
4551 int i;
4552
555f6c78 4553 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4554 return;
4555
4556 DP(NETIF_MSG_IFUP,
4557 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4558 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4559 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4560 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4561 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4562}
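The loop above spreads the indirection table round-robin across the RX
queues, so slot i steers to client (BP_CL_ID(bp) + i mod num_rx_queues).
Equivalently, written against a plain array (sketch):

	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		table[i] = BP_CL_ID(bp) + (i % bp->num_rx_queues);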
4563
4564static void bnx2x_set_client_config(struct bnx2x *bp)
4565{
49d66772 4566 struct tstorm_eth_client_config tstorm_client = {0};
4567 int port = BP_PORT(bp);
4568 int i;
49d66772 4569
e7799c5f 4570 tstorm_client.mtu = bp->dev->mtu;
66e855f3 4571 tstorm_client.statistics_counter_id = BP_CL_ID(bp);
4572 tstorm_client.config_flags =
4573 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4574#ifdef BCM_VLAN
0c6671b0 4575 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4576 tstorm_client.config_flags |=
8d9c5f34 4577 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4578 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4579 }
4580#endif
49d66772 4581
4582 if (bp->flags & TPA_ENABLE_FLAG) {
4583 tstorm_client.max_sges_for_packet =
4f40f2cb 4584 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4585 tstorm_client.max_sges_for_packet =
4586 ((tstorm_client.max_sges_for_packet +
4587 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4588 PAGES_PER_SGE_SHIFT;
4589
4590 tstorm_client.config_flags |=
4591 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4592 }
4593
4594 for_each_queue(bp, i) {
4595 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4596 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4597 ((u32 *)&tstorm_client)[0]);
4598 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4599 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4600 ((u32 *)&tstorm_client)[1]);
4601 }
4602
4603 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4604 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4605}
4606
4607static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4608{
a2fbb9ea 4609 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4610 int mode = bp->rx_mode;
4611 int mask = (1 << BP_L_ID(bp));
4612 int func = BP_FUNC(bp);
4613 int i;
4614
3196a88a 4615 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4616
4617 switch (mode) {
4618 case BNX2X_RX_MODE_NONE: /* no Rx */
4619 tstorm_mac_filter.ucast_drop_all = mask;
4620 tstorm_mac_filter.mcast_drop_all = mask;
4621 tstorm_mac_filter.bcast_drop_all = mask;
4622 break;
4623 case BNX2X_RX_MODE_NORMAL:
34f80b04 4624 tstorm_mac_filter.bcast_accept_all = mask;
4625 break;
4626 case BNX2X_RX_MODE_ALLMULTI:
4627 tstorm_mac_filter.mcast_accept_all = mask;
4628 tstorm_mac_filter.bcast_accept_all = mask;
4629 break;
4630 case BNX2X_RX_MODE_PROMISC:
4631 tstorm_mac_filter.ucast_accept_all = mask;
4632 tstorm_mac_filter.mcast_accept_all = mask;
4633 tstorm_mac_filter.bcast_accept_all = mask;
4634 break;
4635 default:
4636 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4637 break;
4638 }
4639
4640 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4641 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4642 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4643 ((u32 *)&tstorm_mac_filter)[i]);
4644
34f80b04 4645/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4646 ((u32 *)&tstorm_mac_filter)[i]); */
4647 }
a2fbb9ea 4648
4649 if (mode != BNX2X_RX_MODE_NONE)
4650 bnx2x_set_client_config(bp);
4651}
4652
4653static void bnx2x_init_internal_common(struct bnx2x *bp)
4654{
4655 int i;
4656
4657 if (bp->flags & TPA_ENABLE_FLAG) {
4658 struct tstorm_eth_tpa_exist tpa = {0};
4659
4660 tpa.tpa_exist = 1;
4661
4662 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4663 ((u32 *)&tpa)[0]);
4664 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4665 ((u32 *)&tpa)[1]);
4666 }
4667
 4668         /* Zero this manually as its initialization is
 4669          * currently missing in the initTool */
4670 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4671 REG_WR(bp, BAR_USTRORM_INTMEM +
4672 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4673}
4674
4675static void bnx2x_init_internal_port(struct bnx2x *bp)
4676{
4677 int port = BP_PORT(bp);
4678
4679 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4680 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4681 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4682 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4683}
4684
4685static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4686{
4687 struct tstorm_eth_function_common_config tstorm_config = {0};
4688 struct stats_indication_flags stats_flags = {0};
4689 int port = BP_PORT(bp);
4690 int func = BP_FUNC(bp);
4691 int i;
471de716 4692 u16 max_agg_size;
4693
4694 if (is_multi(bp)) {
555f6c78 4695 tstorm_config.config_flags = MULTI_FLAGS(bp);
4696 tstorm_config.rss_result_mask = MULTI_MASK;
4697 }
4698 if (IS_E1HMF(bp))
4699 tstorm_config.config_flags |=
4700 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4701
4702 tstorm_config.leading_client_id = BP_L_ID(bp);
4703
a2fbb9ea 4704 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4705 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4706 (*(u32 *)&tstorm_config));
4707
c14423fe 4708 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4709 bnx2x_set_storm_rx_mode(bp);
4710
4711 /* reset xstorm per client statistics */
4712 for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
4713 REG_WR(bp, BAR_XSTRORM_INTMEM +
4714 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4715 i*4, 0);
4716 }
4717 /* reset tstorm per client statistics */
4718 for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
4719 REG_WR(bp, BAR_TSTRORM_INTMEM +
4720 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
4721 i*4, 0);
4722 }
4723
4724 /* Init statistics related context */
34f80b04 4725 stats_flags.collect_eth = 1;
a2fbb9ea 4726
66e855f3 4727 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4728 ((u32 *)&stats_flags)[0]);
66e855f3 4729 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4730 ((u32 *)&stats_flags)[1]);
4731
66e855f3 4732 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4733 ((u32 *)&stats_flags)[0]);
66e855f3 4734 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4735 ((u32 *)&stats_flags)[1]);
4736
66e855f3 4737 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4738 ((u32 *)&stats_flags)[0]);
66e855f3 4739 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4740 ((u32 *)&stats_flags)[1]);
4741
4742 REG_WR(bp, BAR_XSTRORM_INTMEM +
4743 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4744 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4745 REG_WR(bp, BAR_XSTRORM_INTMEM +
4746 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4747 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4748
4749 REG_WR(bp, BAR_TSTRORM_INTMEM +
4750 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4751 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4752 REG_WR(bp, BAR_TSTRORM_INTMEM +
4753 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4754 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4755
4756 if (CHIP_IS_E1H(bp)) {
4757 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4758 IS_E1HMF(bp));
4759 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4760 IS_E1HMF(bp));
4761 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4762 IS_E1HMF(bp));
4763 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4764 IS_E1HMF(bp));
4765
4766 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4767 bp->e1hov);
4768 }
4769
4770 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4771 max_agg_size =
4772 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4773 SGE_PAGE_SIZE * PAGES_PER_SGE),
4774 (u32)0xffff);
555f6c78 4775 for_each_rx_queue(bp, i) {
7a9b2557 4776 struct bnx2x_fastpath *fp = &bp->fp[i];
4777
4778 REG_WR(bp, BAR_USTRORM_INTMEM +
4779 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4780 U64_LO(fp->rx_comp_mapping));
4781 REG_WR(bp, BAR_USTRORM_INTMEM +
4782 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4783 U64_HI(fp->rx_comp_mapping));
4784
4785 REG_WR16(bp, BAR_USTRORM_INTMEM +
4786 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4787 max_agg_size);
4788 }
4789}
4790
4791static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4792{
4793 switch (load_code) {
4794 case FW_MSG_CODE_DRV_LOAD_COMMON:
4795 bnx2x_init_internal_common(bp);
4796 /* no break */
4797
4798 case FW_MSG_CODE_DRV_LOAD_PORT:
4799 bnx2x_init_internal_port(bp);
4800 /* no break */
4801
4802 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4803 bnx2x_init_internal_func(bp);
4804 break;
4805
4806 default:
4807 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4808 break;
4809 }
4810}
4811
4812static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4813{
4814 int i;
4815
4816 for_each_queue(bp, i) {
4817 struct bnx2x_fastpath *fp = &bp->fp[i];
4818
34f80b04 4819 fp->bp = bp;
a2fbb9ea 4820 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4821 fp->index = i;
4822 fp->cl_id = BP_L_ID(bp) + i;
4823 fp->sb_id = fp->cl_id;
4824 DP(NETIF_MSG_IFUP,
4825 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4826 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4827 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4828 FP_SB_ID(fp));
4829 bnx2x_update_fpsb_idx(fp);
4830 }
4831
4832 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4833 DEF_SB_ID);
4834 bnx2x_update_dsb_idx(bp);
4835 bnx2x_update_coalesce(bp);
4836 bnx2x_init_rx_rings(bp);
4837 bnx2x_init_tx_ring(bp);
4838 bnx2x_init_sp_ring(bp);
4839 bnx2x_init_context(bp);
471de716 4840 bnx2x_init_internal(bp, load_code);
a2fbb9ea 4841 bnx2x_init_ind_table(bp);
4842 bnx2x_stats_init(bp);
4843
4844 /* At this point, we are ready for interrupts */
4845 atomic_set(&bp->intr_sem, 0);
4846
4847 /* flush all before enabling interrupts */
4848 mb();
4849 mmiowb();
4850
615f8fd9 4851 bnx2x_int_enable(bp);
4852}
4853
4854/* end of nic init */
4855
4856/*
4857 * gzip service functions
4858 */
4859
4860static int bnx2x_gunzip_init(struct bnx2x *bp)
4861{
4862 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4863 &bp->gunzip_mapping);
4864 if (bp->gunzip_buf == NULL)
4865 goto gunzip_nomem1;
4866
4867 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4868 if (bp->strm == NULL)
4869 goto gunzip_nomem2;
4870
4871 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4872 GFP_KERNEL);
4873 if (bp->strm->workspace == NULL)
4874 goto gunzip_nomem3;
4875
4876 return 0;
4877
4878gunzip_nomem3:
4879 kfree(bp->strm);
4880 bp->strm = NULL;
4881
4882gunzip_nomem2:
4883 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4884 bp->gunzip_mapping);
4885 bp->gunzip_buf = NULL;
4886
4887gunzip_nomem1:
4888 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4889 " un-compression\n", bp->dev->name);
4890 return -ENOMEM;
4891}
4892
4893static void bnx2x_gunzip_end(struct bnx2x *bp)
4894{
4895 kfree(bp->strm->workspace);
4896
4897 kfree(bp->strm);
4898 bp->strm = NULL;
4899
4900 if (bp->gunzip_buf) {
4901 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4902 bp->gunzip_mapping);
4903 bp->gunzip_buf = NULL;
4904 }
4905}
4906
4907static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4908{
4909 int n, rc;
4910
4911 /* check gzip header */
4912 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4913 return -EINVAL;
4914
4915 n = 10;
4916
34f80b04 4917#define FNAME 0x8
4918
4919 if (zbuf[3] & FNAME)
4920 while ((zbuf[n++] != 0) && (n < len));
4921
4922 bp->strm->next_in = zbuf + n;
4923 bp->strm->avail_in = len - n;
4924 bp->strm->next_out = bp->gunzip_buf;
4925 bp->strm->avail_out = FW_BUF_SIZE;
4926
4927 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4928 if (rc != Z_OK)
4929 return rc;
4930
4931 rc = zlib_inflate(bp->strm, Z_FINISH);
4932 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4933 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4934 bp->dev->name, bp->strm->msg);
4935
4936 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4937 if (bp->gunzip_outlen & 0x3)
4938 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4939 " gunzip_outlen (%d) not aligned\n",
4940 bp->dev->name, bp->gunzip_outlen);
4941 bp->gunzip_outlen >>= 2;
4942
4943 zlib_inflateEnd(bp->strm);
4944
4945 if (rc == Z_STREAM_END)
4946 return 0;
4947
4948 return rc;
4949}
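A typical caller (sketch; write_dwords() is hypothetical, the real
firmware loading path lives elsewhere in this file) decompresses a blob
into gunzip_buf and then consumes bp->gunzip_outlen dwords from it:

	if (bnx2x_gunzip(bp, fw_blob, fw_len) == 0)
		write_dwords(bp, bp->gunzip_buf, bp->gunzip_outlen);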
4950
4951/* nic load/unload */
4952
4953/*
34f80b04 4954 * General service functions
4955 */
4956
4957/* send a NIG loopback debug packet */
4958static void bnx2x_lb_pckt(struct bnx2x *bp)
4959{
a2fbb9ea 4960 u32 wb_write[3];
4961
4962 /* Ethernet source and destination addresses */
4963 wb_write[0] = 0x55555555;
4964 wb_write[1] = 0x55555555;
34f80b04 4965 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4966 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4967
4968 /* NON-IP protocol */
4969 wb_write[0] = 0x09000000;
4970 wb_write[1] = 0x55555555;
34f80b04 4971 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4972 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
4973}
4974
4975/* Some of the internal memories are not directly
4976 * readable from the driver; to test them we send
4977 * debug packets
4978 */
4979static int bnx2x_int_mem_test(struct bnx2x *bp)
4980{
4981 int factor;
4982 int count, i;
4983 u32 val = 0;
4984
ad8d3948 4985 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4986 factor = 120;
4987 else if (CHIP_REV_IS_EMUL(bp))
4988 factor = 200;
4989 else
a2fbb9ea 4990 factor = 1;
4991
4992 DP(NETIF_MSG_HW, "start part1\n");
4993
4994 /* Disable inputs of parser neighbor blocks */
4995 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4996 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4997 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4998 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
4999
5000 /* Write 0 to parser credits for CFC search request */
5001 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5002
5003 /* send Ethernet packet */
5004 bnx2x_lb_pckt(bp);
5005
5006         /* TODO: do we need to reset the NIG statistics? */
5007 /* Wait until NIG register shows 1 packet of size 0x10 */
5008 count = 1000 * factor;
5009 while (count) {
5010
5011 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5012 val = *bnx2x_sp(bp, wb_data[0]);
5013 if (val == 0x10)
5014 break;
5015
5016 msleep(10);
5017 count--;
5018 }
5019 if (val != 0x10) {
5020 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5021 return -1;
5022 }
5023
5024 /* Wait until PRS register shows 1 packet */
5025 count = 1000 * factor;
5026 while (count) {
5027 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5028 if (val == 1)
5029 break;
5030
5031 msleep(10);
5032 count--;
5033 }
5034 if (val != 0x1) {
5035 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5036 return -2;
5037 }
5038
5039 /* Reset and init BRB, PRS */
5040 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5041 msleep(50);
5042 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5043 msleep(50);
5044 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5045 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5046
5047 DP(NETIF_MSG_HW, "part2\n");
5048
5049 /* Disable inputs of parser neighbor blocks */
5050 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5051 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5052 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5053 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5054
5055 /* Write 0 to parser credits for CFC search request */
5056 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5057
5058 /* send 10 Ethernet packets */
5059 for (i = 0; i < 10; i++)
5060 bnx2x_lb_pckt(bp);
5061
5062 /* Wait until NIG register shows 10 + 1
5063 packets of size 11*0x10 = 0xb0 */
5064 count = 1000 * factor;
5065 while (count) {
5066
5067 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5068 val = *bnx2x_sp(bp, wb_data[0]);
5069 if (val == 0xb0)
5070 break;
5071
5072 msleep(10);
5073 count--;
5074 }
5075 if (val != 0xb0) {
5076 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5077 return -3;
5078 }
5079
5080 /* Wait until PRS register shows 2 packets */
5081 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5082 if (val != 2)
5083 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5084
5085 /* Write 1 to parser credits for CFC search request */
5086 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5087
5088 /* Wait until PRS register shows 3 packets */
5089 msleep(10 * factor);
5090 /* Wait until NIG register shows 1 packet of size 0x10 */
5091 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5092 if (val != 3)
5093 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5094
5095 /* clear NIG EOP FIFO */
5096 for (i = 0; i < 11; i++)
5097 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5098 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5099 if (val != 1) {
5100 BNX2X_ERR("clear of NIG failed\n");
5101 return -4;
5102 }
5103
5104 /* Reset and init BRB, PRS, NIG */
5105 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5106 msleep(50);
5107 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5108 msleep(50);
5109 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5110 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5111#ifndef BCM_ISCSI
5112 /* set NIC mode */
5113 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5114#endif
5115
5116 /* Enable inputs of parser neighbor blocks */
5117 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5118 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5119 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5120 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5121
5122 DP(NETIF_MSG_HW, "done\n");
5123
5124 return 0; /* OK */
5125}
5126
5127static void enable_blocks_attention(struct bnx2x *bp)
5128{
5129 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5130 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5131 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5132 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5133 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5134 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5135 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5136 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5137 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5138/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5139/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5140 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5141 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5142 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5143/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5144/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5145 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5146 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5147 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5148 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5149/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5150/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5151 if (CHIP_REV_IS_FPGA(bp))
5152 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5153 else
5154 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5155 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5156 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5157 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5158/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5159/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5160 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5161 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5162/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5163 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
5164}
5165
5166
5167static void bnx2x_reset_common(struct bnx2x *bp)
5168{
5169 /* reset_common */
5170 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5171 0xd3ffff7f);
5172 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5173}
5174
5175static int bnx2x_init_common(struct bnx2x *bp)
5176{
5177 u32 val, i;
5178
5179 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5180
5181 bnx2x_reset_common(bp);
5182 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5183 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5184
5185 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5186 if (CHIP_IS_E1H(bp))
5187 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5188
5189 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5190 msleep(30);
5191 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5192
5193 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5194 if (CHIP_IS_E1(bp)) {
5195 /* enable HW interrupt from PXP on USDM overflow
5196 bit 16 on INT_MASK_0 */
5197 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5198 }
5199
5200 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5201 bnx2x_init_pxp(bp);
5202
5203#ifdef __BIG_ENDIAN
5204 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5205 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5206 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5207 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5208 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5209 /* make sure this value is 0 */
5210 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5211
5212/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5213 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5214 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5215 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5216 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5217#endif
5218
5219 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5220#ifdef BCM_ISCSI
5221 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5222 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5223 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5224#endif
5225
5226 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5227 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5228
5229 /* let the HW do its magic ... */
5230 msleep(100);
5231 /* finish PXP init */
5232 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5233 if (val != 1) {
5234 BNX2X_ERR("PXP2 CFG failed\n");
5235 return -EBUSY;
5236 }
5237 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5238 if (val != 1) {
5239 BNX2X_ERR("PXP2 RD_INIT failed\n");
5240 return -EBUSY;
5241 }
5242
5243 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5244 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5245
5246 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
5247
5248 /* clean the DMAE memory */
5249 bp->dmae_ready = 1;
5250 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5251
5252 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5253 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5254 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5255 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
5256
5257 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5258 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5259 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5260 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5261
5262 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5263 /* soft reset pulse */
5264 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5265 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5266
5267#ifdef BCM_ISCSI
5268 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
5269#endif
5270
5271 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5272 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5273 if (!CHIP_REV_IS_SLOW(bp)) {
5274 /* enable hw interrupt from doorbell Q */
5275 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5276 }
5277
5278 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5279 if (CHIP_REV_IS_SLOW(bp)) {
5280 /* fix for emulation and FPGA for no pause */
5281 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5282 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5283 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5284 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5285 }
5286
5287 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5288 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5289 /* set NIC mode */
5290 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5291 if (CHIP_IS_E1H(bp))
5292 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5293
5294 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5295 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5296 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5297 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
5298
5299 if (CHIP_IS_E1H(bp)) {
5300 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5301 STORM_INTMEM_SIZE_E1H/2);
5302 bnx2x_init_fill(bp,
5303 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5304 0, STORM_INTMEM_SIZE_E1H/2);
5305 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5306 STORM_INTMEM_SIZE_E1H/2);
5307 bnx2x_init_fill(bp,
5308 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5309 0, STORM_INTMEM_SIZE_E1H/2);
5310 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5311 STORM_INTMEM_SIZE_E1H/2);
5312 bnx2x_init_fill(bp,
5313 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5314 0, STORM_INTMEM_SIZE_E1H/2);
5315 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5316 STORM_INTMEM_SIZE_E1H/2);
5317 bnx2x_init_fill(bp,
5318 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5319 0, STORM_INTMEM_SIZE_E1H/2);
5320 } else { /* E1 */
5321 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5322 STORM_INTMEM_SIZE_E1);
5323 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5324 STORM_INTMEM_SIZE_E1);
5325 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5326 STORM_INTMEM_SIZE_E1);
5327 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5328 STORM_INTMEM_SIZE_E1);
5329 }
5330
5331 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5332 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5333 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5334 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
5335
5336 /* sync semi rtc */
5337 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5338 0x80000000);
5339 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5340 0x80000000);
5341
5342 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5343 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5344 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
5345
5346 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5347 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5348 REG_WR(bp, i, 0xc0cac01a);
5349 /* TODO: replace with something meaningful */
5350 }
5351 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5352 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5353
5354 if (sizeof(union cdu_context) != 1024)
5355 /* we currently assume that a context is 1024 bytes */
5356 printk(KERN_ALERT PFX "please adjust the size of"
5357 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5358
5359 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5360 val = (4 << 24) + (0 << 12) + 1024;
5361 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5362 if (CHIP_IS_E1(bp)) {
5363 /* !!! fix pxp client credit until excel update */
5364 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5365 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5366 }
5367
5368 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5369 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5370 /* enable context validation interrupt from CFC */
5371 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5372
5373 /* set the thresholds to prevent CFC/CDU race */
5374 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5375
5376 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5377 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
5378
5379 /* PXPCS COMMON comes here */
5380 /* Reset PCIE errors for debug */
5381 REG_WR(bp, 0x2814, 0xffffffff);
5382 REG_WR(bp, 0x3820, 0xffffffff);
5383
5384 /* EMAC0 COMMON comes here */
5385 /* EMAC1 COMMON comes here */
5386 /* DBU COMMON comes here */
5387 /* DBG COMMON comes here */
5388
5389 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5390 if (CHIP_IS_E1H(bp)) {
5391 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5392 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5393 }
5394
5395 if (CHIP_REV_IS_SLOW(bp))
5396 msleep(200);
5397
5398 /* finish CFC init */
5399 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5400 if (val != 1) {
5401 BNX2X_ERR("CFC LL_INIT failed\n");
5402 return -EBUSY;
5403 }
5404 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5405 if (val != 1) {
5406 BNX2X_ERR("CFC AC_INIT failed\n");
5407 return -EBUSY;
5408 }
5409 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5410 if (val != 1) {
5411 BNX2X_ERR("CFC CAM_INIT failed\n");
5412 return -EBUSY;
5413 }
5414 REG_WR(bp, CFC_REG_DEBUG0, 0);
5415
5416 /* read NIG statistic
5417 to see if this is our first up since powerup */
5418 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5419 val = *bnx2x_sp(bp, wb_data[0]);
5420
5421 /* do internal memory self test */
5422 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5423 BNX2X_ERR("internal mem self test failed\n");
5424 return -EBUSY;
5425 }
5426
5427 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5428 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5429 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5430 /* Fan failure is indicated by SPIO 5 */
5431 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5432 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5433
5434 /* set to active low mode */
5435 val = REG_RD(bp, MISC_REG_SPIO_INT);
5436 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5437 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5438 REG_WR(bp, MISC_REG_SPIO_INT, val);
5439
5440 /* enable interrupt to signal the IGU */
5441 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5442 val |= (1 << MISC_REGISTERS_SPIO_5);
5443 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5444 break;
5445
5446 default:
5447 break;
5448 }
5449
5450 /* clear PXP2 attentions */
5451 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5452
5453 enable_blocks_attention(bp);
5454
5455 if (!BP_NOMCP(bp)) {
5456 bnx2x_acquire_phy_lock(bp);
5457 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5458 bnx2x_release_phy_lock(bp);
5459 } else
5460 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5461
5462 return 0;
5463}
5464
5465static int bnx2x_init_port(struct bnx2x *bp)
5466{
5467 int port = BP_PORT(bp);
5468 u32 val;
5469
5470 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5471
5472 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5473
5474 /* Port PXP comes here */
5475 /* Port PXP2 comes here */
5476#ifdef BCM_ISCSI
5477 /* Port0 1
5478 * Port1 385 */
5479 i++;
5480 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5481 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5482 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5483 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5484
5485 /* Port0 2
5486 * Port1 386 */
5487 i++;
5488 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5489 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5490 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5491 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5492
5493 /* Port0 3
5494 * Port1 387 */
5495 i++;
5496 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5497 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5498 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5499 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5500#endif
5501 /* Port CMs come here */
5502 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5503 (port ? XCM_PORT1_END : XCM_PORT0_END));
5504
5505 /* Port QM comes here */
5506#ifdef BCM_ISCSI
5507 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5508 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5509
5510 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5511 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5512#endif
5513 /* Port DQ comes here */
5514 /* Port BRB1 comes here */
5515 /* Port PRS comes here */
5516 /* Port TSDM comes here */
5517 /* Port CSDM comes here */
5518 /* Port USDM comes here */
5519 /* Port XSDM comes here */
5520 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5521 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5522 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5523 port ? USEM_PORT1_END : USEM_PORT0_END);
5524 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5525 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5526 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5527 port ? XSEM_PORT1_END : XSEM_PORT0_END);
5528 /* Port UPB comes here */
5529 /* Port XPB comes here */
5530
5531 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5532 port ? PBF_PORT1_END : PBF_PORT0_END);
5533
5534 /* configure PBF to work without PAUSE mtu 9000 */
5535 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5536
5537 /* update threshold */
5538 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5539 /* update init credit */
5540 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5541
5542 /* probe changes */
5543 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5544 msleep(5);
5545 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5546
5547#ifdef BCM_ISCSI
5548 /* tell the searcher where the T2 table is */
5549 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5550
5551 wb_write[0] = U64_LO(bp->t2_mapping);
5552 wb_write[1] = U64_HI(bp->t2_mapping);
5553 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5554 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5555 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5556 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5557
5558 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5559 /* Port SRCH comes here */
5560#endif
5561 /* Port CDU comes here */
5562 /* Port CFC comes here */
5563
5564 if (CHIP_IS_E1(bp)) {
5565 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5566 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5567 }
5568 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5569 port ? HC_PORT1_END : HC_PORT0_END);
5570
5571 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
5572 MISC_AEU_PORT0_START,
5573 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5574 /* init aeu_mask_attn_func_0/1:
5575 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5576 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5577 * bits 4-7 are used for "per vn group attention" */
5578 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5579 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5580
5581 /* Port PXPCS comes here */
5582 /* Port EMAC0 comes here */
5583 /* Port EMAC1 comes here */
5584 /* Port DBU comes here */
5585 /* Port DBG comes here */
5586 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5587 port ? NIG_PORT1_END : NIG_PORT0_END);
5588
5589 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5590
5591 if (CHIP_IS_E1H(bp)) {
5592 u32 wsum;
5593 struct cmng_struct_per_port m_cmng_port;
5594 int vn;
5595
5596 /* 0x2 disable e1hov, 0x1 enable */
5597 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5598 (IS_E1HMF(bp) ? 0x1 : 0x2));
5599
5600 /* Init RATE SHAPING and FAIRNESS contexts.
5601 Initialize as if there is 10G link. */
5602 wsum = bnx2x_calc_vn_wsum(bp);
5603 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5604 if (IS_E1HMF(bp))
5605 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5606 bnx2x_init_vn_minmax(bp, 2*vn + port,
5607 wsum, 10000, &m_cmng_port);
5608 }
5609
5610 /* Port MCP comes here */
5611 /* Port DMAE comes here */
5612
5613 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
5614 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5615 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5616 /* add SPIO 5 to group 0 */
5617 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5618 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5619 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5620 break;
5621
5622 default:
5623 break;
5624 }
5625
5626 bnx2x__link_reset(bp);
5627
5628 return 0;
5629}
5630
5631#define ILT_PER_FUNC (768/2)
5632#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5633/* The physical address is shifted right 12 bits and a
5634 valid bit (1) is set at bit 52 (the 53rd bit);
5635 since this is a wide register(TM)
5636 it is split into two 32 bit writes
5637 */
5638#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5639#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5640#define PXP_ONE_ILT(x) (((x) << 10) | x)
5641#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
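/*
 * Worked example for the ONCHIP_ADDR macros: with addr = 0x123456700000,
 * addr >> 12 = 0x123456700, so ONCHIP_ADDR1(addr) = 0x23456700 (the low
 * 32 bits) and ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100001
 * (the remaining high bits plus the valid flag at bit 20 of the second
 * word, i.e. bit 52 of the combined value).
 */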
5642
5643#define CNIC_ILT_LINES 0
5644
5645static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5646{
5647 int reg;
5648
5649 if (CHIP_IS_E1H(bp))
5650 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5651 else /* E1 */
5652 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5653
5654 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5655}
5656
5657static int bnx2x_init_func(struct bnx2x *bp)
5658{
5659 int port = BP_PORT(bp);
5660 int func = BP_FUNC(bp);
5661 u32 addr, val;
5662 int i;
5663
5664 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5665
5666 /* set MSI reconfigure capability */
5667 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5668 val = REG_RD(bp, addr);
5669 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5670 REG_WR(bp, addr, val);
5671
5672 i = FUNC_ILT_BASE(func);
5673
5674 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5675 if (CHIP_IS_E1H(bp)) {
5676 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5677 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5678 } else /* E1 */
5679 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5680 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5681
5682
5683 if (CHIP_IS_E1H(bp)) {
5684 for (i = 0; i < 9; i++)
5685 bnx2x_init_block(bp,
5686 cm_start[func][i], cm_end[func][i]);
5687
5688 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5689 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5690 }
5691
5692 /* HC init per function */
5693 if (CHIP_IS_E1H(bp)) {
5694 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5695
5696 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5697 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5698 }
5699 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5700
5701 /* Reset PCIE errors for debug */
5702 REG_WR(bp, 0x2114, 0xffffffff);
5703 REG_WR(bp, 0x2120, 0xffffffff);
5704
5705 return 0;
5706}
5707
5708static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5709{
5710 int i, rc = 0;
5711
5712 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5713 BP_FUNC(bp), load_code);
5714
5715 bp->dmae_ready = 0;
5716 mutex_init(&bp->dmae_mutex);
5717 bnx2x_gunzip_init(bp);
5718
5719 switch (load_code) {
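 /* the cases below deliberately fall through: a COMMON load also
  * runs the PORT and FUNCTION stages, and a PORT load also runs
  * the FUNCTION stage */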
5720 case FW_MSG_CODE_DRV_LOAD_COMMON:
5721 rc = bnx2x_init_common(bp);
5722 if (rc)
5723 goto init_hw_err;
5724 /* no break */
5725
5726 case FW_MSG_CODE_DRV_LOAD_PORT:
5727 bp->dmae_ready = 1;
5728 rc = bnx2x_init_port(bp);
5729 if (rc)
5730 goto init_hw_err;
5731 /* no break */
5732
5733 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5734 bp->dmae_ready = 1;
5735 rc = bnx2x_init_func(bp);
5736 if (rc)
5737 goto init_hw_err;
5738 break;
5739
5740 default:
5741 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5742 break;
5743 }
5744
5745 if (!BP_NOMCP(bp)) {
5746 int func = BP_FUNC(bp);
5747
5748 bp->fw_drv_pulse_wr_seq =
5749 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
5750 DRV_PULSE_SEQ_MASK);
5751 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5752 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5753 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5754 } else
5755 bp->func_stx = 0;
5756
5757 /* this needs to be done before gunzip end */
5758 bnx2x_zero_def_sb(bp);
5759 for_each_queue(bp, i)
5760 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5761
5762init_hw_err:
5763 bnx2x_gunzip_end(bp);
5764
5765 return rc;
5766}
5767
5768/* send the MCP a request, block until there is a reply */
5769static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5770{
5771 int func = BP_FUNC(bp);
5772 u32 seq = ++bp->fw_seq;
5773 u32 rc = 0;
5774 u32 cnt = 1;
5775 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
5776
5777 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
5778 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
5779
5780 do {
5781 /* let the FW do its magic ... */
5782 msleep(delay);
5783
5784 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
5785
5786 /* Give the FW up to 2 second (200*10ms) */
5787 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5788
5789 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5790 cnt*delay, rc, seq);
5791
5792 /* is this a reply to our command? */
5793 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5794 rc &= FW_MSG_CODE_MASK;
5795
5796 } else {
5797 /* FW BUG! */
5798 BNX2X_ERR("FW failed to respond!\n");
5799 bnx2x_fw_dump(bp);
5800 rc = 0;
5801 }
5802
5803 return rc;
5804}
5805
5806static void bnx2x_free_mem(struct bnx2x *bp)
5807{
5808
5809#define BNX2X_PCI_FREE(x, y, size) \
5810 do { \
5811 if (x) { \
5812 pci_free_consistent(bp->pdev, size, x, y); \
5813 x = NULL; \
5814 y = 0; \
5815 } \
5816 } while (0)
5817
5818#define BNX2X_FREE(x) \
5819 do { \
5820 if (x) { \
5821 vfree(x); \
5822 x = NULL; \
5823 } \
5824 } while (0)
5825
5826 int i;
5827
5828 /* fastpath */
5829 /* Common */
5830 for_each_queue(bp, i) {
5831
5832 /* status blocks */
5833 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5834 bnx2x_fp(bp, i, status_blk_mapping),
5835 sizeof(struct host_status_block) +
5836 sizeof(struct eth_tx_db_data));
5837 }
5838 /* Rx */
5839 for_each_rx_queue(bp, i) {
5840
5841 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5842 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5843 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5844 bnx2x_fp(bp, i, rx_desc_mapping),
5845 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5846
5847 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5848 bnx2x_fp(bp, i, rx_comp_mapping),
5849 sizeof(struct eth_fast_path_rx_cqe) *
5850 NUM_RCQ_BD);
5851
5852 /* SGE ring */
5853 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5854 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5855 bnx2x_fp(bp, i, rx_sge_mapping),
5856 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5857 }
5858 /* Tx */
5859 for_each_tx_queue(bp, i) {
5860
5861 /* fastpath tx rings: tx_buf tx_desc */
5862 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5863 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5864 bnx2x_fp(bp, i, tx_desc_mapping),
5865 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5866 }
5867 /* end of fastpath */
5868
5869 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
5870 sizeof(struct host_def_status_block));
5871
5872 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
5873 sizeof(struct bnx2x_slowpath));
5874
5875#ifdef BCM_ISCSI
5876 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5877 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5878 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5879 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5880#endif
5881 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5882
5883#undef BNX2X_PCI_FREE
5884#undef BNX2X_FREE
5885}
5886
5887static int bnx2x_alloc_mem(struct bnx2x *bp)
5888{
5889
5890#define BNX2X_PCI_ALLOC(x, y, size) \
5891 do { \
5892 x = pci_alloc_consistent(bp->pdev, size, y); \
5893 if (x == NULL) \
5894 goto alloc_mem_err; \
5895 memset(x, 0, size); \
5896 } while (0)
5897
5898#define BNX2X_ALLOC(x, size) \
5899 do { \
5900 x = vmalloc(size); \
5901 if (x == NULL) \
5902 goto alloc_mem_err; \
5903 memset(x, 0, size); \
5904 } while (0)
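/* both helpers jump to alloc_mem_err on failure, where bnx2x_free_mem()
 * unwinds whatever has been allocated so far */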
5905
5906 int i;
5907
5908 /* fastpath */
5909 /* Common */
5910 for_each_queue(bp, i) {
5911 bnx2x_fp(bp, i, bp) = bp;
5912
5913 /* status blocks */
5914 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5915 &bnx2x_fp(bp, i, status_blk_mapping),
5916 sizeof(struct host_status_block) +
5917 sizeof(struct eth_tx_db_data));
5918 }
5919 /* Rx */
5920 for_each_rx_queue(bp, i) {
5921
5922 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5923 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5924 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5925 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5926 &bnx2x_fp(bp, i, rx_desc_mapping),
5927 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5928
5929 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5930 &bnx2x_fp(bp, i, rx_comp_mapping),
5931 sizeof(struct eth_fast_path_rx_cqe) *
5932 NUM_RCQ_BD);
5933
5934 /* SGE ring */
5935 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5936 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5937 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5938 &bnx2x_fp(bp, i, rx_sge_mapping),
5939 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5940 }
5941 /* Tx */
5942 for_each_tx_queue(bp, i) {
5943
5944 bnx2x_fp(bp, i, hw_tx_prods) =
5945 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5946
5947 bnx2x_fp(bp, i, tx_prods_mapping) =
5948 bnx2x_fp(bp, i, status_blk_mapping) +
5949 sizeof(struct host_status_block);
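 /* the tx producers page shares the status block DMA allocation:
  * the extra eth_tx_db_data bytes requested with it above */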
5950
5951 /* fastpath tx rings: tx_buf tx_desc */
5952 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5953 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5954 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5955 &bnx2x_fp(bp, i, tx_desc_mapping),
5956 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5957 }
a2fbb9ea
ET
5958 /* end of fastpath */
5959
5960 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5961 sizeof(struct host_def_status_block));
5962
5963 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5964 sizeof(struct bnx2x_slowpath));
5965
5966#ifdef BCM_ISCSI
5967 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5968
5969 /* Initialize T1 */
5970 for (i = 0; i < 64*1024; i += 64) {
5971 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5972 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5973 }
5974
5975 /* allocate searcher T2 table
5976 we allocate 1/4 of alloc num for T2
5977 (which is not entered into the ILT) */
5978 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5979
5980 /* Initialize T2 */
5981 for (i = 0; i < 16*1024; i += 64)
5982 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5983
5984 /* now fix up the last line in the block to point to the next block */
5985 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5986
5987 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5988 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5989
5990 /* QM queues (128*MAX_CONN) */
5991 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5992#endif
5993
5994 /* Slow path ring */
5995 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5996
5997 return 0;
5998
5999alloc_mem_err:
6000 bnx2x_free_mem(bp);
6001 return -ENOMEM;
6002
6003#undef BNX2X_PCI_ALLOC
6004#undef BNX2X_ALLOC
6005}
6006
6007static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6008{
6009 int i;
6010
6011 for_each_tx_queue(bp, i) {
6012 struct bnx2x_fastpath *fp = &bp->fp[i];
6013
6014 u16 bd_cons = fp->tx_bd_cons;
6015 u16 sw_prod = fp->tx_pkt_prod;
6016 u16 sw_cons = fp->tx_pkt_cons;
6017
6018 while (sw_cons != sw_prod) {
6019 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6020 sw_cons++;
6021 }
6022 }
6023}
6024
6025static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6026{
6027 int i, j;
6028
6029 for_each_rx_queue(bp, j) {
6030 struct bnx2x_fastpath *fp = &bp->fp[j];
6031
6032 for (i = 0; i < NUM_RX_BD; i++) {
6033 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6034 struct sk_buff *skb = rx_buf->skb;
6035
6036 if (skb == NULL)
6037 continue;
6038
6039 pci_unmap_single(bp->pdev,
6040 pci_unmap_addr(rx_buf, mapping),
6041 bp->rx_buf_size,
6042 PCI_DMA_FROMDEVICE);
6043
6044 rx_buf->skb = NULL;
6045 dev_kfree_skb(skb);
6046 }
6047 if (!fp->disable_tpa)
6048 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6049 ETH_MAX_AGGREGATION_QUEUES_E1 :
6050 ETH_MAX_AGGREGATION_QUEUES_E1H);
6051 }
6052}
6053
6054static void bnx2x_free_skbs(struct bnx2x *bp)
6055{
6056 bnx2x_free_tx_skbs(bp);
6057 bnx2x_free_rx_skbs(bp);
6058}
6059
6060static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6061{
6062 int i, offset = 1;
6063
6064 free_irq(bp->msix_table[0].vector, bp->dev);
6065 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6066 bp->msix_table[0].vector);
6067
6068 for_each_queue(bp, i) {
6069 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6070 "state %x\n", i, bp->msix_table[i + offset].vector,
6071 bnx2x_fp(bp, i, state));
6072
6073 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6074 }
6075}
6076
6077static void bnx2x_free_irq(struct bnx2x *bp)
6078{
6079 if (bp->flags & USING_MSIX_FLAG) {
6080 bnx2x_free_msix_irqs(bp);
6081 pci_disable_msix(bp->pdev);
6082 bp->flags &= ~USING_MSIX_FLAG;
6083
6084 } else if (bp->flags & USING_MSI_FLAG) {
6085 free_irq(bp->pdev->irq, bp->dev);
6086 pci_disable_msi(bp->pdev);
6087 bp->flags &= ~USING_MSI_FLAG;
6088
6089 } else
6090 free_irq(bp->pdev->irq, bp->dev);
6091}
6092
6093static int bnx2x_enable_msix(struct bnx2x *bp)
6094{
6095 int i, rc, offset = 1;
6096 int igu_vec = 0;
6097
6098 bp->msix_table[0].entry = igu_vec;
6099 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6100
6101 for_each_queue(bp, i) {
6102 igu_vec = BP_L_ID(bp) + offset + i;
6103 bp->msix_table[i + offset].entry = igu_vec;
6104 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6105 "(fastpath #%u)\n", i + offset, igu_vec, i);
6106 }
6107
6108 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6109 BNX2X_NUM_QUEUES(bp) + offset);
6110 if (rc) {
6111 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6112 return rc;
6113 }
6114
6115 bp->flags |= USING_MSIX_FLAG;
6116
6117 return 0;
6118}
6119
6120static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6121{
6122 int i, rc, offset = 1;
6123
6124 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6125 bp->dev->name, bp->dev);
6126 if (rc) {
6127 BNX2X_ERR("request sp irq failed\n");
6128 return -EBUSY;
6129 }
6130
6131 for_each_queue(bp, i) {
6132 struct bnx2x_fastpath *fp = &bp->fp[i];
6133
6134 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6135 rc = request_irq(bp->msix_table[i + offset].vector,
6136 bnx2x_msix_fp_int, 0, fp->name, fp);
6137 if (rc) {
6138 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6139 bnx2x_free_msix_irqs(bp);
6140 return -EBUSY;
6141 }
6142
6143 fp->state = BNX2X_FP_STATE_IRQ;
6144 }
6145
6146 i = BNX2X_NUM_QUEUES(bp);
6147 if (is_multi(bp))
6148 printk(KERN_INFO PFX
6149 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6150 bp->dev->name, bp->msix_table[0].vector,
6151 bp->msix_table[offset].vector,
6152 bp->msix_table[offset + i - 1].vector);
6153 else
6154 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6155 bp->dev->name, bp->msix_table[0].vector,
6156 bp->msix_table[offset + i - 1].vector);
6157
6158 return 0;
6159}
6160
6161static int bnx2x_enable_msi(struct bnx2x *bp)
6162{
6163 int rc;
6164
6165 rc = pci_enable_msi(bp->pdev);
6166 if (rc) {
6167 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6168 return -1;
6169 }
6170 bp->flags |= USING_MSI_FLAG;
6171
6172 return 0;
6173}
6174
6175static int bnx2x_req_irq(struct bnx2x *bp)
6176{
6177 unsigned long flags;
6178 int rc;
6179
6180 if (bp->flags & USING_MSI_FLAG)
6181 flags = 0;
6182 else
6183 flags = IRQF_SHARED;
6184
6185 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6186 bp->dev->name, bp->dev);
6187 if (!rc)
6188 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6189
6190 return rc;
6191}
6192
6193static void bnx2x_napi_enable(struct bnx2x *bp)
6194{
6195 int i;
6196
6197 for_each_rx_queue(bp, i)
6198 napi_enable(&bnx2x_fp(bp, i, napi));
6199}
6200
6201static void bnx2x_napi_disable(struct bnx2x *bp)
6202{
6203 int i;
6204
6205 for_each_rx_queue(bp, i)
6206 napi_disable(&bnx2x_fp(bp, i, napi));
6207}
6208
6209static void bnx2x_netif_start(struct bnx2x *bp)
6210{
6211 if (atomic_dec_and_test(&bp->intr_sem)) {
6212 if (netif_running(bp->dev)) {
6209static void bnx2x_netif_start(struct bnx2x *bp)
6210{
6211 if (atomic_dec_and_test(&bp->intr_sem)) {
6212 if (netif_running(bp->dev)) {
6217 }
6218 }
6219}
6220
6221static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6222{
6223 bnx2x_int_disable_sync(bp, disable_hw);
6224 bnx2x_napi_disable(bp);
6225 if (netif_running(bp->dev)) {
6226 netif_tx_disable(bp->dev);
6227 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6228 }
6229}
6230
6231/*
6232 * Init service functions
6233 */
6234
6235static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6236{
6237 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6238 int port = BP_PORT(bp);
6239
6240 /* CAM allocation
6241 * unicasts 0-31:port0 32-63:port1
6242 * multicast 64-127:port0 128-191:port1
6243 */
6244 config->hdr.length = 2;
6245 config->hdr.offset = port ? 32 : 0;
6246 config->hdr.client_id = BP_CL_ID(bp);
6247 config->hdr.reserved1 = 0;
6248
6249 /* primary MAC */
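 /* each 16-bit CAM word below carries two bytes of the MAC address,
  * byte-swapped by swab16() */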
6250 config->config_table[0].cam_entry.msb_mac_addr =
6251 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6252 config->config_table[0].cam_entry.middle_mac_addr =
6253 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6254 config->config_table[0].cam_entry.lsb_mac_addr =
6255 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6256 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6257 if (set)
6258 config->config_table[0].target_table_entry.flags = 0;
6259 else
6260 CAM_INVALIDATE(config->config_table[0]);
6261 config->config_table[0].target_table_entry.client_id = 0;
6262 config->config_table[0].target_table_entry.vlan_id = 0;
6263
6264 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6265 (set ? "setting" : "clearing"),
6266 config->config_table[0].cam_entry.msb_mac_addr,
6267 config->config_table[0].cam_entry.middle_mac_addr,
6268 config->config_table[0].cam_entry.lsb_mac_addr);
6269
6270 /* broadcast */
6271 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6272 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6273 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
6274 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6275 if (set)
6276 config->config_table[1].target_table_entry.flags =
6277 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6278 else
6279 CAM_INVALIDATE(config->config_table[1]);
6280 config->config_table[1].target_table_entry.client_id = 0;
6281 config->config_table[1].target_table_entry.vlan_id = 0;
6282
6283 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6284 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6285 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6286}
6287
6288static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6289{
6290 struct mac_configuration_cmd_e1h *config =
6291 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6292
6293 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6294 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6295 return;
6296 }
6297
6298 /* CAM allocation for E1H
6299 * unicasts: by func number
6300 * multicast: 20+FUNC*20, 20 each
6301 */
6302 config->hdr.length = 1;
6303 config->hdr.offset = BP_FUNC(bp);
6304 config->hdr.client_id = BP_CL_ID(bp);
6305 config->hdr.reserved1 = 0;
6306
6307 /* primary MAC */
6308 config->config_table[0].msb_mac_addr =
6309 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6310 config->config_table[0].middle_mac_addr =
6311 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6312 config->config_table[0].lsb_mac_addr =
6313 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6314 config->config_table[0].client_id = BP_L_ID(bp);
6315 config->config_table[0].vlan_id = 0;
6316 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6317 if (set)
6318 config->config_table[0].flags = BP_PORT(bp);
6319 else
6320 config->config_table[0].flags =
6321 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6322
6323 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6324 (set ? "setting" : "clearing"),
6325 config->config_table[0].msb_mac_addr,
6326 config->config_table[0].middle_mac_addr,
6327 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6328
6329 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6330 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6331 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6332}
6333
6334static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6335 int *state_p, int poll)
6336{
6337 /* can take a while if any port is running */
6338 int cnt = 500;
6339
6340 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6341 poll ? "polling" : "waiting", state, idx);
6342
6343 might_sleep();
6344 while (cnt--) {
6345 if (poll) {
6346 bnx2x_rx_int(bp->fp, 10);
6347 /* if index is different from 0
6348 * the reply for some commands will
6349 * be on the non default queue
6350 */
6351 if (idx)
6352 bnx2x_rx_int(&bp->fp[idx], 10);
6353 }
6354
6355 mb(); /* state is changed by bnx2x_sp_event() */
6356 if (*state_p == state)
6357 return 0;
6358
6359 msleep(1);
6360 }
6361
6362 /* timeout! */
6363 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6364 poll ? "polling" : "waiting", state, idx);
6365#ifdef BNX2X_STOP_ON_ERROR
6366 bnx2x_panic();
6367#endif
6368
6369 return -EBUSY;
6370}
6371
6372static int bnx2x_setup_leading(struct bnx2x *bp)
6373{
6374 int rc;
6375
6376 /* reset IGU state */
6377 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6378
6379 /* SETUP ramrod */
6380 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6381
6382 /* Wait for completion */
6383 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6384
6385 return rc;
6386}
6387
6388static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6389{
6390 struct bnx2x_fastpath *fp = &bp->fp[index];
6391
6392 /* reset IGU state */
6393 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6394
6395 /* SETUP ramrod */
6396 fp->state = BNX2X_FP_STATE_OPENING;
6397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6398 fp->cl_id, 0);
6399
6400 /* Wait for completion */
6401 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6402 &(fp->state), 0);
6403}
6404
6405static int bnx2x_poll(struct napi_struct *napi, int budget);
6406
6407static void bnx2x_set_int_mode(struct bnx2x *bp)
6408{
6409 int num_queues;
6410
6411 switch (int_mode) {
6412 case INT_MODE_INTx:
6413 case INT_MODE_MSI:
6414 num_queues = 1;
6415 bp->num_rx_queues = num_queues;
6416 bp->num_tx_queues = num_queues;
6417 DP(NETIF_MSG_IFUP,
6418 "set number of queues to %d\n", num_queues);
6419 break;
6420
6421 case INT_MODE_MSIX:
6422 default:
6423 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6424 num_queues = min_t(u32, num_online_cpus(),
6425 BNX2X_MAX_QUEUES(bp));
6426 else
6427 num_queues = 1;
6428 bp->num_rx_queues = num_queues;
6429 bp->num_tx_queues = num_queues;
6430 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6431 " number of tx queues to %d\n",
6432 bp->num_rx_queues, bp->num_tx_queues);
6433 /* if we can't use MSI-X we only need one fp,
6434 * so try to enable MSI-X with the requested number of fp's
6435 * and fallback to MSI or legacy INTx with one fp
6436 */
6437 if (bnx2x_enable_msix(bp)) {
6438 /* failed to enable MSI-X */
6439 num_queues = 1;
6440 bp->num_rx_queues = num_queues;
6441 bp->num_tx_queues = num_queues;
6442 if (bp->multi_mode)
6443 BNX2X_ERR("Multi requested but failed to "
6444 "enable MSI-X set number of "
6445 "queues to %d\n", num_queues);
6446 }
6447 break;
6448 }
6449 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6450}
6451
6452static void bnx2x_set_rx_mode(struct net_device *dev);
6453
6454/* must be called with rtnl_lock */
6455static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6456{
6457 u32 load_code;
6458 int i, rc = 0;
6459#ifdef BNX2X_STOP_ON_ERROR
6460 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6461 if (unlikely(bp->panic))
6462 return -EPERM;
6463#endif
6464
6465 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6466
6467 bnx2x_set_int_mode(bp);
6468
6469 if (bnx2x_alloc_mem(bp))
6470 return -ENOMEM;
6471
6472 for_each_rx_queue(bp, i)
6473 bnx2x_fp(bp, i, disable_tpa) =
6474 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6475
6476 for_each_rx_queue(bp, i)
6477 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6478 bnx2x_poll, 128);
6479
6480#ifdef BNX2X_STOP_ON_ERROR
6481 for_each_rx_queue(bp, i) {
6482 struct bnx2x_fastpath *fp = &bp->fp[i];
6483
6484 fp->poll_no_work = 0;
6485 fp->poll_calls = 0;
6486 fp->poll_max_calls = 0;
6487 fp->poll_complete = 0;
6488 fp->poll_exit = 0;
6489 }
6490#endif
6491 bnx2x_napi_enable(bp);
6492
6493 if (bp->flags & USING_MSIX_FLAG) {
6494 rc = bnx2x_req_msix_irqs(bp);
6495 if (rc) {
6496 pci_disable_msix(bp->pdev);
6497 goto load_error1;
6498 }
6499 } else {
6500 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6501 bnx2x_enable_msi(bp);
6502 bnx2x_ack_int(bp);
6503 rc = bnx2x_req_irq(bp);
6504 if (rc) {
6505 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6506 if (bp->flags & USING_MSI_FLAG)
6507 pci_disable_msi(bp->pdev);
6508 goto load_error1;
6509 }
6510 if (bp->flags & USING_MSI_FLAG) {
6511 bp->dev->irq = bp->pdev->irq;
6512 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6513 bp->dev->name, bp->pdev->irq);
6514 }
6515 }
6516
6517 /* Send LOAD_REQUEST command to MCP
6518 Returns the type of LOAD command:
6519 if it is the first port to be initialized
6520 common blocks should be initialized, otherwise - not
6521 */
6522 if (!BP_NOMCP(bp)) {
6523 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6524 if (!load_code) {
6525 BNX2X_ERR("MCP response failure, aborting\n");
6526 rc = -EBUSY;
6527 goto load_error2;
6528 }
6529 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6530 rc = -EBUSY; /* other port in diagnostic mode */
6531 goto load_error2;
6532 }
6533
6534 } else {
6535 int port = BP_PORT(bp);
6536
6537 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6538 load_count[0], load_count[1], load_count[2]);
6539 load_count[0]++;
6540 load_count[1 + port]++;
6541 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6542 load_count[0], load_count[1], load_count[2]);
6543 if (load_count[0] == 1)
6544 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6545 else if (load_count[1 + port] == 1)
6546 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6547 else
6548 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6549 }
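 /* load_count bookkeeping (above): entry 0 counts loads on the whole
  * chip and entries 1/2 count loads per port, so the first load on
  * the chip performs COMMON init and the first load on a port
  * performs PORT init */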
6550
6551 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6552 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6553 bp->port.pmf = 1;
6554 else
6555 bp->port.pmf = 0;
6556 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6557
6558 /* Initialize HW */
6559 rc = bnx2x_init_hw(bp, load_code);
6560 if (rc) {
6561 BNX2X_ERR("HW init failed, aborting\n");
6562 goto load_error2;
6563 }
6564
6565 /* Setup NIC internals and enable interrupts */
6566 bnx2x_nic_init(bp, load_code);
6567
6568 /* Send LOAD_DONE command to MCP */
6569 if (!BP_NOMCP(bp)) {
6570 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6571 if (!load_code) {
6572 BNX2X_ERR("MCP response failure, aborting\n");
6573 rc = -EBUSY;
6574 goto load_error3;
6575 }
6576 }
6577
6578 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6579
6580 rc = bnx2x_setup_leading(bp);
6581 if (rc) {
6582 BNX2X_ERR("Setup leading failed!\n");
6583 goto load_error3;
6584 }
6585
6586 if (CHIP_IS_E1H(bp))
6587 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6588 BNX2X_ERR("!!! mf_cfg function disabled\n");
6589 bp->state = BNX2X_STATE_DISABLED;
6590 }
6591
6592 if (bp->state == BNX2X_STATE_OPEN)
6593 for_each_nondefault_queue(bp, i) {
6594 rc = bnx2x_setup_multi(bp, i);
6595 if (rc)
6596 goto load_error3;
6597 }
6598
6599 if (CHIP_IS_E1(bp))
6600 bnx2x_set_mac_addr_e1(bp, 1);
6601 else
6602 bnx2x_set_mac_addr_e1h(bp, 1);
6603
6604 if (bp->port.pmf)
6605 bnx2x_initial_phy_init(bp);
6606
6607 /* Start fast path */
34f80b04
EG
6608 switch (load_mode) {
6609 case LOAD_NORMAL:
6610 /* Tx queue should be only reenabled */
555f6c78 6611 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6612 /* Initialize the receive filter. */
34f80b04
EG
6613 bnx2x_set_rx_mode(bp->dev);
6614 break;
6615
6616 case LOAD_OPEN:
6617 netif_tx_start_all_queues(bp->dev);
6618 /* Initialize the receive filter. */
6619 bnx2x_set_rx_mode(bp->dev);
6620 break;
6621
6622 case LOAD_DIAG:
6623 /* Initialize the receive filter. */
6624 bnx2x_set_rx_mode(bp->dev);
6625 bp->state = BNX2X_STATE_DIAG;
6626 break;
6627
6628 default:
6629 break;
a2fbb9ea
ET
6630 }
6631
34f80b04
EG
6632 if (!bp->port.pmf)
6633 bnx2x__link_status_update(bp);
6634
a2fbb9ea
ET
6635 /* start the timer */
6636 mod_timer(&bp->timer, jiffies + bp->current_interval);
6637
6638
6639 return 0;
6640
6641load_error3:
6642 bnx2x_int_disable_sync(bp, 1);
6643 if (!BP_NOMCP(bp)) {
6644 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6645 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6646 }
6647 bp->port.pmf = 0;
6648 /* Free SKBs, SGEs, TPA pool and driver internals */
6649 bnx2x_free_skbs(bp);
6650 for_each_rx_queue(bp, i)
6651 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6652load_error2:
6653 /* Release IRQs */
6654 bnx2x_free_irq(bp);
6655load_error1:
6656 bnx2x_napi_disable(bp);
6657 for_each_rx_queue(bp, i)
6658 netif_napi_del(&bnx2x_fp(bp, i, napi));
6659 bnx2x_free_mem(bp);
6660
6661 /* TBD we really need to reset the chip
6662 if we want to recover from this */
6663 return rc;
6664}
6665
6666static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6667{
6668 struct bnx2x_fastpath *fp = &bp->fp[index];
6669 int rc;
6670
6671 /* halt the connection */
6672 fp->state = BNX2X_FP_STATE_HALTING;
6673 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
6674
6675 /* Wait for completion */
6676 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
6677 &(fp->state), 1);
6678 if (rc) /* timeout */
6679 return rc;
6680
6681 /* delete cfc entry */
6682 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6683
6684 /* Wait for completion */
6685 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6686 &(fp->state), 1);
6687 return rc;
6688}
6689
6690static int bnx2x_stop_leading(struct bnx2x *bp)
6691{
6692 u16 dsb_sp_prod_idx;
6693 /* if the other port is handling traffic,
6694 this can take a lot of time */
6695 int cnt = 500;
6696 int rc;
6697
6698 might_sleep();
6699
6700 /* Send HALT ramrod */
6701 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
6702 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
6703
6704 /* Wait for completion */
6705 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6706 &(bp->fp[0].state), 1);
6707 if (rc) /* timeout */
6708 return rc;
6709
6710 dsb_sp_prod_idx = *bp->dsb_sp_prod;
6711
6712 /* Send PORT_DELETE ramrod */
6713 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6714
6715 /* Wait for completion to arrive on default status block
6716 we are going to reset the chip anyway
6717 so there is not much to do if this times out
6718 */
34f80b04 6719 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6720 if (!cnt) {
6721 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6722 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6723 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6724#ifdef BNX2X_STOP_ON_ERROR
6725 bnx2x_panic();
da5a662a
VZ
6726#else
6727 rc = -EBUSY;
34f80b04
EG
6728#endif
6729 break;
6730 }
6731 cnt--;
da5a662a 6732 msleep(1);
5650d9d4 6733 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6734 }
6735 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6736 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6737
6738 return rc;
a2fbb9ea
ET
6739}
6740
34f80b04
EG
6741static void bnx2x_reset_func(struct bnx2x *bp)
6742{
6743 int port = BP_PORT(bp);
6744 int func = BP_FUNC(bp);
6745 int base, i;
6746
6747 /* Configure IGU */
6748 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6749 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6750
34f80b04
EG
6751 /* Clear ILT */
6752 base = FUNC_ILT_BASE(func);
6753 for (i = base; i < base + ILT_PER_FUNC; i++)
6754 bnx2x_ilt_wr(bp, i, 0);
6755}
6756
6757static void bnx2x_reset_port(struct bnx2x *bp)
6758{
6759 int port = BP_PORT(bp);
6760 u32 val;
6761
6762 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6763
6764 /* Do not rcv packets to BRB */
6765 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6766 /* Do not direct rcv packets that are not for MCP to the BRB */
6767 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6768 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6769
6770 /* Configure AEU */
6771 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6772
6773 msleep(100);
6774 /* Check for BRB port occupancy */
6775 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6776 if (val)
6777 DP(NETIF_MSG_IFDOWN,
33471629 6778 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6779
6780 /* TODO: Close Doorbell port? */
6781}
6782
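/* The reset_code from the MCP says how much of the device this function
 * still owns: the last function on a port also resets the port, and the
 * last function on the chip also resets the common block. */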
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

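	/* E1 filters multicast through a MAC CAM while E1H uses a hash
	 * table; clear whichever this chip has so no stale filters
	 * survive the unload */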
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0, which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections;
	   completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

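/* If a pre-boot UNDI driver left the device initialized, it is torn down
 * here before the first chip init touches the hardware. */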
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver;
		 * the UNDI driver initializes the CID offset for the
		 * normal doorbell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
				    HC_REG_CONFIG_0), 0x1000);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);

	BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
		       bp->common.hw_config, bp->common.board);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn;
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
	     KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
		       " link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

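	/* the MAC address lives in shmem as a 16-bit upper half and a
	 * 32-bit lower half; unpack it byte by byte */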
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
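	/* a valid E1HOV (outer VLAN) tag in the MF config means this
	 * function runs in multi-function mode */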
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;
	bp->rx_offset = 0;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

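	/* the "poll" module parameter, when non-zero, overrides the
	 * normal timer interval */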
	bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : bp->timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
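	/* in multi-function mode the reported speed is clamped to this
	 * function's configured maximum bandwidth */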
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported &
			      SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

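	/* reads are issued a dword at a time; the FIRST/LAST command
	 * flags bracket the burst for the NVRAM controller */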
	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

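/* bit offset of a byte within its naturally aligned dword */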
#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

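/* single-byte write: read the containing dword, splice the byte in and
 * write the dword back */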
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
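	/* FIRST marks the first dword of a burst and LAST the final one;
	 * a burst may not cross an NVRAM page, so page boundaries also
	 * terminate and restart the burst */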
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
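	/* ("PHY" in ASCII is 0x50 0x48 0x59) */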
34f80b04
EG
8445 if (eeprom->magic == 0x00504859)
8446 if (bp->port.pmf) {
8447
4a37fb66 8448 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8449 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8450 bp->link_params.ext_phy_config,
8451 (bp->state != BNX2X_STATE_CLOSED),
8452 eebuf, eeprom->len);
bb2a0f7a
YG
8453 if ((bp->state == BNX2X_STATE_OPEN) ||
8454 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04
EG
8455 rc |= bnx2x_link_reset(&bp->link_params,
8456 &bp->link_vars);
8457 rc |= bnx2x_phy_init(&bp->link_params,
8458 &bp->link_vars);
bb2a0f7a 8459 }
4a37fb66 8460 bnx2x_release_phy_lock(bp);
34f80b04
EG
8461
8462 } else /* Only the PMF can access the PHY */
8463 return -EINVAL;
8464 else
c18487ee 8465 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8466
8467 return rc;
8468}
8469
8470static int bnx2x_get_coalesce(struct net_device *dev,
8471 struct ethtool_coalesce *coal)
8472{
8473 struct bnx2x *bp = netdev_priv(dev);
8474
8475 memset(coal, 0, sizeof(struct ethtool_coalesce));
8476
8477 coal->rx_coalesce_usecs = bp->rx_ticks;
8478 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8479
8480 return 0;
8481}
8482
8483static int bnx2x_set_coalesce(struct net_device *dev,
8484 struct ethtool_coalesce *coal)
8485{
8486 struct bnx2x *bp = netdev_priv(dev);
8487
8488 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8489 if (bp->rx_ticks > 3000)
8490 bp->rx_ticks = 3000;
8491
8492 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8493 if (bp->tx_ticks > 0x3000)
8494 bp->tx_ticks = 0x3000;
8495
34f80b04 8496 if (netif_running(dev))
a2fbb9ea
ET
8497 bnx2x_update_coalesce(bp);
8498
8499 return 0;
8500}
8501
8502static void bnx2x_get_ringparam(struct net_device *dev,
8503 struct ethtool_ringparam *ering)
8504{
8505 struct bnx2x *bp = netdev_priv(dev);
8506
8507 ering->rx_max_pending = MAX_RX_AVAIL;
8508 ering->rx_mini_max_pending = 0;
8509 ering->rx_jumbo_max_pending = 0;
8510
8511 ering->rx_pending = bp->rx_ring_size;
8512 ering->rx_mini_pending = 0;
8513 ering->rx_jumbo_pending = 0;
8514
8515 ering->tx_max_pending = MAX_TX_AVAIL;
8516 ering->tx_pending = bp->tx_ring_size;
8517}
8518
8519static int bnx2x_set_ringparam(struct net_device *dev,
8520 struct ethtool_ringparam *ering)
8521{
8522 struct bnx2x *bp = netdev_priv(dev);
8523 int rc = 0;
8524
8525 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8526 (ering->tx_pending > MAX_TX_AVAIL) ||
8527 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8528 return -EINVAL;
8529
8530 bp->rx_ring_size = ering->rx_pending;
8531 bp->tx_ring_size = ering->tx_pending;
8532
8533 if (netif_running(dev)) {
8534 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8535 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8536 }
8537
8538 return rc;
8539}
8540
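/* Editor's note -- the bounds enforced above, restated (illustrative).
 * The "MAX_SKB_FRAGS + 4" lower bound is, we believe, room for one
 * worst-case packet: MAX_SKB_FRAGS fragment BDs plus the start BD, the
 * parsing BD, a possible TSO split BD and one slot of slack.
 */
static inline int ring_sizes_valid(u32 rx_pending, u32 tx_pending)
{
	return (rx_pending <= MAX_RX_AVAIL) &&
	       (tx_pending <= MAX_TX_AVAIL) &&
	       (tx_pending > MAX_SKB_FRAGS + 4);
}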
8541static void bnx2x_get_pauseparam(struct net_device *dev,
8542 struct ethtool_pauseparam *epause)
8543{
8544 struct bnx2x *bp = netdev_priv(dev);
8545
8546 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8547 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8548
8549 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8550 BNX2X_FLOW_CTRL_RX);
8551 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8552 BNX2X_FLOW_CTRL_TX);
8553
8554 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8555 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8556 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8557}
8558
8559static int bnx2x_set_pauseparam(struct net_device *dev,
8560 struct ethtool_pauseparam *epause)
8561{
8562 struct bnx2x *bp = netdev_priv(dev);
8563
8564 if (IS_E1HMF(bp))
8565 return 0;
8566
8567 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8568 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8569 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8570
8571 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8572
8573 if (epause->rx_pause)
8574 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
8575
8576 if (epause->tx_pause)
8577 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
8578
8579 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8580 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8581
8582 if (epause->autoneg) {
8583 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8584 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8585 return -EINVAL;
8586 }
8587
8588 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8589 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
8590 }
8591
8592 DP(NETIF_MSG_LINK,
8593 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
8594
8595 if (netif_running(dev)) {
8596 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8597 bnx2x_link_set(bp);
8598 }
8599
8600 return 0;
8601}
8602
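/* Editor's note -- the flow-control resolution above in one pure
 * function (illustrative; names as in the driver): rx/tx requests are
 * OR-ed in, "neither" collapses to NONE, and autoneg on an
 * autonegotiated link defers the decision back to AUTO.
 */
static u32 resolve_req_flow_ctrl(int rx_pause, int tx_pause,
				 int autoneg, int speed_is_aneg)
{
	u32 fc = BNX2X_FLOW_CTRL_AUTO;

	if (rx_pause)
		fc |= BNX2X_FLOW_CTRL_RX;
	if (tx_pause)
		fc |= BNX2X_FLOW_CTRL_TX;
	if (fc == BNX2X_FLOW_CTRL_AUTO)
		fc = BNX2X_FLOW_CTRL_NONE;
	if (autoneg && speed_is_aneg)
		fc = BNX2X_FLOW_CTRL_AUTO;

	return fc;
}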
8603static int bnx2x_set_flags(struct net_device *dev, u32 data)
8604{
8605 struct bnx2x *bp = netdev_priv(dev);
8606 int changed = 0;
8607 int rc = 0;
8608
8609 /* TPA requires Rx CSUM offloading */
8610 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8611 if (!(dev->features & NETIF_F_LRO)) {
8612 dev->features |= NETIF_F_LRO;
8613 bp->flags |= TPA_ENABLE_FLAG;
8614 changed = 1;
8615 }
8616
8617 } else if (dev->features & NETIF_F_LRO) {
8618 dev->features &= ~NETIF_F_LRO;
8619 bp->flags &= ~TPA_ENABLE_FLAG;
8620 changed = 1;
8621 }
8622
8623 if (changed && netif_running(dev)) {
8624 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8625 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8626 }
8627
8628 return rc;
8629}
8630
8631static u32 bnx2x_get_rx_csum(struct net_device *dev)
8632{
8633 struct bnx2x *bp = netdev_priv(dev);
8634
8635 return bp->rx_csum;
8636}
8637
8638static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8639{
8640 struct bnx2x *bp = netdev_priv(dev);
8641 int rc = 0;
8642
8643 bp->rx_csum = data;
8644
8645 /* Disable TPA when Rx CSUM is disabled. Otherwise all
8646 TPA'ed packets will be discarded due to a wrong TCP CSUM */
8647 if (!data) {
8648 u32 flags = ethtool_op_get_flags(dev);
8649
8650 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8651 }
8652
8653 return rc;
8654}
8655
8656static int bnx2x_set_tso(struct net_device *dev, u32 data)
8657{
8658 if (data) {
8659 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8660 dev->features |= NETIF_F_TSO6;
8661 } else {
8662 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8663 dev->features &= ~NETIF_F_TSO6;
8664 }
8665
8666 return 0;
8667}
8668
8669static const struct {
8670 char string[ETH_GSTRING_LEN];
8671} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8672 { "register_test (offline)" },
8673 { "memory_test (offline)" },
8674 { "loopback_test (offline)" },
8675 { "nvram_test (online)" },
8676 { "interrupt_test (online)" },
8677 { "link_test (online)" },
8678 { "idle check (online)" },
8679 { "MC errors (online)" }
8680};
8681
8682static int bnx2x_self_test_count(struct net_device *dev)
8683{
8684 return BNX2X_NUM_TESTS;
8685}
8686
8687static int bnx2x_test_registers(struct bnx2x *bp)
8688{
8689 int idx, i, rc = -ENODEV;
8690 u32 wr_val = 0;
8691 int port = BP_PORT(bp);
8692 static const struct {
8693 u32 offset0;
8694 u32 offset1;
8695 u32 mask;
8696 } reg_tbl[] = {
8697/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8698 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8699 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8700 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8701 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8702 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8703 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8704 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8705 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8706 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8707/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8708 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8709 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8710 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8711 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8712 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8713 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8714 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8715 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8716 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8717/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8718 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8719 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8720 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8721 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8722 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8723 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8724 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8725 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8726 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8727/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8728 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8729 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8730 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8731 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8732 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8733 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8734 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8735
8736 { 0xffffffff, 0, 0x00000000 }
8737 };
8738
8739 if (!netif_running(bp->dev))
8740 return rc;
8741
8742 /* Run the test twice:
8743 first writing 0x00000000, then writing 0xffffffff */
8744 for (idx = 0; idx < 2; idx++) {
8745
8746 switch (idx) {
8747 case 0:
8748 wr_val = 0;
8749 break;
8750 case 1:
8751 wr_val = 0xffffffff;
8752 break;
8753 }
8754
8755 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8756 u32 offset, mask, save_val, val;
8757
8758 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8759 mask = reg_tbl[i].mask;
8760
8761 save_val = REG_RD(bp, offset);
8762
8763 REG_WR(bp, offset, wr_val);
8764 val = REG_RD(bp, offset);
8765
8766 /* Restore the original register's value */
8767 REG_WR(bp, offset, save_val);
8768
8769 /* verify that the value read back is as expected */
8770 if ((val & mask) != (wr_val & mask))
8771 goto test_reg_exit;
8772 }
8773 }
8774
8775 rc = 0;
8776
8777test_reg_exit:
8778 return rc;
8779}
8780
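/* Editor's note -- the core pattern of the table-driven register test
 * above, isolated (illustrative only): save, write a pattern, read it
 * back through the mask, and always restore the original value.
 */
static int reg_pattern_test(struct bnx2x *bp, u32 offset, u32 mask, u32 wr_val)
{
	u32 save_val, val;

	save_val = REG_RD(bp, offset);	/* remember the original value */
	REG_WR(bp, offset, wr_val);	/* write the test pattern */
	val = REG_RD(bp, offset);	/* read it back */
	REG_WR(bp, offset, save_val);	/* restore unconditionally */

	return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
}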
8781static int bnx2x_test_memory(struct bnx2x *bp)
8782{
8783 int i, j, rc = -ENODEV;
8784 u32 val;
8785 static const struct {
8786 u32 offset;
8787 int size;
8788 } mem_tbl[] = {
8789 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
8790 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
8791 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
8792 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
8793 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
8794 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
8795 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
8796
8797 { 0xffffffff, 0 }
8798 };
8799 static const struct {
8800 char *name;
8801 u32 offset;
8802 u32 e1_mask;
8803 u32 e1h_mask;
8804 } prty_tbl[] = {
8805 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
8806 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
8807 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
8808 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
8809 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
8810 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
8811
8812 { NULL, 0xffffffff, 0, 0 }
8813 };
8814
8815 if (!netif_running(bp->dev))
8816 return rc;
8817
8818 /* Go through all the memories */
8819 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
8820 for (j = 0; j < mem_tbl[i].size; j++)
8821 REG_RD(bp, mem_tbl[i].offset + j*4);
8822
8823 /* Check the parity status */
8824 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
8825 val = REG_RD(bp, prty_tbl[i].offset);
8826 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
8827 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
8828 DP(NETIF_MSG_HW,
8829 "%s is 0x%x\n", prty_tbl[i].name, val);
8830 goto test_mem_exit;
8831 }
8832 }
8833
8834 rc = 0;
8835
8836test_mem_exit:
8837 return rc;
8838}
8839
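/* Editor's note -- the parity check above, restated (illustrative;
 * assumes the chip is either E1 or E1H): a set status bit is only
 * fatal when it is not masked off for that chip variant.
 */
static inline int prty_status_bad(int is_e1, u32 val,
				  u32 e1_mask, u32 e1h_mask)
{
	return is_e1 ? ((val & ~e1_mask) != 0) : ((val & ~e1h_mask) != 0);
}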
8840static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
8841{
8842 int cnt = 1000;
8843
8844 if (link_up)
8845 while (bnx2x_link_test(bp) && cnt--)
8846 msleep(10);
8847}
8848
8849static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
8850{
8851 unsigned int pkt_size, num_pkts, i;
8852 struct sk_buff *skb;
8853 unsigned char *packet;
8854 struct bnx2x_fastpath *fp = &bp->fp[0];
8855 u16 tx_start_idx, tx_idx;
8856 u16 rx_start_idx, rx_idx;
8857 u16 pkt_prod;
8858 struct sw_tx_bd *tx_buf;
8859 struct eth_tx_bd *tx_bd;
8860 dma_addr_t mapping;
8861 union eth_rx_cqe *cqe;
8862 u8 cqe_fp_flags;
8863 struct sw_rx_bd *rx_buf;
8864 u16 len;
8865 int rc = -ENODEV;
8866
8867 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
8868 bp->link_params.loopback_mode = LOOPBACK_BMAC;
8869 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8870
8871 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
8872 u16 cnt = 1000;
8873 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
8874 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
8875 /* wait until link state is restored */
8876 if (link_up)
8877 while (cnt-- && bnx2x_test_link(&bp->link_params,
8878 &bp->link_vars))
8879 msleep(10);
8880 } else
8881 return -EINVAL;
8882
8883 pkt_size = 1514;
8884 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
8885 if (!skb) {
8886 rc = -ENOMEM;
8887 goto test_loopback_exit;
8888 }
8889 packet = skb_put(skb, pkt_size);
8890 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
8891 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
8892 for (i = ETH_HLEN; i < pkt_size; i++)
8893 packet[i] = (unsigned char) (i & 0xff);
8894
8895 num_pkts = 0;
8896 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
8897 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
8898
8899 pkt_prod = fp->tx_pkt_prod++;
8900 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8901 tx_buf->first_bd = fp->tx_bd_prod;
8902 tx_buf->skb = skb;
8903
8904 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
8905 mapping = pci_map_single(bp->pdev, skb->data,
8906 skb_headlen(skb), PCI_DMA_TODEVICE);
8907 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8908 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8909 tx_bd->nbd = cpu_to_le16(1);
8910 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8911 tx_bd->vlan = cpu_to_le16(pkt_prod);
8912 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
8913 ETH_TX_BD_FLAGS_END_BD);
8914 tx_bd->general_data = ((UNICAST_ADDRESS <<
8915 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
8916
8917 wmb();
8918
8919 fp->hw_tx_prods->bds_prod =
8920 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
8921 mb(); /* FW restriction: must not reorder writing nbd and packets */
8922 fp->hw_tx_prods->packets_prod =
8923 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8924 DOORBELL(bp, FP_IDX(fp), 0);
8925
8926 mmiowb();
8927
8928 num_pkts++;
8929 fp->tx_bd_prod++;
8930 bp->dev->trans_start = jiffies;
8931
8932 udelay(100);
8933
8934 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
8935 if (tx_idx != tx_start_idx + num_pkts)
8936 goto test_loopback_exit;
8937
8938 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
8939 if (rx_idx != rx_start_idx + num_pkts)
8940 goto test_loopback_exit;
8941
8942 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
8943 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
8944 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
8945 goto test_loopback_rx_exit;
8946
8947 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
8948 if (len != pkt_size)
8949 goto test_loopback_rx_exit;
8950
8951 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
8952 skb = rx_buf->skb;
8953 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
8954 for (i = ETH_HLEN; i < pkt_size; i++)
8955 if (*(skb->data + i) != (unsigned char) (i & 0xff))
8956 goto test_loopback_rx_exit;
8957
8958 rc = 0;
8959
8960test_loopback_rx_exit:
8961
8962 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
8963 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
8964 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
8965 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
8966
8967 /* Update producers */
8968 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
8969 fp->rx_sge_prod);
8970
8971test_loopback_exit:
8972 bp->link_params.loopback_mode = LOOPBACK_NONE;
8973
8974 return rc;
8975}
8976
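/* Editor's note -- the self-describing payload used above, isolated
 * (illustrative only): every byte past the Ethernet header is its own
 * offset modulo 256, so the receive side can verify the frame without
 * keeping a copy of what was sent.
 */
static void fill_loopback_payload(unsigned char *packet, unsigned int pkt_size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);
}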
8977static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
8978{
8979 int rc = 0;
8980
8981 if (!netif_running(bp->dev))
8982 return BNX2X_LOOPBACK_FAILED;
8983
8984 bnx2x_netif_stop(bp, 1);
8985 bnx2x_acquire_phy_lock(bp);
8986
8987 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
8988 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
8989 rc |= BNX2X_MAC_LOOPBACK_FAILED;
8990 }
8991
8992 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
8993 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
8994 rc |= BNX2X_PHY_LOOPBACK_FAILED;
8995 }
8996
8997 bnx2x_release_phy_lock(bp);
8998 bnx2x_netif_start(bp);
8999
9000 return rc;
9001}
9002
9003#define CRC32_RESIDUAL 0xdebb20e3
9004
9005static int bnx2x_test_nvram(struct bnx2x *bp)
9006{
9007 static const struct {
9008 int offset;
9009 int size;
9010 } nvram_tbl[] = {
9011 { 0, 0x14 }, /* bootstrap */
9012 { 0x14, 0xec }, /* dir */
9013 { 0x100, 0x350 }, /* manuf_info */
9014 { 0x450, 0xf0 }, /* feature_info */
9015 { 0x640, 0x64 }, /* upgrade_key_info */
9016 { 0x6a4, 0x64 },
9017 { 0x708, 0x70 }, /* manuf_key_info */
9018 { 0x778, 0x70 },
9019 { 0, 0 }
9020 };
9021 u32 buf[0x350 / 4];
9022 u8 *data = (u8 *)buf;
9023 int i, rc;
9024 u32 magic, csum;
9025
9026 rc = bnx2x_nvram_read(bp, 0, data, 4);
9027 if (rc) {
9028 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9029 goto test_nvram_exit;
9030 }
9031
9032 magic = be32_to_cpu(buf[0]);
9033 if (magic != 0x669955aa) {
9034 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9035 rc = -ENODEV;
9036 goto test_nvram_exit;
9037 }
9038
9039 for (i = 0; nvram_tbl[i].size; i++) {
9040
9041 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9042 nvram_tbl[i].size);
9043 if (rc) {
9044 DP(NETIF_MSG_PROBE,
9045 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9046 goto test_nvram_exit;
9047 }
9048
9049 csum = ether_crc_le(nvram_tbl[i].size, data);
9050 if (csum != CRC32_RESIDUAL) {
9051 DP(NETIF_MSG_PROBE,
9052 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9053 rc = -ENODEV;
9054 goto test_nvram_exit;
9055 }
9056 }
9057
9058test_nvram_exit:
9059 return rc;
9060}
9061
9062static int bnx2x_test_intr(struct bnx2x *bp)
9063{
9064 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9065 int i, rc;
9066
9067 if (!netif_running(bp->dev))
9068 return -ENODEV;
9069
9070 config->hdr.length = 0;
9071 if (CHIP_IS_E1(bp))
9072 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9073 else
9074 config->hdr.offset = BP_FUNC(bp);
9075 config->hdr.client_id = BP_CL_ID(bp);
9076 config->hdr.reserved1 = 0;
9077
9078 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9079 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9080 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9081 if (rc == 0) {
9082 bp->set_mac_pending++;
9083 for (i = 0; i < 10; i++) {
9084 if (!bp->set_mac_pending)
9085 break;
9086 msleep_interruptible(10);
9087 }
9088 if (i == 10)
9089 rc = -ENODEV;
9090 }
9091
9092 return rc;
9093}
9094
9095static void bnx2x_self_test(struct net_device *dev,
9096 struct ethtool_test *etest, u64 *buf)
9097{
9098 struct bnx2x *bp = netdev_priv(dev);
9099
9100 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9101
9102 if (!netif_running(dev))
9103 return;
9104
9105 /* offline tests are not supported in MF mode */
9106 if (IS_E1HMF(bp))
9107 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9108
9109 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9110 u8 link_up;
9111
9112 link_up = bp->link_vars.link_up;
9113 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9114 bnx2x_nic_load(bp, LOAD_DIAG);
9115 /* wait until link state is restored */
9116 bnx2x_wait_for_link(bp, link_up);
9117
9118 if (bnx2x_test_registers(bp) != 0) {
9119 buf[0] = 1;
9120 etest->flags |= ETH_TEST_FL_FAILED;
9121 }
9122 if (bnx2x_test_memory(bp) != 0) {
9123 buf[1] = 1;
9124 etest->flags |= ETH_TEST_FL_FAILED;
9125 }
9126 buf[2] = bnx2x_test_loopback(bp, link_up);
9127 if (buf[2] != 0)
9128 etest->flags |= ETH_TEST_FL_FAILED;
9129
9130 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9131 bnx2x_nic_load(bp, LOAD_NORMAL);
9132 /* wait until link state is restored */
9133 bnx2x_wait_for_link(bp, link_up);
9134 }
9135 if (bnx2x_test_nvram(bp) != 0) {
9136 buf[3] = 1;
9137 etest->flags |= ETH_TEST_FL_FAILED;
9138 }
9139 if (bnx2x_test_intr(bp) != 0) {
9140 buf[4] = 1;
9141 etest->flags |= ETH_TEST_FL_FAILED;
9142 }
9143 if (bp->port.pmf)
9144 if (bnx2x_link_test(bp) != 0) {
9145 buf[5] = 1;
9146 etest->flags |= ETH_TEST_FL_FAILED;
9147 }
9148 buf[7] = bnx2x_mc_assert(bp);
9149 if (buf[7] != 0)
9150 etest->flags |= ETH_TEST_FL_FAILED;
9151
9152#ifdef BNX2X_EXTRA_DEBUG
9153 bnx2x_panic_dump(bp);
9154#endif
9155}
9156
9157static const struct {
9158 long offset;
9159 int size;
9160 u32 flags;
9161#define STATS_FLAGS_PORT 1
9162#define STATS_FLAGS_FUNC 2
9163 u8 string[ETH_GSTRING_LEN];
9164} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9165/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi),
9166 8, STATS_FLAGS_FUNC, "rx_bytes" },
9167 { STATS_OFFSET32(error_bytes_received_hi),
9168 8, STATS_FLAGS_FUNC, "rx_error_bytes" },
9169 { STATS_OFFSET32(total_bytes_transmitted_hi),
9170 8, STATS_FLAGS_FUNC, "tx_bytes" },
9171 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9172 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9173 { STATS_OFFSET32(total_unicast_packets_received_hi),
9174 8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
9175 { STATS_OFFSET32(total_multicast_packets_received_hi),
9176 8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
9177 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9178 8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
9179 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9180 8, STATS_FLAGS_FUNC, "tx_packets" },
9181 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9182 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9183/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9184 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9185 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9186 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9187 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9188 8, STATS_FLAGS_PORT, "rx_align_errors" },
9189 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9190 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9191 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9192 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9193 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9194 8, STATS_FLAGS_PORT, "tx_deferred" },
9195 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9196 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9197 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9198 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9199 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9200 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9201 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9202 8, STATS_FLAGS_PORT, "rx_fragments" },
9203/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9204 8, STATS_FLAGS_PORT, "rx_jabbers" },
9205 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9206 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9207 { STATS_OFFSET32(jabber_packets_received),
9208 4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
9209 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9210 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9211 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9212 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9213 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9214 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9215 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9216 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9217 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9218 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9219 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9220 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9221 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
9222 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9223/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
9224 8, STATS_FLAGS_PORT, "rx_xon_frames" },
9225 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
9226 8, STATS_FLAGS_PORT, "rx_xoff_frames" },
9227 { STATS_OFFSET32(tx_stat_outxonsent_hi),
9228 8, STATS_FLAGS_PORT, "tx_xon_frames" },
9229 { STATS_OFFSET32(tx_stat_outxoffsent_hi),
9230 8, STATS_FLAGS_PORT, "tx_xoff_frames" },
9231 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9232 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9233 { STATS_OFFSET32(mac_filter_discard),
9234 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9235 { STATS_OFFSET32(no_buff_discard),
9236 4, STATS_FLAGS_FUNC, "rx_discards" },
9237 { STATS_OFFSET32(xxoverflow_discard),
9238 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9239 { STATS_OFFSET32(brb_drop_hi),
9240 8, STATS_FLAGS_PORT, "brb_discard" },
9241 { STATS_OFFSET32(brb_truncate_hi),
9242 8, STATS_FLAGS_PORT, "brb_truncate" },
9243/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
9244 4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
9245 { STATS_OFFSET32(rx_skb_alloc_failed),
9246 4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
9247/* 42 */{ STATS_OFFSET32(hw_csum_err),
9248 4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
9249};
9250
9251#define IS_NOT_E1HMF_STAT(bp, i) \
9252 (IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))
9253
9254static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9255{
9256 struct bnx2x *bp = netdev_priv(dev);
9257 int i, j;
9258
9259 switch (stringset) {
9260 case ETH_SS_STATS:
9261 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9262 if (IS_NOT_E1HMF_STAT(bp, i))
9263 continue;
9264 strcpy(buf + j*ETH_GSTRING_LEN,
9265 bnx2x_stats_arr[i].string);
9266 j++;
9267 }
9268 break;
9269
9270 case ETH_SS_TEST:
9271 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9272 break;
9273 }
9274}
9275
9276static int bnx2x_get_stats_count(struct net_device *dev)
9277{
9278 struct bnx2x *bp = netdev_priv(dev);
9279 int i, num_stats = 0;
9280
9281 for (i = 0; i < BNX2X_NUM_STATS; i++) {
9282 if (IS_NOT_E1HMF_STAT(bp, i))
9283 continue;
9284 num_stats++;
9285 }
9286 return num_stats;
9287}
9288
9289static void bnx2x_get_ethtool_stats(struct net_device *dev,
9290 struct ethtool_stats *stats, u64 *buf)
9291{
9292 struct bnx2x *bp = netdev_priv(dev);
9293 u32 *hw_stats = (u32 *)&bp->eth_stats;
9294 int i, j;
9295
9296 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9297 if (IS_NOT_E1HMF_STAT(bp, i))
9298 continue;
9299
9300 if (bnx2x_stats_arr[i].size == 0) {
9301 /* skip this counter */
9302 buf[j] = 0;
9303 j++;
9304 continue;
9305 }
9306 if (bnx2x_stats_arr[i].size == 4) {
9307 /* 4-byte counter */
9308 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
9309 j++;
9310 continue;
9311 }
9312 /* 8-byte counter */
9313 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
9314 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
9315 j++;
9316 }
9317}
9318
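/* Editor's note -- the 8-byte case above, restated (illustrative): the
 * chip exports 64-bit counters as adjacent {hi, lo} 32-bit words, which
 * HILO_U64() reassembles into one u64.
 */
static inline u64 stats_read_hilo(const u32 *hw_stats, long offset)
{
	return ((u64)hw_stats[offset] << 32) | hw_stats[offset + 1];
}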
9319static int bnx2x_phys_id(struct net_device *dev, u32 data)
9320{
9321 struct bnx2x *bp = netdev_priv(dev);
9322 int port = BP_PORT(bp);
9323 int i;
9324
9325 if (!netif_running(dev))
9326 return 0;
9327
9328 if (!bp->port.pmf)
9329 return 0;
9330
9331 if (data == 0)
9332 data = 2;
9333
9334 for (i = 0; i < (data * 2); i++) {
9335 if ((i % 2) == 0)
9336 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9337 bp->link_params.hw_led_mode,
9338 bp->link_params.chip_id);
9339 else
9340 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9341 bp->link_params.hw_led_mode,
9342 bp->link_params.chip_id);
9343
9344 msleep_interruptible(500);
9345 if (signal_pending(current))
9346 break;
9347 }
9348
9349 if (bp->link_vars.link_up)
9350 bnx2x_set_led(bp, port, LED_MODE_OPER,
9351 bp->link_vars.line_speed,
9352 bp->link_params.hw_led_mode,
9353 bp->link_params.chip_id);
9354
9355 return 0;
9356}
9357
9358static struct ethtool_ops bnx2x_ethtool_ops = {
9359 .get_settings = bnx2x_get_settings,
9360 .set_settings = bnx2x_set_settings,
9361 .get_drvinfo = bnx2x_get_drvinfo,
9362 .get_wol = bnx2x_get_wol,
9363 .set_wol = bnx2x_set_wol,
9364 .get_msglevel = bnx2x_get_msglevel,
9365 .set_msglevel = bnx2x_set_msglevel,
9366 .nway_reset = bnx2x_nway_reset,
9367 .get_link = ethtool_op_get_link,
9368 .get_eeprom_len = bnx2x_get_eeprom_len,
9369 .get_eeprom = bnx2x_get_eeprom,
9370 .set_eeprom = bnx2x_set_eeprom,
9371 .get_coalesce = bnx2x_get_coalesce,
9372 .set_coalesce = bnx2x_set_coalesce,
9373 .get_ringparam = bnx2x_get_ringparam,
9374 .set_ringparam = bnx2x_set_ringparam,
9375 .get_pauseparam = bnx2x_get_pauseparam,
9376 .set_pauseparam = bnx2x_set_pauseparam,
9377 .get_rx_csum = bnx2x_get_rx_csum,
9378 .set_rx_csum = bnx2x_set_rx_csum,
9379 .get_tx_csum = ethtool_op_get_tx_csum,
9380 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9381 .set_flags = bnx2x_set_flags,
9382 .get_flags = ethtool_op_get_flags,
9383 .get_sg = ethtool_op_get_sg,
9384 .set_sg = ethtool_op_set_sg,
9385 .get_tso = ethtool_op_get_tso,
9386 .set_tso = bnx2x_set_tso,
9387 .self_test_count = bnx2x_self_test_count,
9388 .self_test = bnx2x_self_test,
9389 .get_strings = bnx2x_get_strings,
9390 .phys_id = bnx2x_phys_id,
9391 .get_stats_count = bnx2x_get_stats_count,
9392 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9393};
9394
9395/* end of ethtool_ops */
9396
9397/****************************************************************************
9398* General service functions
9399****************************************************************************/
9400
9401static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9402{
9403 u16 pmcsr;
9404
9405 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9406
9407 switch (state) {
9408 case PCI_D0:
9409 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9410 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9411 PCI_PM_CTRL_PME_STATUS));
9412
9413 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
9414 /* delay required during transition out of D3hot */
9415 msleep(20);
9416 break;
9417
9418 case PCI_D3hot:
9419 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9420 pmcsr |= 3;
9421
9422 if (bp->wol)
9423 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
9424
9425 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9426 pmcsr);
9427
9428 /* No more memory access after this point until
9429 * device is brought back to D0.
9430 */
9431 break;
9432
9433 default:
9434 return -EINVAL;
9435 }
9436 return 0;
9437}
9438
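/* Editor's note -- a sketch of the D3hot path above (illustrative):
 * the low bits of PMCSR select the power state (3 == D3hot) and PME is
 * armed only when Wake-on-LAN was requested.
 */
static inline u16 pmcsr_for_d3hot(u16 pmcsr, int wol)
{
	pmcsr &= ~PCI_PM_CTRL_STATE_MASK;	/* clear the state field */
	pmcsr |= 3;				/* select D3hot */
	if (wol)
		pmcsr |= PCI_PM_CTRL_PME_ENABLE;	/* arm PME for WoL */

	return pmcsr;
}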
9439static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9440{
9441 u16 rx_cons_sb;
9442
9443 /* Tell compiler that status block fields can change */
9444 barrier();
9445 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9446 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9447 rx_cons_sb++;
9448 return (fp->rx_comp_cons != rx_cons_sb);
9449}
9450
9451/*
9452 * net_device service functions
9453 */
9454
9455static int bnx2x_poll(struct napi_struct *napi, int budget)
9456{
9457 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9458 napi);
9459 struct bnx2x *bp = fp->bp;
9460 int work_done = 0;
9461
9462#ifdef BNX2X_STOP_ON_ERROR
9463 if (unlikely(bp->panic))
9464 goto poll_panic;
9465#endif
9466
9467 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9468 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9469 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9470
9471 bnx2x_update_fpsb_idx(fp);
9472
9473 if (bnx2x_has_tx_work(fp))
9474 bnx2x_tx_int(fp, budget);
9475
9476 if (bnx2x_has_rx_work(fp))
9477 work_done = bnx2x_rx_int(fp, budget);
9478 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9479
9480 /* must not complete if we consumed full budget */
9481 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9482
9483#ifdef BNX2X_STOP_ON_ERROR
9484poll_panic:
9485#endif
9486 napi_complete(napi);
9487
9488 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
9489 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
9490 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9491 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9492 }
9493 return work_done;
9494}
9495
9496
9497/* we split the first BD into headers and data BDs
9498 * to ease the pain of our fellow microcode engineers
9499 * we use one mapping for both BDs
9500 * So far this has only been observed to happen
9501 * in Other Operating Systems(TM)
9502 */
9503static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9504 struct bnx2x_fastpath *fp,
9505 struct eth_tx_bd **tx_bd, u16 hlen,
9506 u16 bd_prod, int nbd)
9507{
9508 struct eth_tx_bd *h_tx_bd = *tx_bd;
9509 struct eth_tx_bd *d_tx_bd;
9510 dma_addr_t mapping;
9511 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9512
9513 /* first fix first BD */
9514 h_tx_bd->nbd = cpu_to_le16(nbd);
9515 h_tx_bd->nbytes = cpu_to_le16(hlen);
9516
9517 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9518 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9519 h_tx_bd->addr_lo, h_tx_bd->nbd);
9520
9521 /* now get a new data BD
9522 * (after the pbd) and fill it */
9523 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9524 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9525
9526 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9527 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9528
9529 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9530 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9531 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9532 d_tx_bd->vlan = 0;
9533 /* this marks the BD as one that has no individual mapping
9534 * the FW ignores this flag in a BD not marked start
9535 */
9536 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9537 DP(NETIF_MSG_TX_QUEUED,
9538 "TSO split data size is %d (%x:%x)\n",
9539 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9540
9541 /* update tx_bd for marking the last BD flag */
9542 *tx_bd = d_tx_bd;
9543
9544 return bd_prod;
9545}
9546
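/* Editor's note -- the byte accounting of the split above, restated
 * (illustrative only): one mapping of old_len bytes becomes a header BD
 * of hlen bytes plus a data BD of old_len - hlen bytes that starts hlen
 * bytes into the very same DMA mapping.
 */
struct tx_bd_split {
	u16 h_nbytes;		/* header BD length */
	u16 d_nbytes;		/* data BD length */
	dma_addr_t d_mapping;	/* data BD DMA address */
};

static inline struct tx_bd_split split_first_bd(dma_addr_t mapping,
						u16 old_len, u16 hlen)
{
	struct tx_bd_split s;

	s.h_nbytes = hlen;
	s.d_nbytes = old_len - hlen;
	s.d_mapping = mapping + hlen;	/* shared mapping, offset by hlen */

	return s;
}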
9547static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9548{
9549 if (fix > 0)
9550 csum = (u16) ~csum_fold(csum_sub(csum,
9551 csum_partial(t_header - fix, fix, 0)));
9552
9553 else if (fix < 0)
9554 csum = (u16) ~csum_fold(csum_add(csum,
9555 csum_partial(t_header, -fix, 0)));
9556
9557 return swab16(csum);
9558}
9559
9560static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
9561{
9562 u32 rc;
9563
9564 if (skb->ip_summed != CHECKSUM_PARTIAL)
9565 rc = XMIT_PLAIN;
9566
9567 else {
9568 if (skb->protocol == ntohs(ETH_P_IPV6)) {
9569 rc = XMIT_CSUM_V6;
9570 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
9571 rc |= XMIT_CSUM_TCP;
9572
9573 } else {
9574 rc = XMIT_CSUM_V4;
9575 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
9576 rc |= XMIT_CSUM_TCP;
9577 }
9578 }
9579
9580 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
9581 rc |= XMIT_GSO_V4;
9582
9583 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
9584 rc |= XMIT_GSO_V6;
9585
9586 return rc;
9587}
9588
9589#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9590/* check if packet requires linearization (packet is too fragmented) */
9591static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
9592 u32 xmit_type)
9593{
9594 int to_copy = 0;
9595 int hlen = 0;
9596 int first_bd_sz = 0;
9597
9598 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
9599 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
9600
9601 if (xmit_type & XMIT_GSO) {
9602 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
9603 /* Check if LSO packet needs to be copied:
9604 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
9605 int wnd_size = MAX_FETCH_BD - 3;
9606 /* Number of windows to check */
9607 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
9608 int wnd_idx = 0;
9609 int frag_idx = 0;
9610 u32 wnd_sum = 0;
9611
9612 /* Headers length */
9613 hlen = (int)(skb_transport_header(skb) - skb->data) +
9614 tcp_hdrlen(skb);
9615
9616 /* Amount of data (w/o headers) on linear part of SKB*/
9617 first_bd_sz = skb_headlen(skb) - hlen;
9618
9619 wnd_sum = first_bd_sz;
9620
9621 /* Calculate the first sum - it's special */
9622 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
9623 wnd_sum +=
9624 skb_shinfo(skb)->frags[frag_idx].size;
9625
9626 /* If there was data on linear skb data - check it */
9627 if (first_bd_sz > 0) {
9628 if (unlikely(wnd_sum < lso_mss)) {
9629 to_copy = 1;
9630 goto exit_lbl;
9631 }
9632
9633 wnd_sum -= first_bd_sz;
9634 }
9635
9636 /* Others are easier: run through the frag list and
9637 check all windows */
9638 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
9639 wnd_sum +=
9640 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
9641
9642 if (unlikely(wnd_sum < lso_mss)) {
9643 to_copy = 1;
9644 break;
9645 }
9646 wnd_sum -=
9647 skb_shinfo(skb)->frags[wnd_idx].size;
9648 }
9649
9650 } else {
9651 /* a non-LSO packet with too many fragments must always
9652 be linearized */
9653 to_copy = 1;
9654 }
9655 }
9656
9657exit_lbl:
9658 if (unlikely(to_copy))
9659 DP(NETIF_MSG_TX_QUEUED,
9660 "Linearization IS REQUIRED for %s packet. "
9661 "num_frags %d hlen %d first_bd_sz %d\n",
9662 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
9663 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
9664
9665 return to_copy;
9666}
9667#endif
9668
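/* Editor's note -- the invariant checked above, restated (illustrative;
 * ignores the linear part for brevity): the chip fetches at most
 * MAX_FETCH_BD BDs per packet, so every window of wnd_size consecutive
 * fragments must still cover at least one full MSS; otherwise the skb
 * has to be linearized first.
 */
static int any_window_below_mss(const unsigned int *frag_sz, int nfrags,
				int wnd_size, unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_sz[i];
		if (i >= wnd_size - 1) {	/* window full: test, then slide */
			if (wnd_sum < lso_mss)
				return 1;
			wnd_sum -= frag_sz[i - (wnd_size - 1)];
		}
	}
	return 0;
}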
9669/* called with netif_tx_lock
9670 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
9671 * netif_wake_queue()
9672 */
9673static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9674{
9675 struct bnx2x *bp = netdev_priv(dev);
9676 struct bnx2x_fastpath *fp;
9677 struct netdev_queue *txq;
9678 struct sw_tx_bd *tx_buf;
9679 struct eth_tx_bd *tx_bd;
9680 struct eth_tx_parse_bd *pbd = NULL;
9681 u16 pkt_prod, bd_prod;
9682 int nbd, fp_index;
9683 dma_addr_t mapping;
9684 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9685 int vlan_off = (bp->e1hov ? 4 : 0);
9686 int i;
9687 u8 hlen = 0;
9688
9689#ifdef BNX2X_STOP_ON_ERROR
9690 if (unlikely(bp->panic))
9691 return NETDEV_TX_BUSY;
9692#endif
9693
9694 fp_index = skb_get_queue_mapping(skb);
9695 txq = netdev_get_tx_queue(dev, fp_index);
9696
9697 fp = &bp->fp[fp_index];
9698
9699 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
9700 bp->eth_stats.driver_xoff++;
9701 netif_tx_stop_queue(txq);
9702 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
9703 return NETDEV_TX_BUSY;
9704 }
9705
9706 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
9707 " gso type %x xmit_type %x\n",
9708 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
9709 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
9710
9711#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
9712 /* First, check if we need to linearize the skb
9713 (due to FW restrictions) */
9714 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
9715 /* Statistics of linearization */
9716 bp->lin_cnt++;
9717 if (skb_linearize(skb) != 0) {
9718 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
9719 "silently dropping this SKB\n");
9720 dev_kfree_skb_any(skb);
9721 return NETDEV_TX_OK;
9722 }
9723 }
9724#endif
9725
9726 /*
9727 Please read carefully. First we use one BD which we mark as start,
9728 then for TSO or xsum we have a parsing info BD,
9729 and only then we have the rest of the TSO BDs.
9730 (don't forget to mark the last one as last,
9731 and to unmap only AFTER you write to the BD ...)
9732 And above all, all pbd sizes are in words - NOT DWORDS!
9733 */
9734
9735 pkt_prod = fp->tx_pkt_prod++;
9736 bd_prod = TX_BD(fp->tx_bd_prod);
9737
9738 /* get a tx_buf and first BD */
9739 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9740 tx_bd = &fp->tx_desc_ring[bd_prod];
9741
9742 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9743 tx_bd->general_data = (UNICAST_ADDRESS <<
9744 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
9745 /* header nbd */
9746 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
9747
9748 /* remember the first BD of the packet */
9749 tx_buf->first_bd = fp->tx_bd_prod;
9750 tx_buf->skb = skb;
9751
9752 DP(NETIF_MSG_TX_QUEUED,
9753 "sending pkt %u @%p next_idx %u bd %u @%p\n",
9754 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
9755
9756#ifdef BCM_VLAN
9757 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
9758 (bp->flags & HW_VLAN_TX_FLAG)) {
9759 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
9760 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
9761 vlan_off += 4;
9762 } else
9763#endif
9764 tx_bd->vlan = cpu_to_le16(pkt_prod);
9765
9766 if (xmit_type) {
9767 /* turn on parsing and get a BD */
9768 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9769 pbd = (void *)&fp->tx_desc_ring[bd_prod];
9770
9771 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9772 }
9773
9774 if (xmit_type & XMIT_CSUM) {
9775 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
9776
9777 /* for now NS flag is not used in Linux */
9778 pbd->global_data = (hlen |
9779 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
9780 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
9781
9782 pbd->ip_hlen = (skb_transport_header(skb) -
9783 skb_network_header(skb)) / 2;
9784
9785 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
9786
9787 pbd->total_hlen = cpu_to_le16(hlen);
9788 hlen = hlen*2 - vlan_off;
9789
9790 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
9791
9792 if (xmit_type & XMIT_CSUM_V4)
9793 tx_bd->bd_flags.as_bitfield |=
9794 ETH_TX_BD_FLAGS_IP_CSUM;
9795 else
9796 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
9797
9798 if (xmit_type & XMIT_CSUM_TCP) {
9799 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
9800
9801 } else {
9802 s8 fix = SKB_CS_OFF(skb); /* signed! */
9803
9804 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
9805 pbd->cs_offset = fix / 2;
9806
9807 DP(NETIF_MSG_TX_QUEUED,
9808 "hlen %d offset %d fix %d csum before fix %x\n",
9809 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
9810 SKB_CS(skb));
9811
9812 /* HW bug: fixup the CSUM */
9813 pbd->tcp_pseudo_csum =
9814 bnx2x_csum_fix(skb_transport_header(skb),
9815 SKB_CS(skb), fix);
9816
9817 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
9818 pbd->tcp_pseudo_csum);
9819 }
9820 }
9821
9822 mapping = pci_map_single(bp->pdev, skb->data,
9823 skb_headlen(skb), PCI_DMA_TODEVICE);
9824
9825 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9826 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9827 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
9828 tx_bd->nbd = cpu_to_le16(nbd);
9829 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9830
9831 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
9832 " nbytes %d flags %x vlan %x\n",
9833 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
9834 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
9835 le16_to_cpu(tx_bd->vlan));
9836
9837 if (xmit_type & XMIT_GSO) {
9838
9839 DP(NETIF_MSG_TX_QUEUED,
9840 "TSO packet len %d hlen %d total len %d tso size %d\n",
9841 skb->len, hlen, skb_headlen(skb),
9842 skb_shinfo(skb)->gso_size);
9843
9844 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
9845
9846 if (unlikely(skb_headlen(skb) > hlen))
9847 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
9848 bd_prod, ++nbd);
9849
9850 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
9851 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
9852 pbd->tcp_flags = pbd_tcp_flags(skb);
9853
9854 if (xmit_type & XMIT_GSO_V4) {
9855 pbd->ip_id = swab16(ip_hdr(skb)->id);
9856 pbd->tcp_pseudo_csum =
9857 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
9858 ip_hdr(skb)->daddr,
9859 0, IPPROTO_TCP, 0));
9860
9861 } else
9862 pbd->tcp_pseudo_csum =
9863 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
9864 &ipv6_hdr(skb)->daddr,
9865 0, IPPROTO_TCP, 0));
9866
9867 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
9868 }
9869
9870 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
9871 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
9872
9873 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9874 tx_bd = &fp->tx_desc_ring[bd_prod];
9875
9876 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
9877 frag->size, PCI_DMA_TODEVICE);
9878
9879 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9880 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9881 tx_bd->nbytes = cpu_to_le16(frag->size);
9882 tx_bd->vlan = cpu_to_le16(pkt_prod);
9883 tx_bd->bd_flags.as_bitfield = 0;
9884
9885 DP(NETIF_MSG_TX_QUEUED,
9886 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
9887 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
9888 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
9889 }
9890
9891 /* now at last mark the BD as the last BD */
9892 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
9893
9894 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
9895 tx_bd, tx_bd->bd_flags.as_bitfield);
9896
9897 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9898
9899 /* now send a tx doorbell, counting the next BD
9900 * if the packet contains or ends with it
9901 */
9902 if (TX_BD_POFF(bd_prod) < nbd)
9903 nbd++;
9904
9905 if (pbd)
9906 DP(NETIF_MSG_TX_QUEUED,
9907 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
9908 " tcp_flags %x xsum %x seq %u hlen %u\n",
9909 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
9910 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
9911 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
9912
9913 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
9914
9915 /*
9916 * Make sure that the BD data is updated before updating the producer
9917 * since FW might read the BD right after the producer is updated.
9918 * This is only applicable for weak-ordered memory model archs such
9919 * as IA-64. The following barrier is also mandatory since the FW
9920 * assumes packets must have BDs.
9921 */
9922 wmb();
9923
9924 fp->hw_tx_prods->bds_prod =
9925 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
9926 mb(); /* FW restriction: must not reorder writing nbd and packets */
9927 fp->hw_tx_prods->packets_prod =
9928 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9929 DOORBELL(bp, FP_IDX(fp), 0);
9930
9931 mmiowb();
9932
9933 fp->tx_bd_prod += nbd;
9934 dev->trans_start = jiffies;
9935
9936 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
9937 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
9938 if we put Tx into XOFF state. */
9939 smp_mb();
9940 netif_tx_stop_queue(txq);
9941 bp->eth_stats.driver_xoff++;
9942 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
9943 netif_tx_wake_queue(txq);
9944 }
9945 fp->tx_pkt++;
9946
9947 return NETDEV_TX_OK;
9948}
9949
9950/* called with rtnl_lock */
9951static int bnx2x_open(struct net_device *dev)
9952{
9953 struct bnx2x *bp = netdev_priv(dev);
9954
9955 netif_carrier_off(dev);
9956
9957 bnx2x_set_power_state(bp, PCI_D0);
9958
9959 return bnx2x_nic_load(bp, LOAD_OPEN);
9960}
9961
9962/* called with rtnl_lock */
9963static int bnx2x_close(struct net_device *dev)
9964{
9965 struct bnx2x *bp = netdev_priv(dev);
9966
9967 /* Unload the driver, release IRQs */
9968 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
9969 if (atomic_read(&bp->pdev->enable_cnt) == 1)
9970 if (!CHIP_REV_IS_SLOW(bp))
9971 bnx2x_set_power_state(bp, PCI_D3hot);
9972
9973 return 0;
9974}
9975
9976/* called with netif_tx_lock from set_multicast */
9977static void bnx2x_set_rx_mode(struct net_device *dev)
9978{
9979 struct bnx2x *bp = netdev_priv(dev);
9980 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
9981 int port = BP_PORT(bp);
9982
9983 if (bp->state != BNX2X_STATE_OPEN) {
9984 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9985 return;
9986 }
9987
9988 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9989
9990 if (dev->flags & IFF_PROMISC)
9991 rx_mode = BNX2X_RX_MODE_PROMISC;
9992
9993 else if ((dev->flags & IFF_ALLMULTI) ||
9994 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
9995 rx_mode = BNX2X_RX_MODE_ALLMULTI;
9996
9997 else { /* some multicasts */
9998 if (CHIP_IS_E1(bp)) {
9999 int i, old, offset;
10000 struct dev_mc_list *mclist;
10001 struct mac_configuration_cmd *config =
10002 bnx2x_sp(bp, mcast_config);
10003
10004 for (i = 0, mclist = dev->mc_list;
10005 mclist && (i < dev->mc_count);
10006 i++, mclist = mclist->next) {
10007
10008 config->config_table[i].
10009 cam_entry.msb_mac_addr =
10010 swab16(*(u16 *)&mclist->dmi_addr[0]);
10011 config->config_table[i].
10012 cam_entry.middle_mac_addr =
10013 swab16(*(u16 *)&mclist->dmi_addr[2]);
10014 config->config_table[i].
10015 cam_entry.lsb_mac_addr =
10016 swab16(*(u16 *)&mclist->dmi_addr[4]);
10017 config->config_table[i].cam_entry.flags =
10018 cpu_to_le16(port);
10019 config->config_table[i].
10020 target_table_entry.flags = 0;
10021 config->config_table[i].
10022 target_table_entry.client_id = 0;
10023 config->config_table[i].
10024 target_table_entry.vlan_id = 0;
10025
10026 DP(NETIF_MSG_IFUP,
10027 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10028 config->config_table[i].
10029 cam_entry.msb_mac_addr,
10030 config->config_table[i].
10031 cam_entry.middle_mac_addr,
10032 config->config_table[i].
10033 cam_entry.lsb_mac_addr);
10034 }
10035 old = config->hdr.length;
10036 if (old > i) {
10037 for (; i < old; i++) {
10038 if (CAM_IS_INVALID(config->
10039 config_table[i])) {
10040 /* already invalidated */
10041 break;
10042 }
10043 /* invalidate */
10044 CAM_INVALIDATE(config->
10045 config_table[i]);
10046 }
10047 }
10048
10049 if (CHIP_REV_IS_SLOW(bp))
10050 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10051 else
10052 offset = BNX2X_MAX_MULTICAST*(1 + port);
10053
10054 config->hdr.length = i;
10055 config->hdr.offset = offset;
10056 config->hdr.client_id = bp->fp->cl_id;
10057 config->hdr.reserved1 = 0;
10058
10059 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10060 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10061 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10062 0);
10063 } else { /* E1H */
10064 /* Accept one or more multicasts */
10065 struct dev_mc_list *mclist;
10066 u32 mc_filter[MC_HASH_SIZE];
10067 u32 crc, bit, regidx;
10068 int i;
10069
10070 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10071
10072 for (i = 0, mclist = dev->mc_list;
10073 mclist && (i < dev->mc_count);
10074 i++, mclist = mclist->next) {
10075
10076 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10077 mclist->dmi_addr);
10078
10079 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10080 bit = (crc >> 24) & 0xff;
10081 regidx = bit >> 5;
10082 bit &= 0x1f;
10083 mc_filter[regidx] |= (1 << bit);
10084 }
10085
10086 for (i = 0; i < MC_HASH_SIZE; i++)
10087 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10088 mc_filter[i]);
10089 }
10090 }
10091
10092 bp->rx_mode = rx_mode;
10093 bnx2x_set_storm_rx_mode(bp);
10094}
10095
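/* Editor's note -- the E1H multicast hash above in isolation
 * (illustrative only): the top byte of a CRC32c over the MAC selects
 * one of 256 bits spread across the MC_HASH_SIZE 32-bit registers.
 */
static inline void mc_hash_set(u32 *mc_filter, u8 *mac)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);	/* same CRC as above */
	u32 bit = (crc >> 24) & 0xff;		/* 0..255 */

	mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
}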
10096/* called with rtnl_lock */
10097static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10098{
10099 struct sockaddr *addr = p;
10100 struct bnx2x *bp = netdev_priv(dev);
10101
10102 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
10103 return -EINVAL;
10104
10105 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
10106 if (netif_running(dev)) {
10107 if (CHIP_IS_E1(bp))
10108 bnx2x_set_mac_addr_e1(bp, 1);
10109 else
10110 bnx2x_set_mac_addr_e1h(bp, 1);
10111 }
10112
10113 return 0;
10114}
10115
10116/* called with rtnl_lock */
10117static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10118{
10119 struct mii_ioctl_data *data = if_mii(ifr);
10120 struct bnx2x *bp = netdev_priv(dev);
10121 int port = BP_PORT(bp);
10122 int err;
10123
10124 switch (cmd) {
10125 case SIOCGMIIPHY:
10126 data->phy_id = bp->port.phy_addr;
10127
10128 /* fallthrough */
10129
10130 case SIOCGMIIREG: {
10131 u16 mii_regval;
10132
10133 if (!netif_running(dev))
10134 return -EAGAIN;
10135
10136 mutex_lock(&bp->port.phy_mutex);
10137 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
10138 DEFAULT_PHY_DEV_ADDR,
10139 (data->reg_num & 0x1f), &mii_regval);
10140 data->val_out = mii_regval;
10141 mutex_unlock(&bp->port.phy_mutex);
10142 return err;
10143 }
10144
10145 case SIOCSMIIREG:
10146 if (!capable(CAP_NET_ADMIN))
10147 return -EPERM;
10148
10149 if (!netif_running(dev))
10150 return -EAGAIN;
10151
10152 mutex_lock(&bp->port.phy_mutex);
10153 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
10154 DEFAULT_PHY_DEV_ADDR,
10155 (data->reg_num & 0x1f), data->val_in);
10156 mutex_unlock(&bp->port.phy_mutex);
10157 return err;
10158
10159 default:
10160 /* do nothing */
10161 break;
10162 }
10163
10164 return -EOPNOTSUPP;
10165}
10166
10167/* called with rtnl_lock */
10168static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10169{
10170 struct bnx2x *bp = netdev_priv(dev);
10171 int rc = 0;
10172
10173 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10174 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10175 return -EINVAL;
10176
10177 /* This does not race with packet allocation
10178 * because the actual alloc size is
10179 * only updated as part of load
10180 */
10181 dev->mtu = new_mtu;
10182
10183 if (netif_running(dev)) {
10184 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10185 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
10186 }
10187
10188 return rc;
10189}
10190
10191static void bnx2x_tx_timeout(struct net_device *dev)
10192{
10193 struct bnx2x *bp = netdev_priv(dev);
10194
10195#ifdef BNX2X_STOP_ON_ERROR
10196 if (!bp->panic)
10197 bnx2x_panic();
10198#endif
10199 /* This allows the netif to be shutdown gracefully before resetting */
10200 schedule_work(&bp->reset_task);
10201}
10202
10203#ifdef BCM_VLAN
10204/* called with rtnl_lock */
10205static void bnx2x_vlan_rx_register(struct net_device *dev,
10206 struct vlan_group *vlgrp)
10207{
10208 struct bnx2x *bp = netdev_priv(dev);
10209
10210 bp->vlgrp = vlgrp;
10211
10212 /* Set flags according to the required capabilities */
10213 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10214
10215 if (dev->features & NETIF_F_HW_VLAN_TX)
10216 bp->flags |= HW_VLAN_TX_FLAG;
10217
10218 if (dev->features & NETIF_F_HW_VLAN_RX)
10219 bp->flags |= HW_VLAN_RX_FLAG;
10220
10221 if (netif_running(dev))
10222 bnx2x_set_client_config(bp);
10223}
10224
10225#endif
10226
10227#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10228static void poll_bnx2x(struct net_device *dev)
10229{
10230 struct bnx2x *bp = netdev_priv(dev);
10231
10232 disable_irq(bp->pdev->irq);
10233 bnx2x_interrupt(bp->pdev->irq, dev);
10234 enable_irq(bp->pdev->irq);
10235}
10236#endif
10237
10238static const struct net_device_ops bnx2x_netdev_ops = {
10239 .ndo_open = bnx2x_open,
10240 .ndo_stop = bnx2x_close,
10241 .ndo_start_xmit = bnx2x_start_xmit,
10242 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10243 .ndo_set_mac_address = bnx2x_change_mac_addr,
10244 .ndo_validate_addr = eth_validate_addr,
10245 .ndo_do_ioctl = bnx2x_ioctl,
10246 .ndo_change_mtu = bnx2x_change_mtu,
10247 .ndo_tx_timeout = bnx2x_tx_timeout,
10248#ifdef BCM_VLAN
10249 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10250#endif
10251#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10252 .ndo_poll_controller = poll_bnx2x,
10253#endif
10254};
10255
10256
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* request regions only on the first enable of the device */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA, fall back to a 32-bit mask */
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* BAR0 holds the register window, BAR2 the doorbells */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value: 1 = 2.5GT/s (Gen1), 2 = 5GT/s (Gen2) */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
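
/*
 * Illustrative user-space sketch (not part of the driver): the same
 * width/speed fields read above through the PCICFG window are visible in
 * the standard PCIe Link Status register (offset 0x12 in the PCI Express
 * capability; speed in bits 3:0 with 1 = 2.5GT/s and 2 = 5GT/s, negotiated
 * width in bits 9:4).  argv[1] is a device's sysfs "config" file, e.g.
 * under /sys/bus/pci/devices/; reading all 256 bytes typically requires
 * root.  0x34 is the capability-list pointer, 0x10 is PCI_CAP_ID_EXP.
 *
 *	#include <stdio.h>
 *	#include <stdint.h>
 *
 *	int main(int argc, char **argv)
 *	{
 *		uint8_t cfg[256];
 *		uint16_t lnksta;
 *		unsigned int ptr;
 *		FILE *f;
 *
 *		if (argc < 2 || !(f = fopen(argv[1], "rb")))
 *			return 1;
 *		if (fread(cfg, 1, sizeof(cfg), f) != sizeof(cfg))
 *			return 1;
 *		for (ptr = cfg[0x34] & 0xfc; ptr; ptr = cfg[ptr + 1] & 0xfc)
 *			if (cfg[ptr] == 0x10)
 *				break;
 *		if (!ptr)
 *			return 1;
 *		lnksta = cfg[ptr + 0x12] | (cfg[ptr + 0x13] << 8);
 *		printf("PCI-E x%u %s\n", (lnksta >> 4) & 0x3f,
 *		       ((lnksta & 0xf) == 2) ? "5GHz (Gen2)" : "2.5GHz");
 *		fclose(f);
 *		return 0;
 *	}
 */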

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in alloc_etherdev_mq */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}
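
/*
 * Note: suspend and resume are deliberately symmetric: suspend saves PCI
 * config state, unloads the NIC and drops to the chosen low-power state,
 * while resume restores config state, returns to D0 and reloads the NIC
 * with LOAD_OPEN so a running interface comes back transparently.
 */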

/* Stripped-down unload used once a PCI channel error has been detected:
 * the device may no longer be accessible, so driver resources are freed
 * without the usual shutdown handshake with the chip and firmware.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

/* Called after a slot reset to re-discover the shared-memory base and
 * decide whether the management CPU (MCP) is still operational.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};
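
/*
 * The PCI error-recovery core drives the callbacks above in sequence:
 * error_detected() quiesces the device and asks for a slot reset,
 * slot_reset() re-enables the device and restores its config space once
 * the bus has been reset, and resume() reattaches and reloads the NIC
 * when traffic may flow again.
 */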

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);