bnx2x: smp_mb and not just smp_rmb
[linux-2.6-block.git] / drivers / net / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
d05c26ce 3 * Copyright (c) 2007-2009 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
13 * Slowpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
a2fbb9ea
ET
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
0c6671b0 41#include <linux/if_vlan.h>
a2fbb9ea
ET
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
34f80b04 45#include <net/ip6_checksum.h>
a2fbb9ea
ET
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
34f80b04 48#include <linux/crc32c.h>
a2fbb9ea
ET
49#include <linux/prefetch.h>
50#include <linux/zlib.h>
a2fbb9ea
ET
51#include <linux/io.h>
52
53#include "bnx2x_reg.h"
54#include "bnx2x_fw_defs.h"
55#include "bnx2x_hsi.h"
c18487ee 56#include "bnx2x_link.h"
a2fbb9ea
ET
57#include "bnx2x.h"
58#include "bnx2x_init.h"
59
e8b5fc51
VZ
60#define DRV_MODULE_VERSION "1.45.26"
61#define DRV_MODULE_RELDATE "2009/01/26"
34f80b04 62#define BNX2X_BC_VER 0x040200
a2fbb9ea 63
34f80b04
EG
64/* Time in jiffies before concluding the transmitter is hung */
65#define TX_TIMEOUT (5*HZ)
a2fbb9ea 66
53a10565 67static char version[] __devinitdata =
34f80b04 68 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
a2fbb9ea
ET
69 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
24e3fcef 71MODULE_AUTHOR("Eliezer Tamir");
e47d7e6e 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
a2fbb9ea
ET
73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
a2fbb9ea 75
555f6c78
EG
76static int multi_mode = 1;
77module_param(multi_mode, int, 0);
78
19680c48 79static int disable_tpa;
a2fbb9ea 80static int poll;
a2fbb9ea 81static int debug;
34f80b04 82static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
a2fbb9ea 83
19680c48 84module_param(disable_tpa, int, 0);
8badd27a
EG
85
86static int int_mode;
87module_param(int_mode, int, 0);
88MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");
89
a2fbb9ea 90module_param(poll, int, 0);
8d5726c4
EG
91
92static int mrrs = -1;
93module_param(mrrs, int, 0);
94MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
95
a2fbb9ea 96module_param(debug, int, 0);
19680c48 97MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
a2fbb9ea 98MODULE_PARM_DESC(poll, "use polling (for debug)");
c14423fe 99MODULE_PARM_DESC(debug, "default debug msglevel");
a2fbb9ea 100
1cf167f2 101static struct workqueue_struct *bnx2x_wq;
a2fbb9ea
ET
102
103enum bnx2x_board_type {
104 BCM57710 = 0,
34f80b04
EG
105 BCM57711 = 1,
106 BCM57711E = 2,
a2fbb9ea
ET
107};
108
34f80b04 109/* indexed by board_type, above */
53a10565 110static struct {
a2fbb9ea
ET
111 char *name;
112} board_info[] __devinitdata = {
34f80b04
EG
113 { "Broadcom NetXtreme II BCM57710 XGb" },
114 { "Broadcom NetXtreme II BCM57711 XGb" },
115 { "Broadcom NetXtreme II BCM57711E XGb" }
a2fbb9ea
ET
116};
117
34f80b04 118
a2fbb9ea
ET
119static const struct pci_device_id bnx2x_pci_tbl[] = {
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
34f80b04
EG
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
a2fbb9ea
ET
126 { 0 }
127};
128
129MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
130
131/****************************************************************************
132* General service functions
133****************************************************************************/
134
135/* used only at init
136 * locking is done by mcp
137 */
138static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
139{
140 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
141 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
142 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
143 PCICFG_VENDOR_ID_OFFSET);
144}
145
a2fbb9ea
ET
146static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
147{
148 u32 val;
149
150 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
151 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
152 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
153 PCICFG_VENDOR_ID_OFFSET);
154
155 return val;
156}
a2fbb9ea
ET
157
158static const u32 dmae_reg_go_c[] = {
159 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
160 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
161 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
162 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
163};
164
165/* copy command into DMAE command memory and set DMAE command go */
166static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
167 int idx)
168{
169 u32 cmd_offset;
170 int i;
171
172 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
173 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
174 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
175
ad8d3948
EG
176 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
177 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
178 }
179 REG_WR(bp, dmae_reg_go_c[idx], 1);
180}
181
ad8d3948
EG
182void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
183 u32 len32)
a2fbb9ea 184{
ad8d3948 185 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 186 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
187 int cnt = 200;
188
189 if (!bp->dmae_ready) {
190 u32 *data = bnx2x_sp(bp, wb_data[0]);
191
192 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
193 " using indirect\n", dst_addr, len32);
194 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
195 return;
196 }
197
198 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
199
200 memset(dmae, 0, sizeof(struct dmae_command));
201
202 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
203 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
204 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
205#ifdef __BIG_ENDIAN
206 DMAE_CMD_ENDIANITY_B_DW_SWAP |
207#else
208 DMAE_CMD_ENDIANITY_DW_SWAP |
209#endif
34f80b04
EG
210 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
211 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
212 dmae->src_addr_lo = U64_LO(dma_addr);
213 dmae->src_addr_hi = U64_HI(dma_addr);
214 dmae->dst_addr_lo = dst_addr >> 2;
215 dmae->dst_addr_hi = 0;
216 dmae->len = len32;
217 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
218 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 219 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 220
ad8d3948 221 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
222 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
223 "dst_addr [%x:%08x (%08x)]\n"
224 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
225 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
226 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
227 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
ad8d3948 228 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
229 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
230 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
a2fbb9ea
ET
231
232 *wb_comp = 0;
233
34f80b04 234 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
235
236 udelay(5);
ad8d3948
EG
237
238 while (*wb_comp != DMAE_COMP_VAL) {
239 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
240
ad8d3948 241 if (!cnt) {
a2fbb9ea
ET
242 BNX2X_ERR("dmae timeout!\n");
243 break;
244 }
ad8d3948 245 cnt--;
12469401
YG
246 /* adjust delay for emulation/FPGA */
247 if (CHIP_REV_IS_SLOW(bp))
248 msleep(100);
249 else
250 udelay(5);
a2fbb9ea 251 }
ad8d3948
EG
252
253 mutex_unlock(&bp->dmae_mutex);
a2fbb9ea
ET
254}
255
c18487ee 256void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 257{
ad8d3948 258 struct dmae_command *dmae = &bp->init_dmae;
a2fbb9ea 259 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
ad8d3948
EG
260 int cnt = 200;
261
262 if (!bp->dmae_ready) {
263 u32 *data = bnx2x_sp(bp, wb_data[0]);
264 int i;
265
266 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
267 " using indirect\n", src_addr, len32);
268 for (i = 0; i < len32; i++)
269 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
270 return;
271 }
272
273 mutex_lock(&bp->dmae_mutex);
a2fbb9ea
ET
274
275 memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
276 memset(dmae, 0, sizeof(struct dmae_command));
277
278 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
279 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
280 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
281#ifdef __BIG_ENDIAN
282 DMAE_CMD_ENDIANITY_B_DW_SWAP |
283#else
284 DMAE_CMD_ENDIANITY_DW_SWAP |
285#endif
34f80b04
EG
286 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
287 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea
ET
288 dmae->src_addr_lo = src_addr >> 2;
289 dmae->src_addr_hi = 0;
290 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
291 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
292 dmae->len = len32;
293 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
294 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
ad8d3948 295 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 296
ad8d3948 297 DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
a2fbb9ea
ET
298 DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
299 "dst_addr [%x:%08x (%08x)]\n"
300 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
301 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
302 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
303 dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
a2fbb9ea
ET
304
305 *wb_comp = 0;
306
34f80b04 307 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea
ET
308
309 udelay(5);
ad8d3948
EG
310
311 while (*wb_comp != DMAE_COMP_VAL) {
312
ad8d3948 313 if (!cnt) {
a2fbb9ea
ET
314 BNX2X_ERR("dmae timeout!\n");
315 break;
316 }
ad8d3948 317 cnt--;
12469401
YG
318 /* adjust delay for emulation/FPGA */
319 if (CHIP_REV_IS_SLOW(bp))
320 msleep(100);
321 else
322 udelay(5);
a2fbb9ea 323 }
ad8d3948 324 DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
a2fbb9ea
ET
325 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
326 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
ad8d3948
EG
327
328 mutex_unlock(&bp->dmae_mutex);
329}
330
331/* used only for slowpath so not inlined */
332static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
333{
334 u32 wb_write[2];
335
336 wb_write[0] = val_hi;
337 wb_write[1] = val_lo;
338 REG_WR_DMAE(bp, reg, wb_write, 2);
a2fbb9ea 339}
a2fbb9ea 340
ad8d3948
EG
341#ifdef USE_WB_RD
342static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
343{
344 u32 wb_data[2];
345
346 REG_RD_DMAE(bp, reg, wb_data, 2);
347
348 return HILO_U64(wb_data[0], wb_data[1]);
349}
350#endif
351
a2fbb9ea
ET
352static int bnx2x_mc_assert(struct bnx2x *bp)
353{
a2fbb9ea 354 char last_idx;
34f80b04
EG
355 int i, rc = 0;
356 u32 row0, row1, row2, row3;
357
358 /* XSTORM */
359 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
360 XSTORM_ASSERT_LIST_INDEX_OFFSET);
361 if (last_idx)
362 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
363
364 /* print the asserts */
365 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
366
367 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
368 XSTORM_ASSERT_LIST_OFFSET(i));
369 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
370 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
371 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
372 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
373 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
374 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
375
376 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
377 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
378 " 0x%08x 0x%08x 0x%08x\n",
379 i, row3, row2, row1, row0);
380 rc++;
381 } else {
382 break;
383 }
384 }
385
386 /* TSTORM */
387 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
388 TSTORM_ASSERT_LIST_INDEX_OFFSET);
389 if (last_idx)
390 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
391
392 /* print the asserts */
393 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
394
395 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
396 TSTORM_ASSERT_LIST_OFFSET(i));
397 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
398 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
399 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
400 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
401 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
402 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
403
404 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
405 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
406 " 0x%08x 0x%08x 0x%08x\n",
407 i, row3, row2, row1, row0);
408 rc++;
409 } else {
410 break;
411 }
412 }
413
414 /* CSTORM */
415 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
416 CSTORM_ASSERT_LIST_INDEX_OFFSET);
417 if (last_idx)
418 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
419
420 /* print the asserts */
421 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
422
423 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
424 CSTORM_ASSERT_LIST_OFFSET(i));
425 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
426 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
427 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
428 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
429 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
430 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
431
432 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
433 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
434 " 0x%08x 0x%08x 0x%08x\n",
435 i, row3, row2, row1, row0);
436 rc++;
437 } else {
438 break;
439 }
440 }
441
442 /* USTORM */
443 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
444 USTORM_ASSERT_LIST_INDEX_OFFSET);
445 if (last_idx)
446 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
447
448 /* print the asserts */
449 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
450
451 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
452 USTORM_ASSERT_LIST_OFFSET(i));
453 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
454 USTORM_ASSERT_LIST_OFFSET(i) + 4);
455 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
456 USTORM_ASSERT_LIST_OFFSET(i) + 8);
457 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
458 USTORM_ASSERT_LIST_OFFSET(i) + 12);
459
460 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
461 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
462 " 0x%08x 0x%08x 0x%08x\n",
463 i, row3, row2, row1, row0);
464 rc++;
465 } else {
466 break;
a2fbb9ea
ET
467 }
468 }
34f80b04 469
a2fbb9ea
ET
470 return rc;
471}
c14423fe 472
a2fbb9ea
ET
473static void bnx2x_fw_dump(struct bnx2x *bp)
474{
475 u32 mark, offset;
476 u32 data[9];
477 int word;
478
479 mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
49d66772
ET
480 mark = ((mark + 0x3) & ~0x3);
481 printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);
a2fbb9ea
ET
482
483 for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
484 for (word = 0; word < 8; word++)
485 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
486 offset + 4*word));
487 data[8] = 0x0;
49d66772 488 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
489 }
490 for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
491 for (word = 0; word < 8; word++)
492 data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
493 offset + 4*word));
494 data[8] = 0x0;
49d66772 495 printk(KERN_CONT "%s", (char *)data);
a2fbb9ea
ET
496 }
497 printk("\n" KERN_ERR PFX "end of fw dump\n");
498}
499
500static void bnx2x_panic_dump(struct bnx2x *bp)
501{
502 int i;
503 u16 j, start, end;
504
66e855f3
YG
505 bp->stats_state = STATS_STATE_DISABLED;
506 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
507
a2fbb9ea
ET
508 BNX2X_ERR("begin crash dump -----------------\n");
509
510 for_each_queue(bp, i) {
511 struct bnx2x_fastpath *fp = &bp->fp[i];
512 struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;
513
514 BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
34f80b04 515 " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
a2fbb9ea 516 i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
34f80b04 517 fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
66e855f3
YG
518 BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
519 " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
520 " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
521 fp->rx_bd_prod, fp->rx_bd_cons,
522 le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
523 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
524 BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
525 " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
526 " *sb_u_idx(%x) bd data(%x,%x)\n",
527 fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
528 fp->status_blk->c_status_block.status_block_index,
529 fp->fp_u_idx,
530 fp->status_blk->u_status_block.status_block_index,
531 hw_prods->packets_prod, hw_prods->bds_prod);
a2fbb9ea
ET
532
533 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
534 end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
535 for (j = start; j < end; j++) {
536 struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];
537
538 BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
539 sw_bd->skb, sw_bd->first_bd);
540 }
541
542 start = TX_BD(fp->tx_bd_cons - 10);
543 end = TX_BD(fp->tx_bd_cons + 254);
544 for (j = start; j < end; j++) {
545 u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];
546
547 BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
548 j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
549 }
550
551 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
552 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
553 for (j = start; j < end; j++) {
554 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
555 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
556
557 BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
34f80b04 558 j, rx_bd[1], rx_bd[0], sw_bd->skb);
a2fbb9ea
ET
559 }
560
3196a88a
EG
561 start = RX_SGE(fp->rx_sge_prod);
562 end = RX_SGE(fp->last_max_sge);
7a9b2557
VZ
563 for (j = start; j < end; j++) {
564 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
565 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
566
567 BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
568 j, rx_sge[1], rx_sge[0], sw_page->page);
569 }
570
a2fbb9ea
ET
571 start = RCQ_BD(fp->rx_comp_cons - 10);
572 end = RCQ_BD(fp->rx_comp_cons + 503);
573 for (j = start; j < end; j++) {
574 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
575
576 BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
577 j, cqe[0], cqe[1], cqe[2], cqe[3]);
578 }
579 }
580
49d66772
ET
581 BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
582 " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
a2fbb9ea 583 " spq_prod_idx(%u)\n",
49d66772 584 bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
a2fbb9ea
ET
585 bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);
586
34f80b04 587 bnx2x_fw_dump(bp);
a2fbb9ea
ET
588 bnx2x_mc_assert(bp);
589 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
590}
591
615f8fd9 592static void bnx2x_int_enable(struct bnx2x *bp)
a2fbb9ea 593{
34f80b04 594 int port = BP_PORT(bp);
a2fbb9ea
ET
595 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
596 u32 val = REG_RD(bp, addr);
597 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 598 int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;
a2fbb9ea
ET
599
600 if (msix) {
8badd27a
EG
601 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
602 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
603 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
604 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
8badd27a
EG
605 } else if (msi) {
606 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
607 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
608 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
609 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
610 } else {
611 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 612 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
613 HC_CONFIG_0_REG_INT_LINE_EN_0 |
614 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 615
8badd27a
EG
616 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
617 val, port, addr);
615f8fd9
ET
618
619 REG_WR(bp, addr, val);
620
a2fbb9ea
ET
621 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
622 }
623
8badd27a
EG
624 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
625 val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
626
627 REG_WR(bp, addr, val);
34f80b04
EG
628
629 if (CHIP_IS_E1H(bp)) {
630 /* init leading/trailing edge */
631 if (IS_E1HMF(bp)) {
8badd27a 632 val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
34f80b04 633 if (bp->port.pmf)
4acac6a5
EG
634 /* enable nig and gpio3 attention */
635 val |= 0x1100;
34f80b04
EG
636 } else
637 val = 0xffff;
638
639 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
640 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
641 }
a2fbb9ea
ET
642}
643
615f8fd9 644static void bnx2x_int_disable(struct bnx2x *bp)
a2fbb9ea 645{
34f80b04 646 int port = BP_PORT(bp);
a2fbb9ea
ET
647 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
648 u32 val = REG_RD(bp, addr);
649
650 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
651 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
652 HC_CONFIG_0_REG_INT_LINE_EN_0 |
653 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
654
655 DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
656 val, port, addr);
657
8badd27a
EG
658 /* flush all outstanding writes */
659 mmiowb();
660
a2fbb9ea
ET
661 REG_WR(bp, addr, val);
662 if (REG_RD(bp, addr) != val)
663 BNX2X_ERR("BUG! proper val not read from IGU!\n");
664}
665
f8ef6e44 666static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 667{
a2fbb9ea 668 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 669 int i, offset;
a2fbb9ea 670
34f80b04 671 /* disable interrupt handling */
a2fbb9ea 672 atomic_inc(&bp->intr_sem);
f8ef6e44
YG
673 if (disable_hw)
674 /* prevent the HW from sending interrupts */
675 bnx2x_int_disable(bp);
a2fbb9ea
ET
676
677 /* make sure all ISRs are done */
678 if (msix) {
8badd27a
EG
679 synchronize_irq(bp->msix_table[0].vector);
680 offset = 1;
a2fbb9ea 681 for_each_queue(bp, i)
8badd27a 682 synchronize_irq(bp->msix_table[i + offset].vector);
a2fbb9ea
ET
683 } else
684 synchronize_irq(bp->pdev->irq);
685
686 /* make sure sp_task is not running */
1cf167f2
EG
687 cancel_delayed_work(&bp->sp_task);
688 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
689}
690
34f80b04 691/* fast path */
a2fbb9ea
ET
692
693/*
34f80b04 694 * General service functions
a2fbb9ea
ET
695 */
696
34f80b04 697static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
a2fbb9ea
ET
698 u8 storm, u16 index, u8 op, u8 update)
699{
5c862848
EG
700 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
701 COMMAND_REG_INT_ACK);
a2fbb9ea
ET
702 struct igu_ack_register igu_ack;
703
704 igu_ack.status_block_index = index;
705 igu_ack.sb_id_and_flags =
34f80b04 706 ((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
a2fbb9ea
ET
707 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
708 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
709 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));
710
5c862848
EG
711 DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
712 (*(u32 *)&igu_ack), hc_addr);
713 REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
a2fbb9ea
ET
714}
715
716static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
717{
718 struct host_status_block *fpsb = fp->status_blk;
719 u16 rc = 0;
720
721 barrier(); /* status block is written to by the chip */
722 if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
723 fp->fp_c_idx = fpsb->c_status_block.status_block_index;
724 rc |= 1;
725 }
726 if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
727 fp->fp_u_idx = fpsb->u_status_block.status_block_index;
728 rc |= 2;
729 }
730 return rc;
731}
732
a2fbb9ea
ET
733static u16 bnx2x_ack_int(struct bnx2x *bp)
734{
5c862848
EG
735 u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
736 COMMAND_REG_SIMD_MASK);
737 u32 result = REG_RD(bp, hc_addr);
a2fbb9ea 738
5c862848
EG
739 DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
740 result, hc_addr);
a2fbb9ea 741
a2fbb9ea
ET
742 return result;
743}
744
745
746/*
747 * fast path service functions
748 */
749
237907c1
EG
750static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
751{
752 u16 tx_cons_sb;
753
754 /* Tell compiler that status block fields can change */
755 barrier();
756 tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
e8b5fc51
VZ
757 return (fp->tx_pkt_cons != tx_cons_sb);
758}
759
760static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
761{
762 /* Tell compiler that consumer and producer can change */
763 barrier();
764 return (fp->tx_pkt_prod != fp->tx_pkt_cons);
765
237907c1
EG
766}
767
a2fbb9ea
ET
768/* free skb in the packet ring at pos idx
769 * return idx of last bd freed
770 */
771static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
772 u16 idx)
773{
774 struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
775 struct eth_tx_bd *tx_bd;
776 struct sk_buff *skb = tx_buf->skb;
34f80b04 777 u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
a2fbb9ea
ET
778 int nbd;
779
780 DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
781 idx, tx_buf, skb);
782
783 /* unmap first bd */
784 DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
785 tx_bd = &fp->tx_desc_ring[bd_idx];
786 pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
787 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
788
789 nbd = le16_to_cpu(tx_bd->nbd) - 1;
34f80b04 790 new_cons = nbd + tx_buf->first_bd;
a2fbb9ea
ET
791#ifdef BNX2X_STOP_ON_ERROR
792 if (nbd > (MAX_SKB_FRAGS + 2)) {
34f80b04 793 BNX2X_ERR("BAD nbd!\n");
a2fbb9ea
ET
794 bnx2x_panic();
795 }
796#endif
797
798 /* Skip a parse bd and the TSO split header bd
799 since they have no mapping */
800 if (nbd)
801 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
802
803 if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
804 ETH_TX_BD_FLAGS_TCP_CSUM |
805 ETH_TX_BD_FLAGS_SW_LSO)) {
806 if (--nbd)
807 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
808 tx_bd = &fp->tx_desc_ring[bd_idx];
809 /* is this a TSO split header bd? */
810 if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
811 if (--nbd)
812 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
813 }
814 }
815
816 /* now free frags */
817 while (nbd > 0) {
818
819 DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
820 tx_bd = &fp->tx_desc_ring[bd_idx];
821 pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
822 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
823 if (--nbd)
824 bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
825 }
826
827 /* release skb */
53e5e96e 828 WARN_ON(!skb);
a2fbb9ea
ET
829 dev_kfree_skb(skb);
830 tx_buf->first_bd = 0;
831 tx_buf->skb = NULL;
832
34f80b04 833 return new_cons;
a2fbb9ea
ET
834}
835
34f80b04 836static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
a2fbb9ea 837{
34f80b04
EG
838 s16 used;
839 u16 prod;
840 u16 cons;
a2fbb9ea 841
34f80b04 842 barrier(); /* Tell compiler that prod and cons can change */
a2fbb9ea
ET
843 prod = fp->tx_bd_prod;
844 cons = fp->tx_bd_cons;
845
34f80b04
EG
846 /* NUM_TX_RINGS = number of "next-page" entries
847 It will be used as a threshold */
848 used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;
a2fbb9ea 849
34f80b04 850#ifdef BNX2X_STOP_ON_ERROR
53e5e96e
IJ
851 WARN_ON(used < 0);
852 WARN_ON(used > fp->bp->tx_ring_size);
853 WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
34f80b04 854#endif
a2fbb9ea 855
34f80b04 856 return (s16)(fp->bp->tx_ring_size) - used;
a2fbb9ea
ET
857}
858
859static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
860{
861 struct bnx2x *bp = fp->bp;
555f6c78 862 struct netdev_queue *txq;
a2fbb9ea
ET
863 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
864 int done = 0;
865
866#ifdef BNX2X_STOP_ON_ERROR
867 if (unlikely(bp->panic))
868 return;
869#endif
870
555f6c78 871 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
872 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
873 sw_cons = fp->tx_pkt_cons;
874
875 while (sw_cons != hw_cons) {
876 u16 pkt_cons;
877
878 pkt_cons = TX_BD(sw_cons);
879
880 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
881
34f80b04 882 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
883 hw_cons, sw_cons, pkt_cons);
884
34f80b04 885/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
886 rmb();
887 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
888 }
889*/
890 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
891 sw_cons++;
892 done++;
893
894 if (done == work)
895 break;
896 }
897
898 fp->tx_pkt_cons = sw_cons;
899 fp->tx_bd_cons = bd_cons;
900
555f6c78
EG
901 /* Need to make the tx_bd_cons update visible to start_xmit()
902 * before checking for netif_tx_queue_stopped(). Without the
a2fbb9ea
ET
903 * memory barrier, there is a small possibility that start_xmit()
904 * will miss it and cause the queue to be stopped forever.
905 */
906 smp_mb();
907
908 /* TBD need a thresh? */
555f6c78 909 if (unlikely(netif_tx_queue_stopped(txq))) {
a2fbb9ea 910
555f6c78 911 __netif_tx_lock(txq, smp_processor_id());
a2fbb9ea 912
555f6c78 913 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 914 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 915 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 916 netif_tx_wake_queue(txq);
a2fbb9ea 917
555f6c78 918 __netif_tx_unlock(txq);
a2fbb9ea
ET
919 }
920}
921
3196a88a 922
a2fbb9ea
ET
923static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
924 union eth_rx_cqe *rr_cqe)
925{
926 struct bnx2x *bp = fp->bp;
927 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
928 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
929
34f80b04 930 DP(BNX2X_MSG_SP,
a2fbb9ea 931 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
932 FP_IDX(fp), cid, command, bp->state,
933 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
934
935 bp->spq_left++;
936
34f80b04 937 if (FP_IDX(fp)) {
a2fbb9ea
ET
938 switch (command | fp->state) {
939 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
940 BNX2X_FP_STATE_OPENING):
941 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
942 cid);
943 fp->state = BNX2X_FP_STATE_OPEN;
944 break;
945
946 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
947 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
948 cid);
949 fp->state = BNX2X_FP_STATE_HALTED;
950 break;
951
952 default:
34f80b04
EG
953 BNX2X_ERR("unexpected MC reply (%d) "
954 "fp->state is %x\n", command, fp->state);
955 break;
a2fbb9ea 956 }
34f80b04 957 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
958 return;
959 }
c14423fe 960
a2fbb9ea
ET
961 switch (command | bp->state) {
962 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
963 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
964 bp->state = BNX2X_STATE_OPEN;
965 break;
966
967 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
968 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
969 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
970 fp->state = BNX2X_FP_STATE_HALTED;
971 break;
972
a2fbb9ea 973 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 974 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 975 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
976 break;
977
3196a88a 978
a2fbb9ea 979 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 981 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 982 bp->set_mac_pending = 0;
a2fbb9ea
ET
983 break;
984
49d66772 985 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 986 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
987 break;
988
a2fbb9ea 989 default:
34f80b04 990 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 991 command, bp->state);
34f80b04 992 break;
a2fbb9ea 993 }
34f80b04 994 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
995}
996
7a9b2557
VZ
997static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
998 struct bnx2x_fastpath *fp, u16 index)
999{
1000 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1001 struct page *page = sw_buf->page;
1002 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1003
1004 /* Skip "next page" elements */
1005 if (!page)
1006 return;
1007
1008 pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
4f40f2cb 1009 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1010 __free_pages(page, PAGES_PER_SGE_SHIFT);
1011
1012 sw_buf->page = NULL;
1013 sge->addr_hi = 0;
1014 sge->addr_lo = 0;
1015}
1016
1017static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
1018 struct bnx2x_fastpath *fp, int last)
1019{
1020 int i;
1021
1022 for (i = 0; i < last; i++)
1023 bnx2x_free_rx_sge(bp, fp, i);
1024}
1025
1026static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
1027 struct bnx2x_fastpath *fp, u16 index)
1028{
1029 struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
1030 struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
1031 struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
1032 dma_addr_t mapping;
1033
1034 if (unlikely(page == NULL))
1035 return -ENOMEM;
1036
4f40f2cb 1037 mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
7a9b2557 1038 PCI_DMA_FROMDEVICE);
8d8bb39b 1039 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
7a9b2557
VZ
1040 __free_pages(page, PAGES_PER_SGE_SHIFT);
1041 return -ENOMEM;
1042 }
1043
1044 sw_buf->page = page;
1045 pci_unmap_addr_set(sw_buf, mapping, mapping);
1046
1047 sge->addr_hi = cpu_to_le32(U64_HI(mapping));
1048 sge->addr_lo = cpu_to_le32(U64_LO(mapping));
1049
1050 return 0;
1051}
1052
a2fbb9ea
ET
1053static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
1054 struct bnx2x_fastpath *fp, u16 index)
1055{
1056 struct sk_buff *skb;
1057 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
1058 struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
1059 dma_addr_t mapping;
1060
1061 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1062 if (unlikely(skb == NULL))
1063 return -ENOMEM;
1064
437cf2f1 1065 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
a2fbb9ea 1066 PCI_DMA_FROMDEVICE);
8d8bb39b 1067 if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
a2fbb9ea
ET
1068 dev_kfree_skb(skb);
1069 return -ENOMEM;
1070 }
1071
1072 rx_buf->skb = skb;
1073 pci_unmap_addr_set(rx_buf, mapping, mapping);
1074
1075 rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1076 rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1077
1078 return 0;
1079}
1080
1081/* note that we are not allocating a new skb,
1082 * we are just moving one from cons to prod
1083 * we are not creating a new mapping,
1084 * so there is no need to check for dma_mapping_error().
1085 */
1086static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
1087 struct sk_buff *skb, u16 cons, u16 prod)
1088{
1089 struct bnx2x *bp = fp->bp;
1090 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1091 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1092 struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
1093 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1094
1095 pci_dma_sync_single_for_device(bp->pdev,
1096 pci_unmap_addr(cons_rx_buf, mapping),
87942b46 1097 RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
a2fbb9ea
ET
1098
1099 prod_rx_buf->skb = cons_rx_buf->skb;
1100 pci_unmap_addr_set(prod_rx_buf, mapping,
1101 pci_unmap_addr(cons_rx_buf, mapping));
1102 *prod_bd = *cons_bd;
1103}
1104
7a9b2557
VZ
1105static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
1106 u16 idx)
1107{
1108 u16 last_max = fp->last_max_sge;
1109
1110 if (SUB_S16(idx, last_max) > 0)
1111 fp->last_max_sge = idx;
1112}
1113
1114static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
1115{
1116 int i, j;
1117
1118 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
1119 int idx = RX_SGE_CNT * i - 1;
1120
1121 for (j = 0; j < 2; j++) {
1122 SGE_MASK_CLEAR_BIT(fp, idx);
1123 idx--;
1124 }
1125 }
1126}
1127
1128static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1129 struct eth_fast_path_rx_cqe *fp_cqe)
1130{
1131 struct bnx2x *bp = fp->bp;
4f40f2cb 1132 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1133 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1134 SGE_PAGE_SHIFT;
7a9b2557
VZ
1135 u16 last_max, last_elem, first_elem;
1136 u16 delta = 0;
1137 u16 i;
1138
1139 if (!sge_len)
1140 return;
1141
1142 /* First mark all used pages */
1143 for (i = 0; i < sge_len; i++)
1144 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1145
1146 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1147 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1148
1149 /* Here we assume that the last SGE index is the biggest */
1150 prefetch((void *)(fp->sge_mask));
1151 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1152
1153 last_max = RX_SGE(fp->last_max_sge);
1154 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1155 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1156
1157 /* If ring is not full */
1158 if (last_elem + 1 != first_elem)
1159 last_elem++;
1160
1161 /* Now update the prod */
1162 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1163 if (likely(fp->sge_mask[i]))
1164 break;
1165
1166 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1167 delta += RX_SGE_MASK_ELEM_SZ;
1168 }
1169
1170 if (delta > 0) {
1171 fp->rx_sge_prod += delta;
1172 /* clear page-end entries */
1173 bnx2x_clear_sge_mask_next_elems(fp);
1174 }
1175
1176 DP(NETIF_MSG_RX_STATUS,
1177 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1178 fp->last_max_sge, fp->rx_sge_prod);
1179}
1180
1181static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
1182{
1183 /* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
1184 memset(fp->sge_mask, 0xff,
1185 (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));
1186
33471629
EG
1187 /* Clear the two last indices in the page to 1:
1188 these are the indices that correspond to the "next" element,
7a9b2557
VZ
1189 hence will never be indicated and should be removed from
1190 the calculations. */
1191 bnx2x_clear_sge_mask_next_elems(fp);
1192}
1193
1194static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
1195 struct sk_buff *skb, u16 cons, u16 prod)
1196{
1197 struct bnx2x *bp = fp->bp;
1198 struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
1199 struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
1200 struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
1201 dma_addr_t mapping;
1202
1203 /* move empty skb from pool to prod and map it */
1204 prod_rx_buf->skb = fp->tpa_pool[queue].skb;
1205 mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
437cf2f1 1206 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1207 pci_unmap_addr_set(prod_rx_buf, mapping, mapping);
1208
1209 /* move partial skb from cons to pool (don't unmap yet) */
1210 fp->tpa_pool[queue] = *cons_rx_buf;
1211
1212 /* mark bin state as start - print error if current state != stop */
1213 if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
1214 BNX2X_ERR("start of bin not in stop [%d]\n", queue);
1215
1216 fp->tpa_state[queue] = BNX2X_TPA_START;
1217
1218 /* point prod_bd to new skb */
1219 prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
1220 prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
1221
1222#ifdef BNX2X_STOP_ON_ERROR
1223 fp->tpa_queue_used |= (1 << queue);
1224#ifdef __powerpc64__
1225 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
1226#else
1227 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
1228#endif
1229 fp->tpa_queue_used);
1230#endif
1231}
1232
1233static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1234 struct sk_buff *skb,
1235 struct eth_fast_path_rx_cqe *fp_cqe,
1236 u16 cqe_idx)
1237{
1238 struct sw_rx_page *rx_pg, old_rx_pg;
7a9b2557
VZ
1239 u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
1240 u32 i, frag_len, frag_size, pages;
1241 int err;
1242 int j;
1243
1244 frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
4f40f2cb 1245 pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
1246
1247 /* This is needed in order to enable forwarding support */
1248 if (frag_size)
4f40f2cb 1249 skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
7a9b2557
VZ
1250 max(frag_size, (u32)len_on_bd));
1251
1252#ifdef BNX2X_STOP_ON_ERROR
4f40f2cb
EG
1253 if (pages >
1254 min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
7a9b2557
VZ
1255 BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
1256 pages, cqe_idx);
1257 BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
1258 fp_cqe->pkt_len, len_on_bd);
1259 bnx2x_panic();
1260 return -EINVAL;
1261 }
1262#endif
1263
1264 /* Run through the SGL and compose the fragmented skb */
1265 for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
1266 u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));
1267
1268 /* FW gives the indices of the SGE as if the ring is an array
1269 (meaning that "next" element will consume 2 indices) */
4f40f2cb 1270 frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
7a9b2557 1271 rx_pg = &fp->rx_page_ring[sge_idx];
7a9b2557
VZ
1272 old_rx_pg = *rx_pg;
1273
1274 /* If we fail to allocate a substitute page, we simply stop
1275 where we are and drop the whole packet */
1276 err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
1277 if (unlikely(err)) {
de832a55 1278 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1279 return err;
1280 }
1281
1282 /* Unmap the page as we r going to pass it to the stack */
1283 pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
4f40f2cb 1284 SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
7a9b2557
VZ
1285
1286 /* Add one frag and update the appropriate fields in the skb */
1287 skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);
1288
1289 skb->data_len += frag_len;
1290 skb->truesize += frag_len;
1291 skb->len += frag_len;
1292
1293 frag_size -= frag_len;
1294 }
1295
1296 return 0;
1297}
1298
1299static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
1300 u16 queue, int pad, int len, union eth_rx_cqe *cqe,
1301 u16 cqe_idx)
1302{
1303 struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
1304 struct sk_buff *skb = rx_buf->skb;
1305 /* alloc new skb */
1306 struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
1307
1308 /* Unmap skb in the pool anyway, as we are going to change
1309 pool entry status to BNX2X_TPA_STOP even if new skb allocation
1310 fails. */
1311 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
437cf2f1 1312 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
7a9b2557 1313
7a9b2557 1314 if (likely(new_skb)) {
66e855f3
YG
1315 /* fix ip xsum and give it to the stack */
1316 /* (no need to map the new skb) */
0c6671b0
EG
1317#ifdef BCM_VLAN
1318 int is_vlan_cqe =
1319 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1320 PARSING_FLAGS_VLAN);
1321 int is_not_hwaccel_vlan_cqe =
1322 (is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
1323#endif
7a9b2557
VZ
1324
1325 prefetch(skb);
1326 prefetch(((char *)(skb)) + 128);
1327
7a9b2557
VZ
1328#ifdef BNX2X_STOP_ON_ERROR
1329 if (pad + len > bp->rx_buf_size) {
1330 BNX2X_ERR("skb_put is about to fail... "
1331 "pad %d len %d rx_buf_size %d\n",
1332 pad, len, bp->rx_buf_size);
1333 bnx2x_panic();
1334 return;
1335 }
1336#endif
1337
1338 skb_reserve(skb, pad);
1339 skb_put(skb, len);
1340
1341 skb->protocol = eth_type_trans(skb, bp->dev);
1342 skb->ip_summed = CHECKSUM_UNNECESSARY;
1343
1344 {
1345 struct iphdr *iph;
1346
1347 iph = (struct iphdr *)skb->data;
0c6671b0
EG
1348#ifdef BCM_VLAN
1349 /* If there is no Rx VLAN offloading -
1350 take VLAN tag into an account */
1351 if (unlikely(is_not_hwaccel_vlan_cqe))
1352 iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
1353#endif
7a9b2557
VZ
1354 iph->check = 0;
1355 iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
1356 }
1357
1358 if (!bnx2x_fill_frag_skb(bp, fp, skb,
1359 &cqe->fast_path_cqe, cqe_idx)) {
1360#ifdef BCM_VLAN
0c6671b0
EG
1361 if ((bp->vlgrp != NULL) && is_vlan_cqe &&
1362 (!is_not_hwaccel_vlan_cqe))
7a9b2557
VZ
1363 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1364 le16_to_cpu(cqe->fast_path_cqe.
1365 vlan_tag));
1366 else
1367#endif
1368 netif_receive_skb(skb);
1369 } else {
1370 DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
1371 " - dropping packet!\n");
1372 dev_kfree_skb(skb);
1373 }
1374
7a9b2557
VZ
1375
1376 /* put new skb in bin */
1377 fp->tpa_pool[queue].skb = new_skb;
1378
1379 } else {
66e855f3 1380 /* else drop the packet and keep the buffer in the bin */
7a9b2557
VZ
1381 DP(NETIF_MSG_RX_STATUS,
1382 "Failed to allocate new skb - dropping packet!\n");
de832a55 1383 fp->eth_q_stats.rx_skb_alloc_failed++;
7a9b2557
VZ
1384 }
1385
1386 fp->tpa_state[queue] = BNX2X_TPA_STOP;
1387}
1388
1389static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
1390 struct bnx2x_fastpath *fp,
1391 u16 bd_prod, u16 rx_comp_prod,
1392 u16 rx_sge_prod)
1393{
8d9c5f34 1394 struct ustorm_eth_rx_producers rx_prods = {0};
7a9b2557
VZ
1395 int i;
1396
1397 /* Update producers */
1398 rx_prods.bd_prod = bd_prod;
1399 rx_prods.cqe_prod = rx_comp_prod;
1400 rx_prods.sge_prod = rx_sge_prod;
1401
58f4c4cf
EG
1402 /*
1403 * Make sure that the BD and SGE data is updated before updating the
1404 * producers since FW might read the BD/SGE right after the producer
1405 * is updated.
1406 * This is only applicable for weak-ordered memory model archs such
1407 * as IA-64. The following barrier is also mandatory since FW will
1408 * assumes BDs must have buffers.
1409 */
1410 wmb();
1411
8d9c5f34
EG
1412 for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
1413 REG_WR(bp, BAR_USTRORM_INTMEM +
1414 USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
7a9b2557
VZ
1415 ((u32 *)&rx_prods)[i]);
1416
58f4c4cf
EG
1417 mmiowb(); /* keep prod updates ordered */
1418
7a9b2557 1419 DP(NETIF_MSG_RX_STATUS,
555f6c78
EG
1420 "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
1421 fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
7a9b2557
VZ
1422}
1423
a2fbb9ea
ET
1424static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1425{
1426 struct bnx2x *bp = fp->bp;
34f80b04 1427 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1428 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1429 int rx_pkt = 0;
1430
1431#ifdef BNX2X_STOP_ON_ERROR
1432 if (unlikely(bp->panic))
1433 return 0;
1434#endif
1435
34f80b04
EG
1436 /* CQ "next element" is of the size of the regular element,
1437 that's why it's ok here */
a2fbb9ea
ET
1438 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1439 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1440 hw_comp_cons++;
1441
1442 bd_cons = fp->rx_bd_cons;
1443 bd_prod = fp->rx_bd_prod;
34f80b04 1444 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1445 sw_comp_cons = fp->rx_comp_cons;
1446 sw_comp_prod = fp->rx_comp_prod;
1447
1448 /* Memory barrier necessary as speculative reads of the rx
1449 * buffer can be ahead of the index in the status block
1450 */
1451 rmb();
1452
1453 DP(NETIF_MSG_RX_STATUS,
1454 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
34f80b04 1455 FP_IDX(fp), hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1456
1457 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1458 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1459 struct sk_buff *skb;
1460 union eth_rx_cqe *cqe;
34f80b04
EG
1461 u8 cqe_fp_flags;
1462 u16 len, pad;
a2fbb9ea
ET
1463
1464 comp_ring_cons = RCQ_BD(sw_comp_cons);
1465 bd_prod = RX_BD(bd_prod);
1466 bd_cons = RX_BD(bd_cons);
1467
1468 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1469 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1470
a2fbb9ea 1471 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1472 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1473 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1474 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1475 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1476 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1477
1478 /* is this a slowpath msg? */
34f80b04 1479 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1480 bnx2x_sp_event(fp, cqe);
1481 goto next_cqe;
1482
1483 /* this is an rx packet */
1484 } else {
1485 rx_buf = &fp->rx_buf_ring[bd_cons];
1486 skb = rx_buf->skb;
a2fbb9ea
ET
1487 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1488 pad = cqe->fast_path_cqe.placement_offset;
1489
7a9b2557
VZ
1490 /* If CQE is marked both TPA_START and TPA_END
1491 it is a non-TPA CQE */
1492 if ((!fp->disable_tpa) &&
1493 (TPA_TYPE(cqe_fp_flags) !=
1494 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1495 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1496
1497 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1498 DP(NETIF_MSG_RX_STATUS,
1499 "calling tpa_start on queue %d\n",
1500 queue);
1501
1502 bnx2x_tpa_start(fp, queue, skb,
1503 bd_cons, bd_prod);
1504 goto next_rx;
1505 }
1506
1507 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1508 DP(NETIF_MSG_RX_STATUS,
1509 "calling tpa_stop on queue %d\n",
1510 queue);
1511
1512 if (!BNX2X_RX_SUM_FIX(cqe))
1513 BNX2X_ERR("STOP on none TCP "
1514 "data\n");
1515
1516 /* This is a size of the linear data
1517 on this skb */
1518 len = le16_to_cpu(cqe->fast_path_cqe.
1519 len_on_bd);
1520 bnx2x_tpa_stop(bp, fp, queue, pad,
1521 len, cqe, comp_ring_cons);
1522#ifdef BNX2X_STOP_ON_ERROR
1523 if (bp->panic)
1524 return -EINVAL;
1525#endif
1526
1527 bnx2x_update_sge_prod(fp,
1528 &cqe->fast_path_cqe);
1529 goto next_cqe;
1530 }
1531 }
1532
a2fbb9ea
ET
1533 pci_dma_sync_single_for_device(bp->pdev,
1534 pci_unmap_addr(rx_buf, mapping),
1535 pad + RX_COPY_THRESH,
1536 PCI_DMA_FROMDEVICE);
1537 prefetch(skb);
1538 prefetch(((char *)(skb)) + 128);
1539
1540 /* is this an error packet? */
34f80b04 1541 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1542 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1543 "ERROR flags %x rx packet %u\n",
1544 cqe_fp_flags, sw_comp_cons);
de832a55 1545 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1546 goto reuse_rx;
1547 }
1548
1549 /* Since we don't have a jumbo ring
1550 * copy small packets if mtu > 1500
1551 */
1552 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1553 (len <= RX_COPY_THRESH)) {
1554 struct sk_buff *new_skb;
1555
1556 new_skb = netdev_alloc_skb(bp->dev,
1557 len + pad);
1558 if (new_skb == NULL) {
1559 DP(NETIF_MSG_RX_ERR,
34f80b04 1560 "ERROR packet dropped "
a2fbb9ea 1561 "because of alloc failure\n");
de832a55 1562 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1563 goto reuse_rx;
1564 }
1565
1566 /* aligned copy */
1567 skb_copy_from_linear_data_offset(skb, pad,
1568 new_skb->data + pad, len);
1569 skb_reserve(new_skb, pad);
1570 skb_put(new_skb, len);
1571
1572 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1573
1574 skb = new_skb;
1575
1576 } else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
1577 pci_unmap_single(bp->pdev,
1578 pci_unmap_addr(rx_buf, mapping),
437cf2f1 1579 bp->rx_buf_size,
a2fbb9ea
ET
1580 PCI_DMA_FROMDEVICE);
1581 skb_reserve(skb, pad);
1582 skb_put(skb, len);
1583
1584 } else {
1585 DP(NETIF_MSG_RX_ERR,
34f80b04 1586 "ERROR packet dropped because "
a2fbb9ea 1587 "of alloc failure\n");
de832a55 1588 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1589reuse_rx:
1590 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1591 goto next_rx;
1592 }
1593
1594 skb->protocol = eth_type_trans(skb, bp->dev);
1595
1596 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1597 if (bp->rx_csum) {
1adcd8be
EG
1598 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1599 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1600 else
de832a55 1601 fp->eth_q_stats.hw_csum_err++;
66e855f3 1602 }
a2fbb9ea
ET
1603 }
1604
748e5439 1605 skb_record_rx_queue(skb, fp->index);
a2fbb9ea 1606#ifdef BCM_VLAN
0c6671b0 1607 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1608 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1609 PARSING_FLAGS_VLAN))
a2fbb9ea
ET
1610 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1611 le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
1612 else
1613#endif
34f80b04 1614 netif_receive_skb(skb);
a2fbb9ea 1615
a2fbb9ea
ET
1616
1617next_rx:
1618 rx_buf->skb = NULL;
1619
1620 bd_cons = NEXT_RX_IDX(bd_cons);
1621 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1622 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1623 rx_pkt++;
a2fbb9ea
ET
1624next_cqe:
1625 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1626 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1627
34f80b04 1628 if (rx_pkt == budget)
a2fbb9ea
ET
1629 break;
1630 } /* while */
1631
1632 fp->rx_bd_cons = bd_cons;
34f80b04 1633 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1634 fp->rx_comp_cons = sw_comp_cons;
1635 fp->rx_comp_prod = sw_comp_prod;
1636
7a9b2557
VZ
1637 /* Update producers */
1638 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1639 fp->rx_sge_prod);
a2fbb9ea
ET
1640
1641 fp->rx_pkt += rx_pkt;
1642 fp->rx_calls++;
1643
1644 return rx_pkt;
1645}
1646
1647static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1648{
1649 struct bnx2x_fastpath *fp = fp_cookie;
1650 struct bnx2x *bp = fp->bp;
34f80b04 1651 int index = FP_IDX(fp);
a2fbb9ea 1652
da5a662a
VZ
1653 /* Return here if interrupt is disabled */
1654 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1655 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1656 return IRQ_HANDLED;
1657 }
1658
34f80b04
EG
1659 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
1660 index, FP_SB_ID(fp));
1661 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1662
1663#ifdef BNX2X_STOP_ON_ERROR
1664 if (unlikely(bp->panic))
1665 return IRQ_HANDLED;
1666#endif
1667
1668 prefetch(fp->rx_cons_sb);
1669 prefetch(fp->tx_cons_sb);
1670 prefetch(&fp->status_blk->c_status_block.status_block_index);
1671 prefetch(&fp->status_blk->u_status_block.status_block_index);
1672
288379f0 1673 napi_schedule(&bnx2x_fp(bp, index, napi));
34f80b04 1674
a2fbb9ea
ET
1675 return IRQ_HANDLED;
1676}
1677
1678static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1679{
555f6c78 1680 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1681 u16 status = bnx2x_ack_int(bp);
34f80b04 1682 u16 mask;
a2fbb9ea 1683
34f80b04 1684 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1685 if (unlikely(status == 0)) {
1686 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1687 return IRQ_NONE;
1688 }
34f80b04 1689 DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);
a2fbb9ea 1690
34f80b04 1691 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1692 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1693 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1694 return IRQ_HANDLED;
1695 }
1696
3196a88a
EG
1697#ifdef BNX2X_STOP_ON_ERROR
1698 if (unlikely(bp->panic))
1699 return IRQ_HANDLED;
1700#endif
1701
34f80b04
EG
1702 mask = 0x2 << bp->fp[0].sb_id;
1703 if (status & mask) {
a2fbb9ea
ET
1704 struct bnx2x_fastpath *fp = &bp->fp[0];
1705
1706 prefetch(fp->rx_cons_sb);
1707 prefetch(fp->tx_cons_sb);
1708 prefetch(&fp->status_blk->c_status_block.status_block_index);
1709 prefetch(&fp->status_blk->u_status_block.status_block_index);
1710
288379f0 1711 napi_schedule(&bnx2x_fp(bp, 0, napi));
a2fbb9ea 1712
34f80b04 1713 status &= ~mask;
a2fbb9ea
ET
1714 }
1715
a2fbb9ea 1716
34f80b04 1717 if (unlikely(status & 0x1)) {
1cf167f2 1718 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1719
1720 status &= ~0x1;
1721 if (!status)
1722 return IRQ_HANDLED;
1723 }
1724
34f80b04
EG
1725 if (status)
1726 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1727 status);
a2fbb9ea 1728
c18487ee 1729 return IRQ_HANDLED;
a2fbb9ea
ET
1730}
1731
c18487ee 1732/* end of fast path */
a2fbb9ea 1733
bb2a0f7a 1734static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1735
c18487ee
YR
1736/* Link */
1737
1738/*
1739 * General service functions
1740 */
a2fbb9ea 1741
4a37fb66 1742static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1743{
1744 u32 lock_status;
1745 u32 resource_bit = (1 << resource);
4a37fb66
YG
1746 int func = BP_FUNC(bp);
1747 u32 hw_lock_control_reg;
c18487ee 1748 int cnt;
a2fbb9ea 1749
c18487ee
YR
1750 /* Validating that the resource is within range */
1751 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1752 DP(NETIF_MSG_HW,
1753 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1754 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1755 return -EINVAL;
1756 }
a2fbb9ea 1757
4a37fb66
YG
1758 if (func <= 5) {
1759 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1760 } else {
1761 hw_lock_control_reg =
1762 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1763 }
1764
c18487ee 1765 /* Validating that the resource is not already taken */
4a37fb66 1766 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1767 if (lock_status & resource_bit) {
1768 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1769 lock_status, resource_bit);
1770 return -EEXIST;
1771 }
a2fbb9ea 1772
46230476
EG
1773 /* Try for 5 second every 5ms */
1774 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1775 /* Try to acquire the lock */
4a37fb66
YG
1776 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1777 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1778 if (lock_status & resource_bit)
1779 return 0;
a2fbb9ea 1780
c18487ee 1781 msleep(5);
a2fbb9ea 1782 }
c18487ee
YR
1783 DP(NETIF_MSG_HW, "Timeout\n");
1784 return -EAGAIN;
1785}
a2fbb9ea 1786
4a37fb66 1787static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1788{
1789 u32 lock_status;
1790 u32 resource_bit = (1 << resource);
4a37fb66
YG
1791 int func = BP_FUNC(bp);
1792 u32 hw_lock_control_reg;
a2fbb9ea 1793
c18487ee
YR
1794 /* Validating that the resource is within range */
1795 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1796 DP(NETIF_MSG_HW,
1797 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1798 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1799 return -EINVAL;
1800 }
1801
4a37fb66
YG
1802 if (func <= 5) {
1803 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1804 } else {
1805 hw_lock_control_reg =
1806 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1807 }
1808
c18487ee 1809 /* Validating that the resource is currently taken */
4a37fb66 1810 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1811 if (!(lock_status & resource_bit)) {
1812 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1813 lock_status, resource_bit);
1814 return -EFAULT;
a2fbb9ea
ET
1815 }
1816
4a37fb66 1817 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1818 return 0;
1819}
1820
1821/* HW Lock for shared dual port PHYs */
4a37fb66 1822static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1823{
34f80b04 1824 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1825
46c6a674
EG
1826 if (bp->port.need_hw_lock)
1827 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1828}
a2fbb9ea 1829
4a37fb66 1830static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1831{
46c6a674
EG
1832 if (bp->port.need_hw_lock)
1833 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1834
34f80b04 1835 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1836}
a2fbb9ea 1837
4acac6a5
EG
1838int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1839{
1840 /* The GPIO should be swapped if swap register is set and active */
1841 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1842 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1843 int gpio_shift = gpio_num +
1844 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1845 u32 gpio_mask = (1 << gpio_shift);
1846 u32 gpio_reg;
1847 int value;
1848
1849 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1850 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1851 return -EINVAL;
1852 }
1853
1854 /* read GPIO value */
1855 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1856
1857 /* get the requested pin value */
1858 if ((gpio_reg & gpio_mask) == gpio_mask)
1859 value = 1;
1860 else
1861 value = 0;
1862
1863 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1864
1865 return value;
1866}
1867
17de50b7 1868int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1869{
1870 /* The GPIO should be swapped if swap register is set and active */
1871 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1872 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1873 int gpio_shift = gpio_num +
1874 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1875 u32 gpio_mask = (1 << gpio_shift);
1876 u32 gpio_reg;
a2fbb9ea 1877
c18487ee
YR
1878 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1879 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1880 return -EINVAL;
1881 }
a2fbb9ea 1882
4a37fb66 1883 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1884 /* read GPIO and mask except the float bits */
1885 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1886
c18487ee
YR
1887 switch (mode) {
1888 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1889 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1890 gpio_num, gpio_shift);
1891 /* clear FLOAT and set CLR */
1892 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1893 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1894 break;
a2fbb9ea 1895
c18487ee
YR
1896 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1897 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1898 gpio_num, gpio_shift);
1899 /* clear FLOAT and set SET */
1900 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1901 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1902 break;
a2fbb9ea 1903
17de50b7 1904 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1905 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1906 gpio_num, gpio_shift);
1907 /* set FLOAT */
1908 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1909 break;
a2fbb9ea 1910
c18487ee
YR
1911 default:
1912 break;
a2fbb9ea
ET
1913 }
1914
c18487ee 1915 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1916 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1917
c18487ee 1918 return 0;
a2fbb9ea
ET
1919}
1920
4acac6a5
EG
1921int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1922{
1923 /* The GPIO should be swapped if swap register is set and active */
1924 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1925 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1926 int gpio_shift = gpio_num +
1927 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1928 u32 gpio_mask = (1 << gpio_shift);
1929 u32 gpio_reg;
1930
1931 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1932 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1933 return -EINVAL;
1934 }
1935
1936 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1937 /* read GPIO int */
1938 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1939
1940 switch (mode) {
1941 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1942 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1943 "output low\n", gpio_num, gpio_shift);
1944 /* clear SET and set CLR */
1945 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1946 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1947 break;
1948
1949 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1950 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1951 "output high\n", gpio_num, gpio_shift);
1952 /* clear CLR and set SET */
1953 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1954 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1955 break;
1956
1957 default:
1958 break;
1959 }
1960
1961 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1962 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1963
1964 return 0;
1965}
1966
c18487ee 1967static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1968{
c18487ee
YR
1969 u32 spio_mask = (1 << spio_num);
1970 u32 spio_reg;
a2fbb9ea 1971
c18487ee
YR
1972 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1973 (spio_num > MISC_REGISTERS_SPIO_7)) {
1974 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1975 return -EINVAL;
a2fbb9ea
ET
1976 }
1977
4a37fb66 1978 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1979 /* read SPIO and mask except the float bits */
1980 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1981
c18487ee 1982 switch (mode) {
6378c025 1983 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1984 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1985 /* clear FLOAT and set CLR */
1986 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1987 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1988 break;
a2fbb9ea 1989
6378c025 1990 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1991 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1992 /* clear FLOAT and set SET */
1993 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1994 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1995 break;
a2fbb9ea 1996
c18487ee
YR
1997 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1998 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1999 /* set FLOAT */
2000 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2001 break;
a2fbb9ea 2002
c18487ee
YR
2003 default:
2004 break;
a2fbb9ea
ET
2005 }
2006
c18487ee 2007 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2008 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2009
a2fbb9ea
ET
2010 return 0;
2011}
2012
c18487ee 2013static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2014{
ad33ea3a
EG
2015 switch (bp->link_vars.ieee_fc &
2016 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2017 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2018 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2019 ADVERTISED_Pause);
2020 break;
2021 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2022 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2023 ADVERTISED_Pause);
2024 break;
2025 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2026 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
2027 break;
2028 default:
34f80b04 2029 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2030 ADVERTISED_Pause);
2031 break;
2032 }
2033}
f1410647 2034
c18487ee
YR
2035static void bnx2x_link_report(struct bnx2x *bp)
2036{
2037 if (bp->link_vars.link_up) {
2038 if (bp->state == BNX2X_STATE_OPEN)
2039 netif_carrier_on(bp->dev);
2040 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2041
c18487ee 2042 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2043
c18487ee
YR
2044 if (bp->link_vars.duplex == DUPLEX_FULL)
2045 printk("full duplex");
2046 else
2047 printk("half duplex");
f1410647 2048
c0700f90
DM
2049 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2050 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2051 printk(", receive ");
c0700f90 2052 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
2053 printk("& transmit ");
2054 } else {
2055 printk(", transmit ");
2056 }
2057 printk("flow control ON");
2058 }
2059 printk("\n");
f1410647 2060
c18487ee
YR
2061 } else { /* link_down */
2062 netif_carrier_off(bp->dev);
2063 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2064 }
c18487ee
YR
2065}
2066
2067static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
2068{
19680c48
EG
2069 if (!BP_NOMCP(bp)) {
2070 u8 rc;
a2fbb9ea 2071
19680c48 2072 /* Initialize link parameters structure variables */
8c99e7b0
YR
2073 /* It is recommended to turn off RX FC for jumbo frames
2074 for better performance */
2075 if (IS_E1HMF(bp))
c0700f90 2076 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2077 else if (bp->dev->mtu > 5000)
c0700f90 2078 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2079 else
c0700f90 2080 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2081
4a37fb66 2082 bnx2x_acquire_phy_lock(bp);
19680c48 2083 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2084 bnx2x_release_phy_lock(bp);
a2fbb9ea 2085
3c96c68b
EG
2086 bnx2x_calc_fc_adv(bp);
2087
19680c48
EG
2088 if (bp->link_vars.link_up)
2089 bnx2x_link_report(bp);
a2fbb9ea 2090
34f80b04 2091
19680c48
EG
2092 return rc;
2093 }
2094 BNX2X_ERR("Bootcode is missing -not initializing link\n");
2095 return -EINVAL;
a2fbb9ea
ET
2096}
2097
c18487ee 2098static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2099{
19680c48 2100 if (!BP_NOMCP(bp)) {
4a37fb66 2101 bnx2x_acquire_phy_lock(bp);
19680c48 2102 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2103 bnx2x_release_phy_lock(bp);
a2fbb9ea 2104
19680c48
EG
2105 bnx2x_calc_fc_adv(bp);
2106 } else
2107 BNX2X_ERR("Bootcode is missing -not setting link\n");
c18487ee 2108}
a2fbb9ea 2109
c18487ee
YR
2110static void bnx2x__link_reset(struct bnx2x *bp)
2111{
19680c48 2112 if (!BP_NOMCP(bp)) {
4a37fb66 2113 bnx2x_acquire_phy_lock(bp);
589abe3a 2114 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2115 bnx2x_release_phy_lock(bp);
19680c48
EG
2116 } else
2117 BNX2X_ERR("Bootcode is missing -not resetting link\n");
c18487ee 2118}
a2fbb9ea 2119
c18487ee
YR
2120static u8 bnx2x_link_test(struct bnx2x *bp)
2121{
2122 u8 rc;
a2fbb9ea 2123
4a37fb66 2124 bnx2x_acquire_phy_lock(bp);
c18487ee 2125 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2126 bnx2x_release_phy_lock(bp);
a2fbb9ea 2127
c18487ee
YR
2128 return rc;
2129}
a2fbb9ea 2130
8a1c38d1 2131static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2132{
8a1c38d1
EG
2133 u32 r_param = bp->link_vars.line_speed / 8;
2134 u32 fair_periodic_timeout_usec;
2135 u32 t_fair;
34f80b04 2136
8a1c38d1
EG
2137 memset(&(bp->cmng.rs_vars), 0,
2138 sizeof(struct rate_shaping_vars_per_port));
2139 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2140
8a1c38d1
EG
2141 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2142 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2143
8a1c38d1
EG
2144 /* this is the threshold below which no timer arming will occur
2145 1.25 coefficient is for the threshold to be a little bigger
2146 than the real time, to compensate for timer in-accuracy */
2147 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2148 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2149
8a1c38d1
EG
2150 /* resolution of fairness timer */
2151 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2152 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2153 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2154
8a1c38d1
EG
2155 /* this is the threshold below which we won't arm the timer anymore */
2156 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2157
8a1c38d1
EG
2158 /* we multiply by 1e3/8 to get bytes/msec.
2159 We don't want the credits to pass a credit
2160 of the t_fair*FAIR_MEM (algorithm resolution) */
2161 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2162 /* since each tick is 4 usec */
2163 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2164}
2165
8a1c38d1 2166static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2167{
2168 struct rate_shaping_vars_per_vn m_rs_vn;
2169 struct fairness_vars_per_vn m_fair_vn;
2170 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2171 u16 vn_min_rate, vn_max_rate;
2172 int i;
2173
2174 /* If function is hidden - set min and max to zeroes */
2175 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2176 vn_min_rate = 0;
2177 vn_max_rate = 0;
2178
2179 } else {
2180 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2181 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2182 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2183 if current min rate is zero - set it to 1.
33471629 2184 This is a requirement of the algorithm. */
8a1c38d1 2185 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2186 vn_min_rate = DEF_MIN_RATE;
2187 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2188 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2189 }
2190
8a1c38d1
EG
2191 DP(NETIF_MSG_IFUP,
2192 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2193 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2194
2195 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2196 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2197
2198 /* global vn counter - maximal Mbps for this vn */
2199 m_rs_vn.vn_counter.rate = vn_max_rate;
2200
2201 /* quota - number of bytes transmitted in this period */
2202 m_rs_vn.vn_counter.quota =
2203 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2204
8a1c38d1 2205 if (bp->vn_weight_sum) {
34f80b04
EG
2206 /* credit for each period of the fairness algorithm:
2207 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2208 vn_weight_sum should not be larger than 10000, thus
2209 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2210 than zero */
34f80b04 2211 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2212 max((u32)(vn_min_rate * (T_FAIR_COEF /
2213 (8 * bp->vn_weight_sum))),
2214 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2215 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2216 m_fair_vn.vn_credit_delta);
2217 }
2218
34f80b04
EG
2219 /* Store it to internal memory */
2220 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2221 REG_WR(bp, BAR_XSTRORM_INTMEM +
2222 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2223 ((u32 *)(&m_rs_vn))[i]);
2224
2225 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2226 REG_WR(bp, BAR_XSTRORM_INTMEM +
2227 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2228 ((u32 *)(&m_fair_vn))[i]);
2229}
2230
8a1c38d1 2231
c18487ee
YR
2232/* This function is called upon link interrupt */
2233static void bnx2x_link_attn(struct bnx2x *bp)
2234{
bb2a0f7a
YG
2235 /* Make sure that we are synced with the current statistics */
2236 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2237
c18487ee 2238 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2239
bb2a0f7a
YG
2240 if (bp->link_vars.link_up) {
2241
1c06328c
EG
2242 /* dropless flow control */
2243 if (CHIP_IS_E1H(bp)) {
2244 int port = BP_PORT(bp);
2245 u32 pause_enabled = 0;
2246
2247 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2248 pause_enabled = 1;
2249
2250 REG_WR(bp, BAR_USTRORM_INTMEM +
2251 USTORM_PAUSE_ENABLED_OFFSET(port),
2252 pause_enabled);
2253 }
2254
bb2a0f7a
YG
2255 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2256 struct host_port_stats *pstats;
2257
2258 pstats = bnx2x_sp(bp, port_stats);
2259 /* reset old bmac stats */
2260 memset(&(pstats->mac_stx[0]), 0,
2261 sizeof(struct mac_stx));
2262 }
2263 if ((bp->state == BNX2X_STATE_OPEN) ||
2264 (bp->state == BNX2X_STATE_DISABLED))
2265 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2266 }
2267
c18487ee
YR
2268 /* indicate link status */
2269 bnx2x_link_report(bp);
34f80b04
EG
2270
2271 if (IS_E1HMF(bp)) {
8a1c38d1 2272 int port = BP_PORT(bp);
34f80b04 2273 int func;
8a1c38d1 2274 int vn;
34f80b04
EG
2275
2276 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2277 if (vn == BP_E1HVN(bp))
2278 continue;
2279
8a1c38d1 2280 func = ((vn << 1) | port);
34f80b04
EG
2281
2282 /* Set the attention towards other drivers
2283 on the same port */
2284 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2285 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2286 }
34f80b04 2287
8a1c38d1
EG
2288 if (bp->link_vars.link_up) {
2289 int i;
2290
2291 /* Init rate shaping and fairness contexts */
2292 bnx2x_init_port_minmax(bp);
34f80b04 2293
34f80b04 2294 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2295 bnx2x_init_vn_minmax(bp, 2*vn + port);
2296
2297 /* Store it to internal memory */
2298 for (i = 0;
2299 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2300 REG_WR(bp, BAR_XSTRORM_INTMEM +
2301 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2302 ((u32 *)(&bp->cmng))[i]);
2303 }
34f80b04 2304 }
c18487ee 2305}
a2fbb9ea 2306
c18487ee
YR
2307static void bnx2x__link_status_update(struct bnx2x *bp)
2308{
2309 if (bp->state != BNX2X_STATE_OPEN)
2310 return;
a2fbb9ea 2311
c18487ee 2312 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2313
bb2a0f7a
YG
2314 if (bp->link_vars.link_up)
2315 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2316 else
2317 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2318
c18487ee
YR
2319 /* indicate link status */
2320 bnx2x_link_report(bp);
a2fbb9ea 2321}
a2fbb9ea 2322
34f80b04
EG
2323static void bnx2x_pmf_update(struct bnx2x *bp)
2324{
2325 int port = BP_PORT(bp);
2326 u32 val;
2327
2328 bp->port.pmf = 1;
2329 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2330
2331 /* enable nig attention */
2332 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2333 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2334 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2335
2336 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2337}
2338
c18487ee 2339/* end of Link */
a2fbb9ea
ET
2340
2341/* slow path */
2342
2343/*
2344 * General service functions
2345 */
2346
2347/* the slow path queue is odd since completions arrive on the fastpath ring */
2348static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2349 u32 data_hi, u32 data_lo, int common)
2350{
34f80b04 2351 int func = BP_FUNC(bp);
a2fbb9ea 2352
34f80b04
EG
2353 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2354 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2355 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2356 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2357 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2358
2359#ifdef BNX2X_STOP_ON_ERROR
2360 if (unlikely(bp->panic))
2361 return -EIO;
2362#endif
2363
34f80b04 2364 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2365
2366 if (!bp->spq_left) {
2367 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2368 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2369 bnx2x_panic();
2370 return -EBUSY;
2371 }
f1410647 2372
a2fbb9ea
ET
2373 /* CID needs port number to be encoded int it */
2374 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2375 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2376 HW_CID(bp, cid)));
2377 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2378 if (common)
2379 bp->spq_prod_bd->hdr.type |=
2380 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2381
2382 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2383 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2384
2385 bp->spq_left--;
2386
2387 if (bp->spq_prod_bd == bp->spq_last_bd) {
2388 bp->spq_prod_bd = bp->spq;
2389 bp->spq_prod_idx = 0;
2390 DP(NETIF_MSG_TIMER, "end of spq\n");
2391
2392 } else {
2393 bp->spq_prod_bd++;
2394 bp->spq_prod_idx++;
2395 }
2396
34f80b04 2397 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2398 bp->spq_prod_idx);
2399
34f80b04 2400 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2401 return 0;
2402}
2403
2404/* acquire split MCP access lock register */
4a37fb66 2405static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2406{
a2fbb9ea 2407 u32 i, j, val;
34f80b04 2408 int rc = 0;
a2fbb9ea
ET
2409
2410 might_sleep();
2411 i = 100;
2412 for (j = 0; j < i*10; j++) {
2413 val = (1UL << 31);
2414 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2415 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2416 if (val & (1L << 31))
2417 break;
2418
2419 msleep(5);
2420 }
a2fbb9ea 2421 if (!(val & (1L << 31))) {
19680c48 2422 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2423 rc = -EBUSY;
2424 }
2425
2426 return rc;
2427}
2428
4a37fb66
YG
2429/* release split MCP access lock register */
2430static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2431{
2432 u32 val = 0;
2433
2434 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2435}
2436
2437static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2438{
2439 struct host_def_status_block *def_sb = bp->def_status_blk;
2440 u16 rc = 0;
2441
2442 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2443 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2444 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2445 rc |= 1;
2446 }
2447 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2448 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2449 rc |= 2;
2450 }
2451 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2452 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2453 rc |= 4;
2454 }
2455 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2456 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2457 rc |= 8;
2458 }
2459 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2460 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2461 rc |= 16;
2462 }
2463 return rc;
2464}
2465
2466/*
2467 * slow path service functions
2468 */
2469
2470static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2471{
34f80b04 2472 int port = BP_PORT(bp);
5c862848
EG
2473 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2474 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2475 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2476 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2477 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2478 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2479 u32 aeu_mask;
87942b46 2480 u32 nig_mask = 0;
a2fbb9ea 2481
a2fbb9ea
ET
2482 if (bp->attn_state & asserted)
2483 BNX2X_ERR("IGU ERROR\n");
2484
3fcaf2e5
EG
2485 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2486 aeu_mask = REG_RD(bp, aeu_addr);
2487
a2fbb9ea 2488 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2489 aeu_mask, asserted);
2490 aeu_mask &= ~(asserted & 0xff);
2491 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2492
3fcaf2e5
EG
2493 REG_WR(bp, aeu_addr, aeu_mask);
2494 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2495
3fcaf2e5 2496 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2497 bp->attn_state |= asserted;
3fcaf2e5 2498 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2499
2500 if (asserted & ATTN_HARD_WIRED_MASK) {
2501 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2502
a5e9a7cf
EG
2503 bnx2x_acquire_phy_lock(bp);
2504
877e9aa4 2505 /* save nig interrupt mask */
87942b46 2506 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2507 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2508
c18487ee 2509 bnx2x_link_attn(bp);
a2fbb9ea
ET
2510
2511 /* handle unicore attn? */
2512 }
2513 if (asserted & ATTN_SW_TIMER_4_FUNC)
2514 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2515
2516 if (asserted & GPIO_2_FUNC)
2517 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2518
2519 if (asserted & GPIO_3_FUNC)
2520 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2521
2522 if (asserted & GPIO_4_FUNC)
2523 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2524
2525 if (port == 0) {
2526 if (asserted & ATTN_GENERAL_ATTN_1) {
2527 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2528 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2529 }
2530 if (asserted & ATTN_GENERAL_ATTN_2) {
2531 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2532 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2533 }
2534 if (asserted & ATTN_GENERAL_ATTN_3) {
2535 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2536 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2537 }
2538 } else {
2539 if (asserted & ATTN_GENERAL_ATTN_4) {
2540 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2541 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2542 }
2543 if (asserted & ATTN_GENERAL_ATTN_5) {
2544 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2545 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2546 }
2547 if (asserted & ATTN_GENERAL_ATTN_6) {
2548 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2549 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2550 }
2551 }
2552
2553 } /* if hardwired */
2554
5c862848
EG
2555 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2556 asserted, hc_addr);
2557 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2558
2559 /* now set back the mask */
a5e9a7cf 2560 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2561 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2562 bnx2x_release_phy_lock(bp);
2563 }
a2fbb9ea
ET
2564}
2565
877e9aa4 2566static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2567{
34f80b04 2568 int port = BP_PORT(bp);
877e9aa4
ET
2569 int reg_offset;
2570 u32 val;
2571
34f80b04
EG
2572 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2573 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2574
34f80b04 2575 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2576
2577 val = REG_RD(bp, reg_offset);
2578 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2579 REG_WR(bp, reg_offset, val);
2580
2581 BNX2X_ERR("SPIO5 hw attention\n");
2582
35b19ba5
EG
2583 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2584 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
877e9aa4
ET
2585 /* Fan failure attention */
2586
17de50b7 2587 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2588 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2589 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2590 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2591 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2592 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2593 /* mark the failure */
c18487ee 2594 bp->link_params.ext_phy_config &=
877e9aa4 2595 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2596 bp->link_params.ext_phy_config |=
877e9aa4
ET
2597 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2598 SHMEM_WR(bp,
2599 dev_info.port_hw_config[port].
2600 external_phy_config,
c18487ee 2601 bp->link_params.ext_phy_config);
877e9aa4
ET
2602 /* log the failure */
2603 printk(KERN_ERR PFX "Fan Failure on Network"
2604 " Controller %s has caused the driver to"
2605 " shutdown the card to prevent permanent"
2606 " damage. Please contact Dell Support for"
2607 " assistance\n", bp->dev->name);
2608 break;
2609
2610 default:
2611 break;
2612 }
2613 }
34f80b04 2614
589abe3a
EG
2615 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2616 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2617 bnx2x_acquire_phy_lock(bp);
2618 bnx2x_handle_module_detect_int(&bp->link_params);
2619 bnx2x_release_phy_lock(bp);
2620 }
2621
34f80b04
EG
2622 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2623
2624 val = REG_RD(bp, reg_offset);
2625 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2626 REG_WR(bp, reg_offset, val);
2627
2628 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2629 (attn & HW_INTERRUT_ASSERT_SET_0));
2630 bnx2x_panic();
2631 }
877e9aa4
ET
2632}
2633
2634static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2635{
2636 u32 val;
2637
2638 if (attn & BNX2X_DOORQ_ASSERT) {
2639
2640 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2641 BNX2X_ERR("DB hw attention 0x%x\n", val);
2642 /* DORQ discard attention */
2643 if (val & 0x2)
2644 BNX2X_ERR("FATAL error from DORQ\n");
2645 }
34f80b04
EG
2646
2647 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2648
2649 int port = BP_PORT(bp);
2650 int reg_offset;
2651
2652 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2653 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2654
2655 val = REG_RD(bp, reg_offset);
2656 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2657 REG_WR(bp, reg_offset, val);
2658
2659 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2660 (attn & HW_INTERRUT_ASSERT_SET_1));
2661 bnx2x_panic();
2662 }
877e9aa4
ET
2663}
2664
2665static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2666{
2667 u32 val;
2668
2669 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2670
2671 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2672 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2673 /* CFC error attention */
2674 if (val & 0x2)
2675 BNX2X_ERR("FATAL error from CFC\n");
2676 }
2677
2678 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2679
2680 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2681 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2682 /* RQ_USDMDP_FIFO_OVERFLOW */
2683 if (val & 0x18000)
2684 BNX2X_ERR("FATAL error from PXP\n");
2685 }
34f80b04
EG
2686
2687 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2688
2689 int port = BP_PORT(bp);
2690 int reg_offset;
2691
2692 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2693 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2694
2695 val = REG_RD(bp, reg_offset);
2696 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2697 REG_WR(bp, reg_offset, val);
2698
2699 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2700 (attn & HW_INTERRUT_ASSERT_SET_2));
2701 bnx2x_panic();
2702 }
877e9aa4
ET
2703}
2704
2705static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2706{
34f80b04
EG
2707 u32 val;
2708
877e9aa4
ET
2709 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2710
34f80b04
EG
2711 if (attn & BNX2X_PMF_LINK_ASSERT) {
2712 int func = BP_FUNC(bp);
2713
2714 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2715 bnx2x__link_status_update(bp);
2716 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2717 DRV_STATUS_PMF)
2718 bnx2x_pmf_update(bp);
2719
2720 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2721
2722 BNX2X_ERR("MC assert!\n");
2723 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2724 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2725 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2726 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2727 bnx2x_panic();
2728
2729 } else if (attn & BNX2X_MCP_ASSERT) {
2730
2731 BNX2X_ERR("MCP assert!\n");
2732 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2733 bnx2x_fw_dump(bp);
877e9aa4
ET
2734
2735 } else
2736 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2737 }
2738
2739 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2740 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2741 if (attn & BNX2X_GRC_TIMEOUT) {
2742 val = CHIP_IS_E1H(bp) ?
2743 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2744 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2745 }
2746 if (attn & BNX2X_GRC_RSV) {
2747 val = CHIP_IS_E1H(bp) ?
2748 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2749 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2750 }
877e9aa4 2751 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2752 }
2753}
2754
2755static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2756{
a2fbb9ea
ET
2757 struct attn_route attn;
2758 struct attn_route group_mask;
34f80b04 2759 int port = BP_PORT(bp);
877e9aa4 2760 int index;
a2fbb9ea
ET
2761 u32 reg_addr;
2762 u32 val;
3fcaf2e5 2763 u32 aeu_mask;
a2fbb9ea
ET
2764
2765 /* need to take HW lock because MCP or other port might also
2766 try to handle this event */
4a37fb66 2767 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2768
2769 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2770 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2771 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2772 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2773 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2774 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2775
2776 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2777 if (deasserted & (1 << index)) {
2778 group_mask = bp->attn_group[index];
2779
34f80b04
EG
2780 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2781 index, group_mask.sig[0], group_mask.sig[1],
2782 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2783
877e9aa4
ET
2784 bnx2x_attn_int_deasserted3(bp,
2785 attn.sig[3] & group_mask.sig[3]);
2786 bnx2x_attn_int_deasserted1(bp,
2787 attn.sig[1] & group_mask.sig[1]);
2788 bnx2x_attn_int_deasserted2(bp,
2789 attn.sig[2] & group_mask.sig[2]);
2790 bnx2x_attn_int_deasserted0(bp,
2791 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2792
a2fbb9ea
ET
2793 if ((attn.sig[0] & group_mask.sig[0] &
2794 HW_PRTY_ASSERT_SET_0) ||
2795 (attn.sig[1] & group_mask.sig[1] &
2796 HW_PRTY_ASSERT_SET_1) ||
2797 (attn.sig[2] & group_mask.sig[2] &
2798 HW_PRTY_ASSERT_SET_2))
6378c025 2799 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2800 }
2801 }
2802
4a37fb66 2803 bnx2x_release_alr(bp);
a2fbb9ea 2804
5c862848 2805 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2806
2807 val = ~deasserted;
3fcaf2e5
EG
2808 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2809 val, reg_addr);
5c862848 2810 REG_WR(bp, reg_addr, val);
a2fbb9ea 2811
a2fbb9ea 2812 if (~bp->attn_state & deasserted)
3fcaf2e5 2813 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2814
2815 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2816 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2817
3fcaf2e5
EG
2818 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2819 aeu_mask = REG_RD(bp, reg_addr);
2820
2821 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2822 aeu_mask, deasserted);
2823 aeu_mask |= (deasserted & 0xff);
2824 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2825
3fcaf2e5
EG
2826 REG_WR(bp, reg_addr, aeu_mask);
2827 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2828
2829 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2830 bp->attn_state &= ~deasserted;
2831 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2832}
2833
2834static void bnx2x_attn_int(struct bnx2x *bp)
2835{
2836 /* read local copy of bits */
68d59484
EG
2837 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2838 attn_bits);
2839 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2840 attn_bits_ack);
a2fbb9ea
ET
2841 u32 attn_state = bp->attn_state;
2842
2843 /* look for changed bits */
2844 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2845 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2846
2847 DP(NETIF_MSG_HW,
2848 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2849 attn_bits, attn_ack, asserted, deasserted);
2850
2851 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2852 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2853
2854 /* handle bits that were raised */
2855 if (asserted)
2856 bnx2x_attn_int_asserted(bp, asserted);
2857
2858 if (deasserted)
2859 bnx2x_attn_int_deasserted(bp, deasserted);
2860}
2861
2862static void bnx2x_sp_task(struct work_struct *work)
2863{
1cf167f2 2864 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2865 u16 status;
2866
34f80b04 2867
a2fbb9ea
ET
2868 /* Return here if interrupt is disabled */
2869 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2870 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2871 return;
2872 }
2873
2874 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2875/* if (status == 0) */
2876/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2877
3196a88a 2878 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2879
877e9aa4
ET
2880 /* HW attentions */
2881 if (status & 0x1)
a2fbb9ea 2882 bnx2x_attn_int(bp);
a2fbb9ea 2883
68d59484 2884 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2885 IGU_INT_NOP, 1);
2886 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2887 IGU_INT_NOP, 1);
2888 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2889 IGU_INT_NOP, 1);
2890 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2891 IGU_INT_NOP, 1);
2892 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2893 IGU_INT_ENABLE, 1);
877e9aa4 2894
a2fbb9ea
ET
2895}
2896
2897static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2898{
2899 struct net_device *dev = dev_instance;
2900 struct bnx2x *bp = netdev_priv(dev);
2901
2902 /* Return here if interrupt is disabled */
2903 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2904 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2905 return IRQ_HANDLED;
2906 }
2907
8d9c5f34 2908 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2909
2910#ifdef BNX2X_STOP_ON_ERROR
2911 if (unlikely(bp->panic))
2912 return IRQ_HANDLED;
2913#endif
2914
1cf167f2 2915 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2916
2917 return IRQ_HANDLED;
2918}
2919
2920/* end of slow path */
2921
2922/* Statistics */
2923
2924/****************************************************************************
2925* Macros
2926****************************************************************************/
2927
a2fbb9ea
ET
2928/* sum[hi:lo] += add[hi:lo] */
2929#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2930 do { \
2931 s_lo += a_lo; \
f5ba6772 2932 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2933 } while (0)
2934
2935/* difference = minuend - subtrahend */
2936#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2937 do { \
bb2a0f7a
YG
2938 if (m_lo < s_lo) { \
2939 /* underflow */ \
a2fbb9ea 2940 d_hi = m_hi - s_hi; \
bb2a0f7a 2941 if (d_hi > 0) { \
6378c025 2942 /* we can 'loan' 1 */ \
a2fbb9ea
ET
2943 d_hi--; \
2944 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2945 } else { \
6378c025 2946 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2947 d_hi = 0; \
2948 d_lo = 0; \
2949 } \
bb2a0f7a
YG
2950 } else { \
2951 /* m_lo >= s_lo */ \
a2fbb9ea 2952 if (m_hi < s_hi) { \
bb2a0f7a
YG
2953 d_hi = 0; \
2954 d_lo = 0; \
2955 } else { \
6378c025 2956 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2957 d_hi = m_hi - s_hi; \
2958 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2959 } \
2960 } \
2961 } while (0)
2962
bb2a0f7a 2963#define UPDATE_STAT64(s, t) \
a2fbb9ea 2964 do { \
bb2a0f7a
YG
2965 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2966 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2967 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2968 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2969 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2970 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2971 } while (0)
2972
bb2a0f7a 2973#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2974 do { \
bb2a0f7a
YG
2975 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2976 diff.lo, new->s##_lo, old->s##_lo); \
2977 ADD_64(estats->t##_hi, diff.hi, \
2978 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2979 } while (0)
2980
2981/* sum[hi:lo] += add */
2982#define ADD_EXTEND_64(s_hi, s_lo, a) \
2983 do { \
2984 s_lo += a; \
2985 s_hi += (s_lo < a) ? 1 : 0; \
2986 } while (0)
2987
bb2a0f7a 2988#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2989 do { \
bb2a0f7a
YG
2990 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2991 pstats->mac_stx[1].s##_lo, \
2992 new->s); \
a2fbb9ea
ET
2993 } while (0)
2994
bb2a0f7a 2995#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2996 do { \
2997 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2998 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
2999 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3000 } while (0)
3001
3002#define UPDATE_EXTEND_USTAT(s, t) \
3003 do { \
3004 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3005 old_uclient->s = uclient->s; \
3006 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
3007 } while (0)
3008
3009#define UPDATE_EXTEND_XSTAT(s, t) \
3010 do { \
3011 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
3012 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
3013 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3014 } while (0)
3015
3016/* minuend -= subtrahend */
3017#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3018 do { \
3019 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3020 } while (0)
3021
3022/* minuend[hi:lo] -= subtrahend */
3023#define SUB_EXTEND_64(m_hi, m_lo, s) \
3024 do { \
3025 SUB_64(m_hi, 0, m_lo, s); \
3026 } while (0)
3027
3028#define SUB_EXTEND_USTAT(s, t) \
3029 do { \
3030 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3031 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
3032 } while (0)
3033
3034/*
3035 * General service functions
3036 */
3037
3038static inline long bnx2x_hilo(u32 *hiref)
3039{
3040 u32 lo = *(hiref + 1);
3041#if (BITS_PER_LONG == 64)
3042 u32 hi = *hiref;
3043
3044 return HILO_U64(hi, lo);
3045#else
3046 return lo;
3047#endif
3048}
3049
3050/*
3051 * Init service functions
3052 */
3053
bb2a0f7a
YG
3054static void bnx2x_storm_stats_post(struct bnx2x *bp)
3055{
3056 if (!bp->stats_pending) {
3057 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3058 int i, rc;
bb2a0f7a
YG
3059
3060 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3061 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
3062 for_each_queue(bp, i)
3063 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
3064
3065 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3066 ((u32 *)&ramrod_data)[1],
3067 ((u32 *)&ramrod_data)[0], 0);
3068 if (rc == 0) {
3069 /* stats ramrod has it's own slot on the spq */
3070 bp->spq_left++;
3071 bp->stats_pending = 1;
3072 }
3073 }
3074}
3075
3076static void bnx2x_stats_init(struct bnx2x *bp)
3077{
3078 int port = BP_PORT(bp);
de832a55 3079 int i;
bb2a0f7a 3080
de832a55 3081 bp->stats_pending = 0;
bb2a0f7a
YG
3082 bp->executer_idx = 0;
3083 bp->stats_counter = 0;
3084
3085 /* port stats */
3086 if (!BP_NOMCP(bp))
3087 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3088 else
3089 bp->port.port_stx = 0;
3090 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3091
3092 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3093 bp->port.old_nig_stats.brb_discard =
3094 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3095 bp->port.old_nig_stats.brb_truncate =
3096 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3097 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3098 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3099 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3100 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3101
3102 /* function stats */
de832a55
EG
3103 for_each_queue(bp, i) {
3104 struct bnx2x_fastpath *fp = &bp->fp[i];
3105
3106 memset(&fp->old_tclient, 0,
3107 sizeof(struct tstorm_per_client_stats));
3108 memset(&fp->old_uclient, 0,
3109 sizeof(struct ustorm_per_client_stats));
3110 memset(&fp->old_xclient, 0,
3111 sizeof(struct xstorm_per_client_stats));
3112 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3113 }
3114
bb2a0f7a 3115 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3116 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3117
3118 bp->stats_state = STATS_STATE_DISABLED;
3119 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3120 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3121}
3122
3123static void bnx2x_hw_stats_post(struct bnx2x *bp)
3124{
3125 struct dmae_command *dmae = &bp->stats_dmae;
3126 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3127
3128 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3129 if (CHIP_REV_IS_SLOW(bp))
3130 return;
bb2a0f7a
YG
3131
3132 /* loader */
3133 if (bp->executer_idx) {
3134 int loader_idx = PMF_DMAE_C(bp);
3135
3136 memset(dmae, 0, sizeof(struct dmae_command));
3137
3138 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3139 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3140 DMAE_CMD_DST_RESET |
3141#ifdef __BIG_ENDIAN
3142 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3143#else
3144 DMAE_CMD_ENDIANITY_DW_SWAP |
3145#endif
3146 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3147 DMAE_CMD_PORT_0) |
3148 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3149 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3150 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3151 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3152 sizeof(struct dmae_command) *
3153 (loader_idx + 1)) >> 2;
3154 dmae->dst_addr_hi = 0;
3155 dmae->len = sizeof(struct dmae_command) >> 2;
3156 if (CHIP_IS_E1(bp))
3157 dmae->len--;
3158 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3159 dmae->comp_addr_hi = 0;
3160 dmae->comp_val = 1;
3161
3162 *stats_comp = 0;
3163 bnx2x_post_dmae(bp, dmae, loader_idx);
3164
3165 } else if (bp->func_stx) {
3166 *stats_comp = 0;
3167 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3168 }
3169}
3170
3171static int bnx2x_stats_comp(struct bnx2x *bp)
3172{
3173 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3174 int cnt = 10;
3175
3176 might_sleep();
3177 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3178 if (!cnt) {
3179 BNX2X_ERR("timeout waiting for stats finished\n");
3180 break;
3181 }
3182 cnt--;
12469401 3183 msleep(1);
bb2a0f7a
YG
3184 }
3185 return 1;
3186}
3187
3188/*
3189 * Statistics service functions
3190 */
3191
3192static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3193{
3194 struct dmae_command *dmae;
3195 u32 opcode;
3196 int loader_idx = PMF_DMAE_C(bp);
3197 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3198
3199 /* sanity */
3200 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3201 BNX2X_ERR("BUG!\n");
3202 return;
3203 }
3204
3205 bp->executer_idx = 0;
3206
3207 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3208 DMAE_CMD_C_ENABLE |
3209 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3210#ifdef __BIG_ENDIAN
3211 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3212#else
3213 DMAE_CMD_ENDIANITY_DW_SWAP |
3214#endif
3215 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3216 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3217
3218 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3219 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3220 dmae->src_addr_lo = bp->port.port_stx >> 2;
3221 dmae->src_addr_hi = 0;
3222 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3223 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3224 dmae->len = DMAE_LEN32_RD_MAX;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3226 dmae->comp_addr_hi = 0;
3227 dmae->comp_val = 1;
3228
3229 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3230 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3231 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3232 dmae->src_addr_hi = 0;
7a9b2557
VZ
3233 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3234 DMAE_LEN32_RD_MAX * 4);
3235 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3236 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3237 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3238 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3239 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3240 dmae->comp_val = DMAE_COMP_VAL;
3241
3242 *stats_comp = 0;
3243 bnx2x_hw_stats_post(bp);
3244 bnx2x_stats_comp(bp);
3245}
3246
3247static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3248{
3249 struct dmae_command *dmae;
34f80b04 3250 int port = BP_PORT(bp);
bb2a0f7a 3251 int vn = BP_E1HVN(bp);
a2fbb9ea 3252 u32 opcode;
bb2a0f7a 3253 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3254 u32 mac_addr;
bb2a0f7a
YG
3255 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3256
3257 /* sanity */
3258 if (!bp->link_vars.link_up || !bp->port.pmf) {
3259 BNX2X_ERR("BUG!\n");
3260 return;
3261 }
a2fbb9ea
ET
3262
3263 bp->executer_idx = 0;
bb2a0f7a
YG
3264
3265 /* MCP */
3266 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3267 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3268 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3269#ifdef __BIG_ENDIAN
bb2a0f7a 3270 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3271#else
bb2a0f7a 3272 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3273#endif
bb2a0f7a
YG
3274 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3275 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3276
bb2a0f7a 3277 if (bp->port.port_stx) {
a2fbb9ea
ET
3278
3279 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3280 dmae->opcode = opcode;
bb2a0f7a
YG
3281 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3282 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3283 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3284 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3285 dmae->len = sizeof(struct host_port_stats) >> 2;
3286 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3287 dmae->comp_addr_hi = 0;
3288 dmae->comp_val = 1;
a2fbb9ea
ET
3289 }
3290
bb2a0f7a
YG
3291 if (bp->func_stx) {
3292
3293 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 dmae->opcode = opcode;
3295 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3296 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3297 dmae->dst_addr_lo = bp->func_stx >> 2;
3298 dmae->dst_addr_hi = 0;
3299 dmae->len = sizeof(struct host_func_stats) >> 2;
3300 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3301 dmae->comp_addr_hi = 0;
3302 dmae->comp_val = 1;
a2fbb9ea
ET
3303 }
3304
bb2a0f7a 3305 /* MAC */
a2fbb9ea
ET
3306 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3307 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3308 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3309#ifdef __BIG_ENDIAN
3310 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3311#else
3312 DMAE_CMD_ENDIANITY_DW_SWAP |
3313#endif
bb2a0f7a
YG
3314 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3315 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3316
c18487ee 3317 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3318
3319 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3320 NIG_REG_INGRESS_BMAC0_MEM);
3321
3322 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3323 BIGMAC_REGISTER_TX_STAT_GTBYT */
3324 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3325 dmae->opcode = opcode;
3326 dmae->src_addr_lo = (mac_addr +
3327 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3328 dmae->src_addr_hi = 0;
3329 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3330 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3331 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3332 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3333 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3334 dmae->comp_addr_hi = 0;
3335 dmae->comp_val = 1;
3336
3337 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3338 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3339 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3340 dmae->opcode = opcode;
3341 dmae->src_addr_lo = (mac_addr +
3342 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3343 dmae->src_addr_hi = 0;
3344 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3345 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3346 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3347 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3348 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3349 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3350 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3351 dmae->comp_addr_hi = 0;
3352 dmae->comp_val = 1;
3353
c18487ee 3354 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3355
3356 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3357
3358 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3359 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3360 dmae->opcode = opcode;
3361 dmae->src_addr_lo = (mac_addr +
3362 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3363 dmae->src_addr_hi = 0;
3364 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3365 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3366 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3369 dmae->comp_val = 1;
3370
3371 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3372 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3373 dmae->opcode = opcode;
3374 dmae->src_addr_lo = (mac_addr +
3375 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3376 dmae->src_addr_hi = 0;
3377 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3378 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3379 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3380 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3381 dmae->len = 1;
3382 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3383 dmae->comp_addr_hi = 0;
3384 dmae->comp_val = 1;
3385
3386 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = (mac_addr +
3390 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3391 dmae->src_addr_hi = 0;
3392 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3393 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3394 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3395 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3396 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3397 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3398 dmae->comp_addr_hi = 0;
3399 dmae->comp_val = 1;
3400 }
3401
3402 /* NIG */
bb2a0f7a
YG
3403 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3404 dmae->opcode = opcode;
3405 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3406 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3407 dmae->src_addr_hi = 0;
3408 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3409 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3410 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3411 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3412 dmae->comp_addr_hi = 0;
3413 dmae->comp_val = 1;
3414
3415 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3416 dmae->opcode = opcode;
3417 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3418 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3419 dmae->src_addr_hi = 0;
3420 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3421 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3422 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3423 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3424 dmae->len = (2*sizeof(u32)) >> 2;
3425 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3426 dmae->comp_addr_hi = 0;
3427 dmae->comp_val = 1;
3428
a2fbb9ea
ET
3429 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3430 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3431 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3432 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3433#ifdef __BIG_ENDIAN
3434 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3435#else
3436 DMAE_CMD_ENDIANITY_DW_SWAP |
3437#endif
bb2a0f7a
YG
3438 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3439 (vn << DMAE_CMD_E1HVN_SHIFT));
3440 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3441 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3442 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3443 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3444 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3445 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3446 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3447 dmae->len = (2*sizeof(u32)) >> 2;
3448 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3449 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3450 dmae->comp_val = DMAE_COMP_VAL;
3451
3452 *stats_comp = 0;
a2fbb9ea
ET
3453}
3454
bb2a0f7a 3455static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3456{
bb2a0f7a
YG
3457 struct dmae_command *dmae = &bp->stats_dmae;
3458 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3459
bb2a0f7a
YG
3460 /* sanity */
3461 if (!bp->func_stx) {
3462 BNX2X_ERR("BUG!\n");
3463 return;
3464 }
a2fbb9ea 3465
bb2a0f7a
YG
3466 bp->executer_idx = 0;
3467 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3468
bb2a0f7a
YG
3469 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3470 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3471 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3472#ifdef __BIG_ENDIAN
3473 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3474#else
3475 DMAE_CMD_ENDIANITY_DW_SWAP |
3476#endif
3477 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3478 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3479 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3480 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3481 dmae->dst_addr_lo = bp->func_stx >> 2;
3482 dmae->dst_addr_hi = 0;
3483 dmae->len = sizeof(struct host_func_stats) >> 2;
3484 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3485 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3486 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3487
bb2a0f7a
YG
3488 *stats_comp = 0;
3489}
a2fbb9ea 3490
bb2a0f7a
YG
3491static void bnx2x_stats_start(struct bnx2x *bp)
3492{
3493 if (bp->port.pmf)
3494 bnx2x_port_stats_init(bp);
3495
3496 else if (bp->func_stx)
3497 bnx2x_func_stats_init(bp);
3498
3499 bnx2x_hw_stats_post(bp);
3500 bnx2x_storm_stats_post(bp);
3501}
3502
3503static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3504{
3505 bnx2x_stats_comp(bp);
3506 bnx2x_stats_pmf_update(bp);
3507 bnx2x_stats_start(bp);
3508}
3509
3510static void bnx2x_stats_restart(struct bnx2x *bp)
3511{
3512 bnx2x_stats_comp(bp);
3513 bnx2x_stats_start(bp);
3514}
3515
3516static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3517{
3518 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3519 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3520 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3521 struct regpair diff;
3522
3523 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3524 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3525 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3526 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3527 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3528 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3529 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3530 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3531 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3532 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3533 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3534 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3535 UPDATE_STAT64(tx_stat_gt127,
3536 tx_stat_etherstatspkts65octetsto127octets);
3537 UPDATE_STAT64(tx_stat_gt255,
3538 tx_stat_etherstatspkts128octetsto255octets);
3539 UPDATE_STAT64(tx_stat_gt511,
3540 tx_stat_etherstatspkts256octetsto511octets);
3541 UPDATE_STAT64(tx_stat_gt1023,
3542 tx_stat_etherstatspkts512octetsto1023octets);
3543 UPDATE_STAT64(tx_stat_gt1518,
3544 tx_stat_etherstatspkts1024octetsto1522octets);
3545 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3546 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3547 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3548 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3549 UPDATE_STAT64(tx_stat_gterr,
3550 tx_stat_dot3statsinternalmactransmiterrors);
3551 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3552
3553 estats->pause_frames_received_hi =
3554 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3555 estats->pause_frames_received_lo =
3556 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3557
3558 estats->pause_frames_sent_hi =
3559 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3560 estats->pause_frames_sent_lo =
3561 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3562}
3563
3564static void bnx2x_emac_stats_update(struct bnx2x *bp)
3565{
3566 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3567 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3568 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3569
3570 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3571 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3572 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3573 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3574 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3575 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3576 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3577 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3578 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3579 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3580 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3581 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3582 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3583 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3584 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3585 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3586 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3587 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3588 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3589 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3590 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3591 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3592 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3593 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3594 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3595 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3596 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3597 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3598 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3599 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3600 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
de832a55
EG
3601
3602 estats->pause_frames_received_hi =
3603 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3604 estats->pause_frames_received_lo =
3605 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3606 ADD_64(estats->pause_frames_received_hi,
3607 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3608 estats->pause_frames_received_lo,
3609 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3610
3611 estats->pause_frames_sent_hi =
3612 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3613 estats->pause_frames_sent_lo =
3614 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3615 ADD_64(estats->pause_frames_sent_hi,
3616 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3617 estats->pause_frames_sent_lo,
3618 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
bb2a0f7a
YG
3619}
3620
3621static int bnx2x_hw_stats_update(struct bnx2x *bp)
3622{
3623 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3624 struct nig_stats *old = &(bp->port.old_nig_stats);
3625 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3626 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3627 struct regpair diff;
de832a55 3628 u32 nig_timer_max;
bb2a0f7a
YG
3629
3630 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3631 bnx2x_bmac_stats_update(bp);
3632
3633 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3634 bnx2x_emac_stats_update(bp);
3635
3636 else { /* unreached */
3637 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3638 return -1;
3639 }
a2fbb9ea 3640
bb2a0f7a
YG
3641 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3642 new->brb_discard - old->brb_discard);
66e855f3
YG
3643 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3644 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3645
bb2a0f7a
YG
3646 UPDATE_STAT64_NIG(egress_mac_pkt0,
3647 etherstatspkts1024octetsto1522octets);
3648 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3649
bb2a0f7a 3650 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3651
bb2a0f7a
YG
3652 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3653 sizeof(struct mac_stx));
3654 estats->brb_drop_hi = pstats->brb_drop_hi;
3655 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3656
bb2a0f7a 3657 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3658
de832a55
EG
3659 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3660 if (nig_timer_max != estats->nig_timer_max) {
3661 estats->nig_timer_max = nig_timer_max;
3662 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3663 }
3664
bb2a0f7a 3665 return 0;
a2fbb9ea
ET
3666}
3667
bb2a0f7a 3668static int bnx2x_storm_stats_update(struct bnx2x *bp)
a2fbb9ea
ET
3669{
3670 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3671 struct tstorm_per_port_stats *tport =
de832a55 3672 &stats->tstorm_common.port_statistics;
bb2a0f7a
YG
3673 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3674 struct bnx2x_eth_stats *estats = &bp->eth_stats;
de832a55
EG
3675 int i;
3676
3677 memset(&(fstats->total_bytes_received_hi), 0,
3678 sizeof(struct host_func_stats) - 2*sizeof(u32));
3679 estats->error_bytes_received_hi = 0;
3680 estats->error_bytes_received_lo = 0;
3681 estats->etherstatsoverrsizepkts_hi = 0;
3682 estats->etherstatsoverrsizepkts_lo = 0;
3683 estats->no_buff_discard_hi = 0;
3684 estats->no_buff_discard_lo = 0;
a2fbb9ea 3685
de832a55
EG
3686 for_each_queue(bp, i) {
3687 struct bnx2x_fastpath *fp = &bp->fp[i];
3688 int cl_id = fp->cl_id;
3689 struct tstorm_per_client_stats *tclient =
3690 &stats->tstorm_common.client_statistics[cl_id];
3691 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3692 struct ustorm_per_client_stats *uclient =
3693 &stats->ustorm_common.client_statistics[cl_id];
3694 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3695 struct xstorm_per_client_stats *xclient =
3696 &stats->xstorm_common.client_statistics[cl_id];
3697 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3698 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3699 u32 diff;
3700
3701 /* are storm stats valid? */
3702 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3703 bp->stats_counter) {
de832a55
EG
3704 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3705 " xstorm counter (%d) != stats_counter (%d)\n",
3706 i, xclient->stats_counter, bp->stats_counter);
3707 return -1;
3708 }
3709 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3710 bp->stats_counter) {
de832a55
EG
3711 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3712 " tstorm counter (%d) != stats_counter (%d)\n",
3713 i, tclient->stats_counter, bp->stats_counter);
3714 return -2;
3715 }
3716 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3717 bp->stats_counter) {
3718 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3719 " ustorm counter (%d) != stats_counter (%d)\n",
3720 i, uclient->stats_counter, bp->stats_counter);
3721 return -4;
3722 }
a2fbb9ea 3723
de832a55
EG
3724 qstats->total_bytes_received_hi =
3725 qstats->valid_bytes_received_hi =
a2fbb9ea 3726 le32_to_cpu(tclient->total_rcv_bytes.hi);
de832a55
EG
3727 qstats->total_bytes_received_lo =
3728 qstats->valid_bytes_received_lo =
a2fbb9ea 3729 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3730
de832a55 3731 qstats->error_bytes_received_hi =
bb2a0f7a 3732 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3733 qstats->error_bytes_received_lo =
bb2a0f7a 3734 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3735
de832a55
EG
3736 ADD_64(qstats->total_bytes_received_hi,
3737 qstats->error_bytes_received_hi,
3738 qstats->total_bytes_received_lo,
3739 qstats->error_bytes_received_lo);
3740
3741 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3742 total_unicast_packets_received);
3743 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3744 total_multicast_packets_received);
3745 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3746 total_broadcast_packets_received);
3747 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3748 etherstatsoverrsizepkts);
3749 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3750
3751 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3752 total_unicast_packets_received);
3753 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3754 total_multicast_packets_received);
3755 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3756 total_broadcast_packets_received);
3757 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3758 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3759 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3760
3761 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3762 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3763 qstats->total_bytes_transmitted_lo =
bb2a0f7a
YG
3764 le32_to_cpu(xclient->total_sent_bytes.lo);
3765
de832a55
EG
3766 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3767 total_unicast_packets_transmitted);
3768 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3769 total_multicast_packets_transmitted);
3770 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3771 total_broadcast_packets_transmitted);
3772
3773 old_tclient->checksum_discard = tclient->checksum_discard;
3774 old_tclient->ttl0_discard = tclient->ttl0_discard;
3775
3776 ADD_64(fstats->total_bytes_received_hi,
3777 qstats->total_bytes_received_hi,
3778 fstats->total_bytes_received_lo,
3779 qstats->total_bytes_received_lo);
3780 ADD_64(fstats->total_bytes_transmitted_hi,
3781 qstats->total_bytes_transmitted_hi,
3782 fstats->total_bytes_transmitted_lo,
3783 qstats->total_bytes_transmitted_lo);
3784 ADD_64(fstats->total_unicast_packets_received_hi,
3785 qstats->total_unicast_packets_received_hi,
3786 fstats->total_unicast_packets_received_lo,
3787 qstats->total_unicast_packets_received_lo);
3788 ADD_64(fstats->total_multicast_packets_received_hi,
3789 qstats->total_multicast_packets_received_hi,
3790 fstats->total_multicast_packets_received_lo,
3791 qstats->total_multicast_packets_received_lo);
3792 ADD_64(fstats->total_broadcast_packets_received_hi,
3793 qstats->total_broadcast_packets_received_hi,
3794 fstats->total_broadcast_packets_received_lo,
3795 qstats->total_broadcast_packets_received_lo);
3796 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3797 qstats->total_unicast_packets_transmitted_hi,
3798 fstats->total_unicast_packets_transmitted_lo,
3799 qstats->total_unicast_packets_transmitted_lo);
3800 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3801 qstats->total_multicast_packets_transmitted_hi,
3802 fstats->total_multicast_packets_transmitted_lo,
3803 qstats->total_multicast_packets_transmitted_lo);
3804 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3805 qstats->total_broadcast_packets_transmitted_hi,
3806 fstats->total_broadcast_packets_transmitted_lo,
3807 qstats->total_broadcast_packets_transmitted_lo);
3808 ADD_64(fstats->valid_bytes_received_hi,
3809 qstats->valid_bytes_received_hi,
3810 fstats->valid_bytes_received_lo,
3811 qstats->valid_bytes_received_lo);
3812
3813 ADD_64(estats->error_bytes_received_hi,
3814 qstats->error_bytes_received_hi,
3815 estats->error_bytes_received_lo,
3816 qstats->error_bytes_received_lo);
3817 ADD_64(estats->etherstatsoverrsizepkts_hi,
3818 qstats->etherstatsoverrsizepkts_hi,
3819 estats->etherstatsoverrsizepkts_lo,
3820 qstats->etherstatsoverrsizepkts_lo);
3821 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3822 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3823 }
3824
3825 ADD_64(fstats->total_bytes_received_hi,
3826 estats->rx_stat_ifhcinbadoctets_hi,
3827 fstats->total_bytes_received_lo,
3828 estats->rx_stat_ifhcinbadoctets_lo);
bb2a0f7a
YG
3829
3830 memcpy(estats, &(fstats->total_bytes_received_hi),
3831 sizeof(struct host_func_stats) - 2*sizeof(u32));
3832
de832a55
EG
3833 ADD_64(estats->etherstatsoverrsizepkts_hi,
3834 estats->rx_stat_dot3statsframestoolong_hi,
3835 estats->etherstatsoverrsizepkts_lo,
3836 estats->rx_stat_dot3statsframestoolong_lo);
3837 ADD_64(estats->error_bytes_received_hi,
3838 estats->rx_stat_ifhcinbadoctets_hi,
3839 estats->error_bytes_received_lo,
3840 estats->rx_stat_ifhcinbadoctets_lo);
3841
3842 if (bp->port.pmf) {
3843 estats->mac_filter_discard =
3844 le32_to_cpu(tport->mac_filter_discard);
3845 estats->xxoverflow_discard =
3846 le32_to_cpu(tport->xxoverflow_discard);
3847 estats->brb_truncate_discard =
bb2a0f7a 3848 le32_to_cpu(tport->brb_truncate_discard);
de832a55
EG
3849 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3850 }
bb2a0f7a
YG
3851
3852 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3853
de832a55
EG
3854 bp->stats_pending = 0;
3855
a2fbb9ea
ET
3856 return 0;
3857}
3858
bb2a0f7a 3859static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3860{
bb2a0f7a 3861 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3862 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3863 int i;
a2fbb9ea
ET
3864
3865 nstats->rx_packets =
3866 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3867 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3868 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3869
3870 nstats->tx_packets =
3871 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3872 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3873 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3874
de832a55 3875 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3876
0e39e645 3877 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3878
de832a55
EG
3879 nstats->rx_dropped = estats->mac_discard;
3880 for_each_queue(bp, i)
3881 nstats->rx_dropped +=
3882 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3883
a2fbb9ea
ET
3884 nstats->tx_dropped = 0;
3885
3886 nstats->multicast =
de832a55 3887 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3888
bb2a0f7a 3889 nstats->collisions =
de832a55 3890 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
bb2a0f7a
YG
3891
3892 nstats->rx_length_errors =
de832a55
EG
3893 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3894 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3895 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3896 bnx2x_hilo(&estats->brb_truncate_hi);
3897 nstats->rx_crc_errors =
3898 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3899 nstats->rx_frame_errors =
3900 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3901 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
a2fbb9ea
ET
3902 nstats->rx_missed_errors = estats->xxoverflow_discard;
3903
3904 nstats->rx_errors = nstats->rx_length_errors +
3905 nstats->rx_over_errors +
3906 nstats->rx_crc_errors +
3907 nstats->rx_frame_errors +
0e39e645
ET
3908 nstats->rx_fifo_errors +
3909 nstats->rx_missed_errors;
a2fbb9ea 3910
bb2a0f7a 3911 nstats->tx_aborted_errors =
de832a55
EG
3912 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3913 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3914 nstats->tx_carrier_errors =
3915 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
a2fbb9ea
ET
3916 nstats->tx_fifo_errors = 0;
3917 nstats->tx_heartbeat_errors = 0;
3918 nstats->tx_window_errors = 0;
3919
3920 nstats->tx_errors = nstats->tx_aborted_errors +
de832a55
EG
3921 nstats->tx_carrier_errors +
3922 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3923}
3924
3925static void bnx2x_drv_stats_update(struct bnx2x *bp)
3926{
3927 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3928 int i;
3929
3930 estats->driver_xoff = 0;
3931 estats->rx_err_discard_pkt = 0;
3932 estats->rx_skb_alloc_failed = 0;
3933 estats->hw_csum_err = 0;
3934 for_each_queue(bp, i) {
3935 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3936
3937 estats->driver_xoff += qstats->driver_xoff;
3938 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3939 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3940 estats->hw_csum_err += qstats->hw_csum_err;
3941 }
a2fbb9ea
ET
3942}
3943
bb2a0f7a 3944static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3945{
bb2a0f7a 3946 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3947
bb2a0f7a
YG
3948 if (*stats_comp != DMAE_COMP_VAL)
3949 return;
3950
3951 if (bp->port.pmf)
de832a55 3952 bnx2x_hw_stats_update(bp);
a2fbb9ea 3953
de832a55
EG
3954 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3955 BNX2X_ERR("storm stats were not updated for 3 times\n");
3956 bnx2x_panic();
3957 return;
a2fbb9ea
ET
3958 }
3959
de832a55
EG
3960 bnx2x_net_stats_update(bp);
3961 bnx2x_drv_stats_update(bp);
3962
a2fbb9ea 3963 if (bp->msglevel & NETIF_MSG_TIMER) {
de832a55
EG
3964 struct tstorm_per_client_stats *old_tclient =
3965 &bp->fp->old_tclient;
3966 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 3967 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3968 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3969 int i;
a2fbb9ea
ET
3970
3971 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3972 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3973 " tx pkt (%lx)\n",
3974 bnx2x_tx_avail(bp->fp),
7a9b2557 3975 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
a2fbb9ea
ET
3976 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3977 " rx pkt (%lx)\n",
7a9b2557
VZ
3978 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3979 bp->fp->rx_comp_cons),
3980 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
de832a55
EG
3981 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3982 "brb truncate %u\n",
3983 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3984 qstats->driver_xoff,
3985 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 3986 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 3987 "packets_too_big_discard %lu no_buff_discard %lu "
a2fbb9ea
ET
3988 "mac_discard %u mac_filter_discard %u "
3989 "xxovrflow_discard %u brb_truncate_discard %u "
3990 "ttl0_discard %u\n",
bb2a0f7a 3991 old_tclient->checksum_discard,
de832a55
EG
3992 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3993 bnx2x_hilo(&qstats->no_buff_discard_hi),
3994 estats->mac_discard, estats->mac_filter_discard,
3995 estats->xxoverflow_discard, estats->brb_truncate_discard,
bb2a0f7a 3996 old_tclient->ttl0_discard);
a2fbb9ea
ET
3997
3998 for_each_queue(bp, i) {
3999 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4000 bnx2x_fp(bp, i, tx_pkt),
4001 bnx2x_fp(bp, i, rx_pkt),
4002 bnx2x_fp(bp, i, rx_calls));
4003 }
4004 }
4005
bb2a0f7a
YG
4006 bnx2x_hw_stats_post(bp);
4007 bnx2x_storm_stats_post(bp);
4008}
a2fbb9ea 4009
bb2a0f7a
YG
4010static void bnx2x_port_stats_stop(struct bnx2x *bp)
4011{
4012 struct dmae_command *dmae;
4013 u32 opcode;
4014 int loader_idx = PMF_DMAE_C(bp);
4015 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4016
bb2a0f7a 4017 bp->executer_idx = 0;
a2fbb9ea 4018
bb2a0f7a
YG
4019 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4020 DMAE_CMD_C_ENABLE |
4021 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4022#ifdef __BIG_ENDIAN
bb2a0f7a 4023 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4024#else
bb2a0f7a 4025 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4026#endif
bb2a0f7a
YG
4027 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4028 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4029
4030 if (bp->port.port_stx) {
4031
4032 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4033 if (bp->func_stx)
4034 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4035 else
4036 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4037 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4038 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4039 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4040 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
4041 dmae->len = sizeof(struct host_port_stats) >> 2;
4042 if (bp->func_stx) {
4043 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4044 dmae->comp_addr_hi = 0;
4045 dmae->comp_val = 1;
4046 } else {
4047 dmae->comp_addr_lo =
4048 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4049 dmae->comp_addr_hi =
4050 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4051 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4052
bb2a0f7a
YG
4053 *stats_comp = 0;
4054 }
a2fbb9ea
ET
4055 }
4056
bb2a0f7a
YG
4057 if (bp->func_stx) {
4058
4059 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4060 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4061 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4062 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4063 dmae->dst_addr_lo = bp->func_stx >> 2;
4064 dmae->dst_addr_hi = 0;
4065 dmae->len = sizeof(struct host_func_stats) >> 2;
4066 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4067 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4068 dmae->comp_val = DMAE_COMP_VAL;
4069
4070 *stats_comp = 0;
a2fbb9ea 4071 }
bb2a0f7a
YG
4072}
4073
4074static void bnx2x_stats_stop(struct bnx2x *bp)
4075{
4076 int update = 0;
4077
4078 bnx2x_stats_comp(bp);
4079
4080 if (bp->port.pmf)
4081 update = (bnx2x_hw_stats_update(bp) == 0);
4082
4083 update |= (bnx2x_storm_stats_update(bp) == 0);
4084
4085 if (update) {
4086 bnx2x_net_stats_update(bp);
a2fbb9ea 4087
bb2a0f7a
YG
4088 if (bp->port.pmf)
4089 bnx2x_port_stats_stop(bp);
4090
4091 bnx2x_hw_stats_post(bp);
4092 bnx2x_stats_comp(bp);
a2fbb9ea
ET
4093 }
4094}
4095
bb2a0f7a
YG
4096static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4097{
4098}
4099
4100static const struct {
4101 void (*action)(struct bnx2x *bp);
4102 enum bnx2x_stats_state next_state;
4103} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4104/* state event */
4105{
4106/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4107/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4108/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4109/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4110},
4111{
4112/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4113/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4114/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4115/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4116}
4117};
4118
4119static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4120{
4121 enum bnx2x_stats_state state = bp->stats_state;
4122
4123 bnx2x_stats_stm[state][event].action(bp);
4124 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4125
4126 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4127 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4128 state, event, bp->stats_state);
4129}
4130
a2fbb9ea
ET
4131static void bnx2x_timer(unsigned long data)
4132{
4133 struct bnx2x *bp = (struct bnx2x *) data;
4134
4135 if (!netif_running(bp->dev))
4136 return;
4137
4138 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4139 goto timer_restart;
a2fbb9ea
ET
4140
4141 if (poll) {
4142 struct bnx2x_fastpath *fp = &bp->fp[0];
4143 int rc;
4144
4145 bnx2x_tx_int(fp, 1000);
4146 rc = bnx2x_rx_int(fp, 1000);
4147 }
4148
34f80b04
EG
4149 if (!BP_NOMCP(bp)) {
4150 int func = BP_FUNC(bp);
a2fbb9ea
ET
4151 u32 drv_pulse;
4152 u32 mcp_pulse;
4153
4154 ++bp->fw_drv_pulse_wr_seq;
4155 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4156 /* TBD - add SYSTEM_TIME */
4157 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4158 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4159
34f80b04 4160 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
4161 MCP_PULSE_SEQ_MASK);
4162 /* The delta between driver pulse and mcp response
4163 * should be 1 (before mcp response) or 0 (after mcp response)
4164 */
4165 if ((drv_pulse != mcp_pulse) &&
4166 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4167 /* someone lost a heartbeat... */
4168 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4169 drv_pulse, mcp_pulse);
4170 }
4171 }
4172
bb2a0f7a
YG
4173 if ((bp->state == BNX2X_STATE_OPEN) ||
4174 (bp->state == BNX2X_STATE_DISABLED))
4175 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4176
f1410647 4177timer_restart:
a2fbb9ea
ET
4178 mod_timer(&bp->timer, jiffies + bp->current_interval);
4179}
4180
4181/* end of Statistics */
4182
4183/* nic init */
4184
4185/*
4186 * nic init service functions
4187 */
4188
34f80b04 4189static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4190{
34f80b04
EG
4191 int port = BP_PORT(bp);
4192
4193 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4194 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4195 sizeof(struct ustorm_status_block)/4);
34f80b04
EG
4196 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4197 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4198 sizeof(struct cstorm_status_block)/4);
34f80b04
EG
4199}
4200
5c862848
EG
4201static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4202 dma_addr_t mapping, int sb_id)
34f80b04
EG
4203{
4204 int port = BP_PORT(bp);
bb2a0f7a 4205 int func = BP_FUNC(bp);
a2fbb9ea 4206 int index;
34f80b04 4207 u64 section;
a2fbb9ea
ET
4208
4209 /* USTORM */
4210 section = ((u64)mapping) + offsetof(struct host_status_block,
4211 u_status_block);
34f80b04 4212 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4213
4214 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4215 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4216 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4217 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4218 U64_HI(section));
bb2a0f7a
YG
4219 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4220 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4221
4222 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4223 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4224 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
4225
4226 /* CSTORM */
4227 section = ((u64)mapping) + offsetof(struct host_status_block,
4228 c_status_block);
34f80b04 4229 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4230
4231 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4232 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4233 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4234 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4235 U64_HI(section));
7a9b2557
VZ
4236 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4237 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
4238
4239 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4240 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
4241 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4242
4243 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4244}
4245
4246static void bnx2x_zero_def_sb(struct bnx2x *bp)
4247{
4248 int func = BP_FUNC(bp);
a2fbb9ea 4249
34f80b04
EG
4250 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4251 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4252 sizeof(struct ustorm_def_status_block)/4);
4253 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4254 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4255 sizeof(struct cstorm_def_status_block)/4);
4256 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4257 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4258 sizeof(struct xstorm_def_status_block)/4);
4259 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4260 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4261 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
4262}
4263
4264static void bnx2x_init_def_sb(struct bnx2x *bp,
4265 struct host_def_status_block *def_sb,
34f80b04 4266 dma_addr_t mapping, int sb_id)
a2fbb9ea 4267{
34f80b04
EG
4268 int port = BP_PORT(bp);
4269 int func = BP_FUNC(bp);
a2fbb9ea
ET
4270 int index, val, reg_offset;
4271 u64 section;
4272
4273 /* ATTN */
4274 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4275 atten_status_block);
34f80b04 4276 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4277
49d66772
ET
4278 bp->attn_state = 0;
4279
a2fbb9ea
ET
4280 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4281 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4282
34f80b04 4283 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
4284 bp->attn_group[index].sig[0] = REG_RD(bp,
4285 reg_offset + 0x10*index);
4286 bp->attn_group[index].sig[1] = REG_RD(bp,
4287 reg_offset + 0x4 + 0x10*index);
4288 bp->attn_group[index].sig[2] = REG_RD(bp,
4289 reg_offset + 0x8 + 0x10*index);
4290 bp->attn_group[index].sig[3] = REG_RD(bp,
4291 reg_offset + 0xc + 0x10*index);
4292 }
4293
a2fbb9ea
ET
4294 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4295 HC_REG_ATTN_MSG0_ADDR_L);
4296
4297 REG_WR(bp, reg_offset, U64_LO(section));
4298 REG_WR(bp, reg_offset + 4, U64_HI(section));
4299
4300 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4301
4302 val = REG_RD(bp, reg_offset);
34f80b04 4303 val |= sb_id;
a2fbb9ea
ET
4304 REG_WR(bp, reg_offset, val);
4305
4306 /* USTORM */
4307 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4308 u_def_status_block);
34f80b04 4309 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4310
4311 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4312 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4313 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4314 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4315 U64_HI(section));
5c862848 4316 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4317 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4318
4319 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4320 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4321 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4322
4323 /* CSTORM */
4324 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4325 c_def_status_block);
34f80b04 4326 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4327
4328 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4329 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4330 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4331 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4332 U64_HI(section));
5c862848 4333 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4334 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4335
4336 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4337 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4338 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4339
4340 /* TSTORM */
4341 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4342 t_def_status_block);
34f80b04 4343 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4344
4345 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4346 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4347 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4348 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4349 U64_HI(section));
5c862848 4350 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4351 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4352
4353 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4354 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4355 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
4356
4357 /* XSTORM */
4358 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4359 x_def_status_block);
34f80b04 4360 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea
ET
4361
4362 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4363 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4364 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4365 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4366 U64_HI(section));
5c862848 4367 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4368 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
a2fbb9ea
ET
4369
4370 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4371 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4372 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4373
bb2a0f7a 4374 bp->stats_pending = 0;
66e855f3 4375 bp->set_mac_pending = 0;
bb2a0f7a 4376
34f80b04 4377 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4378}
4379
4380static void bnx2x_update_coalesce(struct bnx2x *bp)
4381{
34f80b04 4382 int port = BP_PORT(bp);
a2fbb9ea
ET
4383 int i;
4384
4385 for_each_queue(bp, i) {
34f80b04 4386 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
4387
4388 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4389 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4390 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4391 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4392 bp->rx_ticks/12);
a2fbb9ea 4393 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4394 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848
EG
4395 U_SB_ETH_RX_CQ_INDEX),
4396 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
4397
4398 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4399 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4400 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4401 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4402 bp->tx_ticks/12);
a2fbb9ea 4403 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4404 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4405 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4406 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
4407 }
4408}
4409
7a9b2557
VZ
4410static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4411 struct bnx2x_fastpath *fp, int last)
4412{
4413 int i;
4414
4415 for (i = 0; i < last; i++) {
4416 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4417 struct sk_buff *skb = rx_buf->skb;
4418
4419 if (skb == NULL) {
4420 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4421 continue;
4422 }
4423
4424 if (fp->tpa_state[i] == BNX2X_TPA_START)
4425 pci_unmap_single(bp->pdev,
4426 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4427 bp->rx_buf_size,
7a9b2557
VZ
4428 PCI_DMA_FROMDEVICE);
4429
4430 dev_kfree_skb(skb);
4431 rx_buf->skb = NULL;
4432 }
4433}
4434
a2fbb9ea
ET
4435static void bnx2x_init_rx_rings(struct bnx2x *bp)
4436{
7a9b2557 4437 int func = BP_FUNC(bp);
32626230
EG
4438 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4439 ETH_MAX_AGGREGATION_QUEUES_E1H;
4440 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4441 int i, j;
a2fbb9ea 4442
87942b46 4443 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4444 DP(NETIF_MSG_IFUP,
4445 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4446
7a9b2557 4447 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4448
555f6c78 4449 for_each_rx_queue(bp, j) {
32626230 4450 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4451
32626230 4452 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4453 fp->tpa_pool[i].skb =
4454 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4455 if (!fp->tpa_pool[i].skb) {
4456 BNX2X_ERR("Failed to allocate TPA "
4457 "skb pool for queue[%d] - "
4458 "disabling TPA on this "
4459 "queue!\n", j);
4460 bnx2x_free_tpa_pool(bp, fp, i);
4461 fp->disable_tpa = 1;
4462 break;
4463 }
4464 pci_unmap_addr_set((struct sw_rx_bd *)
4465 &bp->fp->tpa_pool[i],
4466 mapping, 0);
4467 fp->tpa_state[i] = BNX2X_TPA_STOP;
4468 }
4469 }
4470 }
4471
555f6c78 4472 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4473 struct bnx2x_fastpath *fp = &bp->fp[j];
4474
4475 fp->rx_bd_cons = 0;
4476 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4477 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4478
4479 /* "next page" elements initialization */
4480 /* SGE ring */
4481 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4482 struct eth_rx_sge *sge;
4483
4484 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4485 sge->addr_hi =
4486 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4487 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4488 sge->addr_lo =
4489 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4490 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4491 }
4492
4493 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4494
7a9b2557 4495 /* RX BD ring */
a2fbb9ea
ET
4496 for (i = 1; i <= NUM_RX_RINGS; i++) {
4497 struct eth_rx_bd *rx_bd;
4498
4499 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4500 rx_bd->addr_hi =
4501 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4502 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4503 rx_bd->addr_lo =
4504 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4505 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4506 }
4507
34f80b04 4508 /* CQ ring */
a2fbb9ea
ET
4509 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4510 struct eth_rx_cqe_next_page *nextpg;
4511
4512 nextpg = (struct eth_rx_cqe_next_page *)
4513 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4514 nextpg->addr_hi =
4515 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4516 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4517 nextpg->addr_lo =
4518 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4519 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4520 }
4521
7a9b2557
VZ
4522 /* Allocate SGEs and initialize the ring elements */
4523 for (i = 0, ring_prod = 0;
4524 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4525
7a9b2557
VZ
4526 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4527 BNX2X_ERR("was only able to allocate "
4528 "%d rx sges\n", i);
4529 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4530 /* Cleanup already allocated elements */
4531 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4532 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
4533 fp->disable_tpa = 1;
4534 ring_prod = 0;
4535 break;
4536 }
4537 ring_prod = NEXT_SGE_IDX(ring_prod);
4538 }
4539 fp->rx_sge_prod = ring_prod;
4540
4541 /* Allocate BDs and initialize BD ring */
66e855f3 4542 fp->rx_comp_cons = 0;
7a9b2557 4543 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
4544 for (i = 0; i < bp->rx_ring_size; i++) {
4545 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4546 BNX2X_ERR("was only able to allocate "
de832a55
EG
4547 "%d rx skbs on queue[%d]\n", i, j);
4548 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
4549 break;
4550 }
4551 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4552 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4553 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
4554 }
4555
7a9b2557
VZ
4556 fp->rx_bd_prod = ring_prod;
4557 /* must not have more available CQEs than BDs */
4558 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4559 cqe_ring_prod);
a2fbb9ea
ET
4560 fp->rx_pkt = fp->rx_calls = 0;
4561
7a9b2557
VZ
4562 /* Warning!
4563 * this will generate an interrupt (to the TSTORM)
4564 * must only be done after chip is initialized
4565 */
4566 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4567 fp->rx_sge_prod);
a2fbb9ea
ET
4568 if (j != 0)
4569 continue;
4570
4571 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4572 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
4573 U64_LO(fp->rx_comp_mapping));
4574 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4575 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
4576 U64_HI(fp->rx_comp_mapping));
4577 }
4578}
4579
4580static void bnx2x_init_tx_ring(struct bnx2x *bp)
4581{
4582 int i, j;
4583
555f6c78 4584 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
4585 struct bnx2x_fastpath *fp = &bp->fp[j];
4586
4587 for (i = 1; i <= NUM_TX_RINGS; i++) {
4588 struct eth_tx_bd *tx_bd =
4589 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4590
4591 tx_bd->addr_hi =
4592 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4593 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4594 tx_bd->addr_lo =
4595 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4596 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
4597 }
4598
4599 fp->tx_pkt_prod = 0;
4600 fp->tx_pkt_cons = 0;
4601 fp->tx_bd_prod = 0;
4602 fp->tx_bd_cons = 0;
4603 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4604 fp->tx_pkt = 0;
4605 }
4606}
4607
4608static void bnx2x_init_sp_ring(struct bnx2x *bp)
4609{
34f80b04 4610 int func = BP_FUNC(bp);
a2fbb9ea
ET
4611
4612 spin_lock_init(&bp->spq_lock);
4613
4614 bp->spq_left = MAX_SPQ_PENDING;
4615 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4616 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4617 bp->spq_prod_bd = bp->spq;
4618 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4619
34f80b04 4620 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4621 U64_LO(bp->spq_mapping));
34f80b04
EG
4622 REG_WR(bp,
4623 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
4624 U64_HI(bp->spq_mapping));
4625
34f80b04 4626 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
4627 bp->spq_prod_idx);
4628}
4629
4630static void bnx2x_init_context(struct bnx2x *bp)
4631{
4632 int i;
4633
4634 for_each_queue(bp, i) {
4635 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4636 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4637 u8 cl_id = fp->cl_id;
34f80b04 4638 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4639
34f80b04
EG
4640 context->ustorm_st_context.common.sb_index_numbers =
4641 BNX2X_RX_SB_INDEX_NUM;
4642 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4643 context->ustorm_st_context.common.status_block_id = sb_id;
4644 context->ustorm_st_context.common.flags =
de832a55
EG
4645 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4646 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4647 context->ustorm_st_context.common.statistics_counter_id =
4648 cl_id;
8d9c5f34 4649 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4650 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4651 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4652 bp->rx_buf_size;
34f80b04 4653 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4654 U64_HI(fp->rx_desc_mapping);
34f80b04 4655 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4656 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
4657 if (!fp->disable_tpa) {
4658 context->ustorm_st_context.common.flags |=
4659 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4660 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4661 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
4662 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4663 (u32)0xffff);
7a9b2557
VZ
4664 context->ustorm_st_context.common.sge_page_base_hi =
4665 U64_HI(fp->rx_sge_mapping);
4666 context->ustorm_st_context.common.sge_page_base_lo =
4667 U64_LO(fp->rx_sge_mapping);
4668 }
4669
8d9c5f34
EG
4670 context->ustorm_ag_context.cdu_usage =
4671 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4672 CDU_REGION_NUMBER_UCM_AG,
4673 ETH_CONNECTION_TYPE);
4674
4675 context->xstorm_st_context.tx_bd_page_base_hi =
4676 U64_HI(fp->tx_desc_mapping);
4677 context->xstorm_st_context.tx_bd_page_base_lo =
4678 U64_LO(fp->tx_desc_mapping);
4679 context->xstorm_st_context.db_data_addr_hi =
4680 U64_HI(fp->tx_prods_mapping);
4681 context->xstorm_st_context.db_data_addr_lo =
4682 U64_LO(fp->tx_prods_mapping);
4683 context->xstorm_st_context.statistics_data = (fp->cl_id |
4684 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4685 context->cstorm_st_context.sb_index_number =
5c862848 4686 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4687 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
4688
4689 context->xstorm_ag_context.cdu_reserved =
4690 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4691 CDU_REGION_NUMBER_XCM_AG,
4692 ETH_CONNECTION_TYPE);
a2fbb9ea
ET
4693 }
4694}
4695
4696static void bnx2x_init_ind_table(struct bnx2x *bp)
4697{
26c8fa4d 4698 int func = BP_FUNC(bp);
a2fbb9ea
ET
4699 int i;
4700
555f6c78 4701 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4702 return;
4703
555f6c78
EG
4704 DP(NETIF_MSG_IFUP,
4705 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4706 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4707 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4708 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4709 BP_CL_ID(bp) + (i % bp->num_rx_queues));
a2fbb9ea
ET
4710}
4711
49d66772
ET
4712static void bnx2x_set_client_config(struct bnx2x *bp)
4713{
49d66772 4714 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4715 int port = BP_PORT(bp);
4716 int i;
49d66772 4717
e7799c5f 4718 tstorm_client.mtu = bp->dev->mtu;
49d66772 4719 tstorm_client.config_flags =
de832a55
EG
4720 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4721 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4722#ifdef BCM_VLAN
0c6671b0 4723 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4724 tstorm_client.config_flags |=
8d9c5f34 4725 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
4726 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4727 }
4728#endif
49d66772 4729
7a9b2557
VZ
4730 if (bp->flags & TPA_ENABLE_FLAG) {
4731 tstorm_client.max_sges_for_packet =
4f40f2cb 4732 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
7a9b2557
VZ
4733 tstorm_client.max_sges_for_packet =
4734 ((tstorm_client.max_sges_for_packet +
4735 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4736 PAGES_PER_SGE_SHIFT;
4737
4738 tstorm_client.config_flags |=
4739 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4740 }
4741
49d66772 4742 for_each_queue(bp, i) {
de832a55
EG
4743 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4744
49d66772 4745 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4746 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4747 ((u32 *)&tstorm_client)[0]);
4748 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4749 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4750 ((u32 *)&tstorm_client)[1]);
4751 }
4752
34f80b04
EG
4753 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4754 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4755}
4756
a2fbb9ea
ET
4757static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4758{
a2fbb9ea 4759 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4760 int mode = bp->rx_mode;
4761 int mask = (1 << BP_L_ID(bp));
4762 int func = BP_FUNC(bp);
a2fbb9ea
ET
4763 int i;
4764
3196a88a 4765 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
4766
4767 switch (mode) {
4768 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4769 tstorm_mac_filter.ucast_drop_all = mask;
4770 tstorm_mac_filter.mcast_drop_all = mask;
4771 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4772 break;
4773 case BNX2X_RX_MODE_NORMAL:
34f80b04 4774 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4775 break;
4776 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4777 tstorm_mac_filter.mcast_accept_all = mask;
4778 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4779 break;
4780 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4781 tstorm_mac_filter.ucast_accept_all = mask;
4782 tstorm_mac_filter.mcast_accept_all = mask;
4783 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4784 break;
4785 default:
34f80b04
EG
4786 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4787 break;
a2fbb9ea
ET
4788 }
4789
4790 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4791 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4792 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4793 ((u32 *)&tstorm_mac_filter)[i]);
4794
34f80b04 4795/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4796 ((u32 *)&tstorm_mac_filter)[i]); */
4797 }
a2fbb9ea 4798
49d66772
ET
4799 if (mode != BNX2X_RX_MODE_NONE)
4800 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4801}
4802
471de716
EG
4803static void bnx2x_init_internal_common(struct bnx2x *bp)
4804{
4805 int i;
4806
3cdf1db7
YG
4807 if (bp->flags & TPA_ENABLE_FLAG) {
4808 struct tstorm_eth_tpa_exist tpa = {0};
4809
4810 tpa.tpa_exist = 1;
4811
4812 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4813 ((u32 *)&tpa)[0]);
4814 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4815 ((u32 *)&tpa)[1]);
4816 }
4817
471de716
EG
4818 /* Zero this manually as its initialization is
4819 currently missing in the initTool */
4820 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4821 REG_WR(bp, BAR_USTRORM_INTMEM +
4822 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4823}
4824
4825static void bnx2x_init_internal_port(struct bnx2x *bp)
4826{
4827 int port = BP_PORT(bp);
4828
4829 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4830 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4831 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4832 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4833}
4834
8a1c38d1
EG
4835/* Calculates the sum of vn_min_rates.
4836 It's needed for further normalizing of the min_rates.
4837 Returns:
4838 sum of vn_min_rates.
4839 or
4840 0 - if all the min_rates are 0.
4841 In the later case fainess algorithm should be deactivated.
4842 If not all min_rates are zero then those that are zeroes will be set to 1.
4843 */
4844static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4845{
4846 int all_zero = 1;
4847 int port = BP_PORT(bp);
4848 int vn;
4849
4850 bp->vn_weight_sum = 0;
4851 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4852 int func = 2*vn + port;
4853 u32 vn_cfg =
4854 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4855 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4856 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4857
4858 /* Skip hidden vns */
4859 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4860 continue;
4861
4862 /* If min rate is zero - set it to 1 */
4863 if (!vn_min_rate)
4864 vn_min_rate = DEF_MIN_RATE;
4865 else
4866 all_zero = 0;
4867
4868 bp->vn_weight_sum += vn_min_rate;
4869 }
4870
4871 /* ... only if all min rates are zeros - disable fairness */
4872 if (all_zero)
4873 bp->vn_weight_sum = 0;
4874}
4875
471de716 4876static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4877{
a2fbb9ea
ET
4878 struct tstorm_eth_function_common_config tstorm_config = {0};
4879 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4880 int port = BP_PORT(bp);
4881 int func = BP_FUNC(bp);
de832a55
EG
4882 int i, j;
4883 u32 offset;
471de716 4884 u16 max_agg_size;
a2fbb9ea
ET
4885
4886 if (is_multi(bp)) {
555f6c78 4887 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4888 tstorm_config.rss_result_mask = MULTI_MASK;
4889 }
8d9c5f34
EG
4890 if (IS_E1HMF(bp))
4891 tstorm_config.config_flags |=
4892 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4893
34f80b04
EG
4894 tstorm_config.leading_client_id = BP_L_ID(bp);
4895
a2fbb9ea 4896 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4897 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4898 (*(u32 *)&tstorm_config));
4899
c14423fe 4900 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4901 bnx2x_set_storm_rx_mode(bp);
4902
de832a55
EG
4903 for_each_queue(bp, i) {
4904 u8 cl_id = bp->fp[i].cl_id;
4905
4906 /* reset xstorm per client statistics */
4907 offset = BAR_XSTRORM_INTMEM +
4908 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4909 for (j = 0;
4910 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4911 REG_WR(bp, offset + j*4, 0);
4912
4913 /* reset tstorm per client statistics */
4914 offset = BAR_TSTRORM_INTMEM +
4915 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4916 for (j = 0;
4917 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4918 REG_WR(bp, offset + j*4, 0);
4919
4920 /* reset ustorm per client statistics */
4921 offset = BAR_USTRORM_INTMEM +
4922 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4923 for (j = 0;
4924 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4925 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4926 }
4927
4928 /* Init statistics related context */
34f80b04 4929 stats_flags.collect_eth = 1;
a2fbb9ea 4930
66e855f3 4931 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4932 ((u32 *)&stats_flags)[0]);
66e855f3 4933 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4934 ((u32 *)&stats_flags)[1]);
4935
66e855f3 4936 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4937 ((u32 *)&stats_flags)[0]);
66e855f3 4938 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4939 ((u32 *)&stats_flags)[1]);
4940
de832a55
EG
4941 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4942 ((u32 *)&stats_flags)[0]);
4943 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4944 ((u32 *)&stats_flags)[1]);
4945
66e855f3 4946 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4947 ((u32 *)&stats_flags)[0]);
66e855f3 4948 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4949 ((u32 *)&stats_flags)[1]);
4950
66e855f3
YG
4951 REG_WR(bp, BAR_XSTRORM_INTMEM +
4952 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4953 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4954 REG_WR(bp, BAR_XSTRORM_INTMEM +
4955 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4956 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4957
4958 REG_WR(bp, BAR_TSTRORM_INTMEM +
4959 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4960 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4961 REG_WR(bp, BAR_TSTRORM_INTMEM +
4962 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4963 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4964
de832a55
EG
4965 REG_WR(bp, BAR_USTRORM_INTMEM +
4966 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4967 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4968 REG_WR(bp, BAR_USTRORM_INTMEM +
4969 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4970 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4971
34f80b04
EG
4972 if (CHIP_IS_E1H(bp)) {
4973 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4974 IS_E1HMF(bp));
4975 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4976 IS_E1HMF(bp));
4977 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4978 IS_E1HMF(bp));
4979 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4980 IS_E1HMF(bp));
4981
7a9b2557
VZ
4982 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4983 bp->e1hov);
34f80b04
EG
4984 }
4985
4f40f2cb
EG
4986 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4987 max_agg_size =
4988 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4989 SGE_PAGE_SIZE * PAGES_PER_SGE),
4990 (u32)0xffff);
555f6c78 4991 for_each_rx_queue(bp, i) {
7a9b2557 4992 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4993
4994 REG_WR(bp, BAR_USTRORM_INTMEM +
4995 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4996 U64_LO(fp->rx_comp_mapping));
4997 REG_WR(bp, BAR_USTRORM_INTMEM +
4998 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4999 U64_HI(fp->rx_comp_mapping));
5000
7a9b2557
VZ
5001 REG_WR16(bp, BAR_USTRORM_INTMEM +
5002 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
5003 max_agg_size);
5004 }
8a1c38d1 5005
1c06328c
EG
5006 /* dropless flow control */
5007 if (CHIP_IS_E1H(bp)) {
5008 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5009
5010 rx_pause.bd_thr_low = 250;
5011 rx_pause.cqe_thr_low = 250;
5012 rx_pause.cos = 1;
5013 rx_pause.sge_thr_low = 0;
5014 rx_pause.bd_thr_high = 350;
5015 rx_pause.cqe_thr_high = 350;
5016 rx_pause.sge_thr_high = 0;
5017
5018 for_each_rx_queue(bp, i) {
5019 struct bnx2x_fastpath *fp = &bp->fp[i];
5020
5021 if (!fp->disable_tpa) {
5022 rx_pause.sge_thr_low = 150;
5023 rx_pause.sge_thr_high = 250;
5024 }
5025
5026
5027 offset = BAR_USTRORM_INTMEM +
5028 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5029 fp->cl_id);
5030 for (j = 0;
5031 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5032 j++)
5033 REG_WR(bp, offset + j*4,
5034 ((u32 *)&rx_pause)[j]);
5035 }
5036 }
5037
8a1c38d1
EG
5038 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5039
5040 /* Init rate shaping and fairness contexts */
5041 if (IS_E1HMF(bp)) {
5042 int vn;
5043
5044 /* During init there is no active link
5045 Until link is up, set link rate to 10Gbps */
5046 bp->link_vars.line_speed = SPEED_10000;
5047 bnx2x_init_port_minmax(bp);
5048
5049 bnx2x_calc_vn_weight_sum(bp);
5050
5051 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5052 bnx2x_init_vn_minmax(bp, 2*vn + port);
5053
5054 /* Enable rate shaping and fairness */
5055 bp->cmng.flags.cmng_enables =
5056 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5057 if (bp->vn_weight_sum)
5058 bp->cmng.flags.cmng_enables |=
5059 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5060 else
5061 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5062 " fairness will be disabled\n");
5063 } else {
5064 /* rate shaping and fairness are disabled */
5065 DP(NETIF_MSG_IFUP,
5066 "single function mode minmax will be disabled\n");
5067 }
5068
5069
5070 /* Store it to internal memory */
5071 if (bp->port.pmf)
5072 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5073 REG_WR(bp, BAR_XSTRORM_INTMEM +
5074 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5075 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5076}
5077
471de716
EG
5078static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5079{
5080 switch (load_code) {
5081 case FW_MSG_CODE_DRV_LOAD_COMMON:
5082 bnx2x_init_internal_common(bp);
5083 /* no break */
5084
5085 case FW_MSG_CODE_DRV_LOAD_PORT:
5086 bnx2x_init_internal_port(bp);
5087 /* no break */
5088
5089 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5090 bnx2x_init_internal_func(bp);
5091 break;
5092
5093 default:
5094 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5095 break;
5096 }
5097}
5098
5099static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5100{
5101 int i;
5102
5103 for_each_queue(bp, i) {
5104 struct bnx2x_fastpath *fp = &bp->fp[i];
5105
34f80b04 5106 fp->bp = bp;
a2fbb9ea 5107 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5108 fp->index = i;
34f80b04
EG
5109 fp->cl_id = BP_L_ID(bp) + i;
5110 fp->sb_id = fp->cl_id;
5111 DP(NETIF_MSG_IFUP,
5112 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5113 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848
EG
5114 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5115 FP_SB_ID(fp));
5116 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5117 }
5118
5c862848
EG
5119 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5120 DEF_SB_ID);
5121 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5122 bnx2x_update_coalesce(bp);
5123 bnx2x_init_rx_rings(bp);
5124 bnx2x_init_tx_ring(bp);
5125 bnx2x_init_sp_ring(bp);
5126 bnx2x_init_context(bp);
471de716 5127 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5128 bnx2x_init_ind_table(bp);
0ef00459
EG
5129 bnx2x_stats_init(bp);
5130
5131 /* At this point, we are ready for interrupts */
5132 atomic_set(&bp->intr_sem, 0);
5133
5134 /* flush all before enabling interrupts */
5135 mb();
5136 mmiowb();
5137
615f8fd9 5138 bnx2x_int_enable(bp);
a2fbb9ea
ET
5139}
5140
5141/* end of nic init */
5142
5143/*
5144 * gzip service functions
5145 */
5146
5147static int bnx2x_gunzip_init(struct bnx2x *bp)
5148{
5149 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5150 &bp->gunzip_mapping);
5151 if (bp->gunzip_buf == NULL)
5152 goto gunzip_nomem1;
5153
5154 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5155 if (bp->strm == NULL)
5156 goto gunzip_nomem2;
5157
5158 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5159 GFP_KERNEL);
5160 if (bp->strm->workspace == NULL)
5161 goto gunzip_nomem3;
5162
5163 return 0;
5164
5165gunzip_nomem3:
5166 kfree(bp->strm);
5167 bp->strm = NULL;
5168
5169gunzip_nomem2:
5170 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5171 bp->gunzip_mapping);
5172 bp->gunzip_buf = NULL;
5173
5174gunzip_nomem1:
5175 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5176 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
5177 return -ENOMEM;
5178}
5179
5180static void bnx2x_gunzip_end(struct bnx2x *bp)
5181{
5182 kfree(bp->strm->workspace);
5183
5184 kfree(bp->strm);
5185 bp->strm = NULL;
5186
5187 if (bp->gunzip_buf) {
5188 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5189 bp->gunzip_mapping);
5190 bp->gunzip_buf = NULL;
5191 }
5192}
5193
5194static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5195{
5196 int n, rc;
5197
5198 /* check gzip header */
5199 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5200 return -EINVAL;
5201
5202 n = 10;
5203
34f80b04 5204#define FNAME 0x8
a2fbb9ea
ET
5205
5206 if (zbuf[3] & FNAME)
5207 while ((zbuf[n++] != 0) && (n < len));
5208
5209 bp->strm->next_in = zbuf + n;
5210 bp->strm->avail_in = len - n;
5211 bp->strm->next_out = bp->gunzip_buf;
5212 bp->strm->avail_out = FW_BUF_SIZE;
5213
5214 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5215 if (rc != Z_OK)
5216 return rc;
5217
5218 rc = zlib_inflate(bp->strm, Z_FINISH);
5219 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5220 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5221 bp->dev->name, bp->strm->msg);
5222
5223 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5224 if (bp->gunzip_outlen & 0x3)
5225 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5226 " gunzip_outlen (%d) not aligned\n",
5227 bp->dev->name, bp->gunzip_outlen);
5228 bp->gunzip_outlen >>= 2;
5229
5230 zlib_inflateEnd(bp->strm);
5231
5232 if (rc == Z_STREAM_END)
5233 return 0;
5234
5235 return rc;
5236}
5237
5238/* nic load/unload */
5239
5240/*
34f80b04 5241 * General service functions
a2fbb9ea
ET
5242 */
5243
5244/* send a NIG loopback debug packet */
5245static void bnx2x_lb_pckt(struct bnx2x *bp)
5246{
a2fbb9ea 5247 u32 wb_write[3];
a2fbb9ea
ET
5248
5249 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5250 wb_write[0] = 0x55555555;
5251 wb_write[1] = 0x55555555;
34f80b04 5252 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5253 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5254
5255 /* NON-IP protocol */
a2fbb9ea
ET
5256 wb_write[0] = 0x09000000;
5257 wb_write[1] = 0x55555555;
34f80b04 5258 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5259 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5260}
5261
5262/* some of the internal memories
5263 * are not directly readable from the driver
5264 * to test them we send debug packets
5265 */
5266static int bnx2x_int_mem_test(struct bnx2x *bp)
5267{
5268 int factor;
5269 int count, i;
5270 u32 val = 0;
5271
ad8d3948 5272 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5273 factor = 120;
ad8d3948
EG
5274 else if (CHIP_REV_IS_EMUL(bp))
5275 factor = 200;
5276 else
a2fbb9ea 5277 factor = 1;
a2fbb9ea
ET
5278
5279 DP(NETIF_MSG_HW, "start part1\n");
5280
5281 /* Disable inputs of parser neighbor blocks */
5282 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5283 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5284 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5285 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5286
5287 /* Write 0 to parser credits for CFC search request */
5288 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5289
5290 /* send Ethernet packet */
5291 bnx2x_lb_pckt(bp);
5292
5293 /* TODO do i reset NIG statistic? */
5294 /* Wait until NIG register shows 1 packet of size 0x10 */
5295 count = 1000 * factor;
5296 while (count) {
34f80b04 5297
a2fbb9ea
ET
5298 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5299 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5300 if (val == 0x10)
5301 break;
5302
5303 msleep(10);
5304 count--;
5305 }
5306 if (val != 0x10) {
5307 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5308 return -1;
5309 }
5310
5311 /* Wait until PRS register shows 1 packet */
5312 count = 1000 * factor;
5313 while (count) {
5314 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5315 if (val == 1)
5316 break;
5317
5318 msleep(10);
5319 count--;
5320 }
5321 if (val != 0x1) {
5322 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5323 return -2;
5324 }
5325
5326 /* Reset and init BRB, PRS */
34f80b04 5327 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5328 msleep(50);
34f80b04 5329 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
5330 msleep(50);
5331 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5332 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5333
5334 DP(NETIF_MSG_HW, "part2\n");
5335
5336 /* Disable inputs of parser neighbor blocks */
5337 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5338 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5339 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5340 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5341
5342 /* Write 0 to parser credits for CFC search request */
5343 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5344
5345 /* send 10 Ethernet packets */
5346 for (i = 0; i < 10; i++)
5347 bnx2x_lb_pckt(bp);
5348
5349 /* Wait until NIG register shows 10 + 1
5350 packets of size 11*0x10 = 0xb0 */
5351 count = 1000 * factor;
5352 while (count) {
34f80b04 5353
a2fbb9ea
ET
5354 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5355 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5356 if (val == 0xb0)
5357 break;
5358
5359 msleep(10);
5360 count--;
5361 }
5362 if (val != 0xb0) {
5363 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5364 return -3;
5365 }
5366
5367 /* Wait until PRS register shows 2 packets */
5368 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5369 if (val != 2)
5370 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5371
5372 /* Write 1 to parser credits for CFC search request */
5373 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5374
5375 /* Wait until PRS register shows 3 packets */
5376 msleep(10 * factor);
5377 /* Wait until NIG register shows 1 packet of size 0x10 */
5378 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5379 if (val != 3)
5380 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5381
5382 /* clear NIG EOP FIFO */
5383 for (i = 0; i < 11; i++)
5384 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5385 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5386 if (val != 1) {
5387 BNX2X_ERR("clear of NIG failed\n");
5388 return -4;
5389 }
5390
5391 /* Reset and init BRB, PRS, NIG */
5392 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5393 msleep(50);
5394 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5395 msleep(50);
5396 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5397 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5398#ifndef BCM_ISCSI
5399 /* set NIC mode */
5400 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5401#endif
5402
5403 /* Enable inputs of parser neighbor blocks */
5404 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5405 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5406 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5407 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5408
5409 DP(NETIF_MSG_HW, "done\n");
5410
5411 return 0; /* OK */
5412}
5413
5414static void enable_blocks_attention(struct bnx2x *bp)
5415{
5416 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5417 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5418 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5419 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5420 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5421 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5422 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5423 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5424 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5425/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5426/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5427 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5428 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5429 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5430/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5431/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5432 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5433 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5434 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5435 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5436/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5437/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5438 if (CHIP_REV_IS_FPGA(bp))
5439 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5440 else
5441 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5442 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5443 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5444 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5445/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5446/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5447 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5448 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5449/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5450 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5451}
5452
34f80b04 5453
81f75bbf
EG
5454static void bnx2x_reset_common(struct bnx2x *bp)
5455{
5456 /* reset_common */
5457 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5458 0xd3ffff7f);
5459 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5460}
5461
34f80b04 5462static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5463{
a2fbb9ea 5464 u32 val, i;
a2fbb9ea 5465
34f80b04 5466 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5467
81f75bbf 5468 bnx2x_reset_common(bp);
34f80b04
EG
5469 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5470 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5471
34f80b04
EG
5472 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5473 if (CHIP_IS_E1H(bp))
5474 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5475
34f80b04
EG
5476 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5477 msleep(30);
5478 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5479
34f80b04
EG
5480 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5481 if (CHIP_IS_E1(bp)) {
5482 /* enable HW interrupt from PXP on USDM overflow
5483 bit 16 on INT_MASK_0 */
5484 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5485 }
a2fbb9ea 5486
34f80b04
EG
5487 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5488 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5489
5490#ifdef __BIG_ENDIAN
34f80b04
EG
5491 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5492 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5493 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5494 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5495 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5496 /* make sure this value is 0 */
5497 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5498
5499/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5500 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5501 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5502 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5503 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5504#endif
5505
34f80b04 5506 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5507#ifdef BCM_ISCSI
34f80b04
EG
5508 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5509 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5510 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
5511#endif
5512
34f80b04
EG
5513 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5514 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5515
34f80b04
EG
5516 /* let the HW do it's magic ... */
5517 msleep(100);
5518 /* finish PXP init */
5519 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5520 if (val != 1) {
5521 BNX2X_ERR("PXP2 CFG failed\n");
5522 return -EBUSY;
5523 }
5524 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5525 if (val != 1) {
5526 BNX2X_ERR("PXP2 RD_INIT failed\n");
5527 return -EBUSY;
5528 }
a2fbb9ea 5529
34f80b04
EG
5530 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5531 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5532
34f80b04 5533 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5534
34f80b04
EG
5535 /* clean the DMAE memory */
5536 bp->dmae_ready = 1;
5537 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5538
34f80b04
EG
5539 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5540 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5541 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5542 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5543
34f80b04
EG
5544 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5545 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5546 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5547 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5548
5549 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5550 /* soft reset pulse */
5551 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5552 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
5553
5554#ifdef BCM_ISCSI
34f80b04 5555 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5556#endif
a2fbb9ea 5557
34f80b04
EG
5558 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5559 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5560 if (!CHIP_REV_IS_SLOW(bp)) {
5561 /* enable hw interrupt from doorbell Q */
5562 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5563 }
a2fbb9ea 5564
34f80b04 5565 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5566 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5567 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
5568 /* set NIC mode */
5569 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
5570 if (CHIP_IS_E1H(bp))
5571 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5572
34f80b04
EG
5573 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5574 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5575 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5576 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5577
34f80b04
EG
5578 if (CHIP_IS_E1H(bp)) {
5579 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5580 STORM_INTMEM_SIZE_E1H/2);
5581 bnx2x_init_fill(bp,
5582 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5583 0, STORM_INTMEM_SIZE_E1H/2);
5584 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5585 STORM_INTMEM_SIZE_E1H/2);
5586 bnx2x_init_fill(bp,
5587 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5588 0, STORM_INTMEM_SIZE_E1H/2);
5589 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5590 STORM_INTMEM_SIZE_E1H/2);
5591 bnx2x_init_fill(bp,
5592 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5593 0, STORM_INTMEM_SIZE_E1H/2);
5594 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5595 STORM_INTMEM_SIZE_E1H/2);
5596 bnx2x_init_fill(bp,
5597 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5598 0, STORM_INTMEM_SIZE_E1H/2);
5599 } else { /* E1 */
ad8d3948
EG
5600 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5601 STORM_INTMEM_SIZE_E1);
5602 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5603 STORM_INTMEM_SIZE_E1);
5604 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5605 STORM_INTMEM_SIZE_E1);
5606 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5607 STORM_INTMEM_SIZE_E1);
34f80b04 5608 }
a2fbb9ea 5609
34f80b04
EG
5610 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5611 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5612 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5613 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5614
34f80b04
EG
5615 /* sync semi rtc */
5616 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5617 0x80000000);
5618 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5619 0x80000000);
a2fbb9ea 5620
34f80b04
EG
5621 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5622 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5623 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5624
34f80b04
EG
5625 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5626 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5627 REG_WR(bp, i, 0xc0cac01a);
5628 /* TODO: replace with something meaningful */
5629 }
8d9c5f34 5630 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5631 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5632
34f80b04
EG
5633 if (sizeof(union cdu_context) != 1024)
5634 /* we currently assume that a context is 1024 bytes */
5635 printk(KERN_ALERT PFX "please adjust the size of"
5636 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5637
34f80b04
EG
5638 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5639 val = (4 << 24) + (0 << 12) + 1024;
5640 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5641 if (CHIP_IS_E1(bp)) {
5642 /* !!! fix pxp client crdit until excel update */
5643 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5644 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5645 }
a2fbb9ea 5646
34f80b04
EG
5647 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5648 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
5649 /* enable context validation interrupt from CFC */
5650 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5651
5652 /* set the thresholds to prevent CFC/CDU race */
5653 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5654
34f80b04
EG
5655 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5656 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5657
34f80b04
EG
5658 /* PXPCS COMMON comes here */
5659 /* Reset PCIE errors for debug */
5660 REG_WR(bp, 0x2814, 0xffffffff);
5661 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5662
34f80b04
EG
5663 /* EMAC0 COMMON comes here */
5664 /* EMAC1 COMMON comes here */
5665 /* DBU COMMON comes here */
5666 /* DBG COMMON comes here */
5667
5668 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5669 if (CHIP_IS_E1H(bp)) {
5670 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5671 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5672 }
5673
5674 if (CHIP_REV_IS_SLOW(bp))
5675 msleep(200);
5676
5677 /* finish CFC init */
5678 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5679 if (val != 1) {
5680 BNX2X_ERR("CFC LL_INIT failed\n");
5681 return -EBUSY;
5682 }
5683 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5684 if (val != 1) {
5685 BNX2X_ERR("CFC AC_INIT failed\n");
5686 return -EBUSY;
5687 }
5688 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5689 if (val != 1) {
5690 BNX2X_ERR("CFC CAM_INIT failed\n");
5691 return -EBUSY;
5692 }
5693 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5694
34f80b04
EG
5695 /* read NIG statistic
5696 to see if this is our first up since powerup */
5697 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5698 val = *bnx2x_sp(bp, wb_data[0]);
5699
5700 /* do internal memory self test */
5701 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5702 BNX2X_ERR("internal mem self test failed\n");
5703 return -EBUSY;
5704 }
5705
35b19ba5 5706 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
5707 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5708 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5709 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5710 bp->port.need_hw_lock = 1;
5711 break;
5712
35b19ba5 5713 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
34f80b04
EG
5714 /* Fan failure is indicated by SPIO 5 */
5715 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5716 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5717
5718 /* set to active low mode */
5719 val = REG_RD(bp, MISC_REG_SPIO_INT);
5720 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5721 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5722 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5723
34f80b04
EG
5724 /* enable interrupt to signal the IGU */
5725 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5726 val |= (1 << MISC_REGISTERS_SPIO_5);
5727 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5728 break;
f1410647 5729
34f80b04
EG
5730 default:
5731 break;
5732 }
f1410647 5733
34f80b04
EG
5734 /* clear PXP2 attentions */
5735 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5736
34f80b04 5737 enable_blocks_attention(bp);
a2fbb9ea 5738
6bbca910
YR
5739 if (!BP_NOMCP(bp)) {
5740 bnx2x_acquire_phy_lock(bp);
5741 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5742 bnx2x_release_phy_lock(bp);
5743 } else
5744 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5745
34f80b04
EG
5746 return 0;
5747}
a2fbb9ea 5748
34f80b04
EG
5749static int bnx2x_init_port(struct bnx2x *bp)
5750{
5751 int port = BP_PORT(bp);
1c06328c 5752 u32 low, high;
34f80b04 5753 u32 val;
a2fbb9ea 5754
34f80b04
EG
5755 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5756
5757 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
5758
5759 /* Port PXP comes here */
5760 /* Port PXP2 comes here */
a2fbb9ea
ET
5761#ifdef BCM_ISCSI
5762 /* Port0 1
5763 * Port1 385 */
5764 i++;
5765 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5766 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5767 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5768 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5769
5770 /* Port0 2
5771 * Port1 386 */
5772 i++;
5773 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5774 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5775 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5776 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5777
5778 /* Port0 3
5779 * Port1 387 */
5780 i++;
5781 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5782 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5783 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5784 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5785#endif
34f80b04 5786 /* Port CMs come here */
8d9c5f34
EG
5787 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5788 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea
ET
5789
5790 /* Port QM comes here */
a2fbb9ea
ET
5791#ifdef BCM_ISCSI
5792 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5793 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5794
5795 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5796 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5797#endif
5798 /* Port DQ comes here */
1c06328c
EG
5799
5800 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5801 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5802 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5803 /* no pause for emulation and FPGA */
5804 low = 0;
5805 high = 513;
5806 } else {
5807 if (IS_E1HMF(bp))
5808 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5809 else if (bp->dev->mtu > 4096) {
5810 if (bp->flags & ONE_PORT_FLAG)
5811 low = 160;
5812 else {
5813 val = bp->dev->mtu;
5814 /* (24*1024 + val*4)/256 */
5815 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5816 }
5817 } else
5818 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5819 high = low + 56; /* 14*1024/256 */
5820 }
5821 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5822 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5823
5824
ad8d3948 5825 /* Port PRS comes here */
a2fbb9ea
ET
5826 /* Port TSDM comes here */
5827 /* Port CSDM comes here */
5828 /* Port USDM comes here */
5829 /* Port XSDM comes here */
34f80b04
EG
5830 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5831 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5832 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5833 port ? USEM_PORT1_END : USEM_PORT0_END);
5834 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5835 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5836 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5837 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5838 /* Port UPB comes here */
34f80b04
EG
5839 /* Port XPB comes here */
5840
5841 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5842 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
5843
5844 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5845 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
5846
5847 /* update threshold */
34f80b04 5848 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5849 /* update init credit */
34f80b04 5850 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
5851
5852 /* probe changes */
34f80b04 5853 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5854 msleep(5);
34f80b04 5855 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
5856
5857#ifdef BCM_ISCSI
5858 /* tell the searcher where the T2 table is */
5859 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5860
5861 wb_write[0] = U64_LO(bp->t2_mapping);
5862 wb_write[1] = U64_HI(bp->t2_mapping);
5863 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5864 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5865 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5866 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5867
5868 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5869 /* Port SRCH comes here */
5870#endif
5871 /* Port CDU comes here */
5872 /* Port CFC comes here */
34f80b04
EG
5873
5874 if (CHIP_IS_E1(bp)) {
5875 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5876 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5877 }
5878 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5879 port ? HC_PORT1_END : HC_PORT0_END);
5880
5881 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5882 MISC_AEU_PORT0_START,
34f80b04
EG
5883 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5884 /* init aeu_mask_attn_func_0/1:
5885 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5886 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5887 * bits 4-7 are used for "per vn group attention" */
5888 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5889 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5890
a2fbb9ea
ET
5891 /* Port PXPCS comes here */
5892 /* Port EMAC0 comes here */
5893 /* Port EMAC1 comes here */
5894 /* Port DBU comes here */
5895 /* Port DBG comes here */
34f80b04
EG
5896 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5897 port ? NIG_PORT1_END : NIG_PORT0_END);
5898
5899 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5900
5901 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
5902 /* 0x2 disable e1hov, 0x1 enable */
5903 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5904 (IS_E1HMF(bp) ? 0x1 : 0x2));
5905
1c06328c
EG
5906 /* support pause requests from USDM, TSDM and BRB */
5907 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5908
5909 {
5910 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5911 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5912 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5913 }
34f80b04
EG
5914 }
5915
a2fbb9ea
ET
5916 /* Port MCP comes here */
5917 /* Port DMAE comes here */
5918
35b19ba5 5919 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
5920 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5921 {
5922 u32 swap_val, swap_override, aeu_gpio_mask, offset;
5923
5924 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
5925 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
5926
5927 /* The GPIO should be swapped if the swap register is
5928 set and active */
5929 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5930 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5931
5932 /* Select function upon port-swap configuration */
5933 if (port == 0) {
5934 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
5935 aeu_gpio_mask = (swap_val && swap_override) ?
5936 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
5937 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
5938 } else {
5939 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
5940 aeu_gpio_mask = (swap_val && swap_override) ?
5941 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
5942 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
5943 }
5944 val = REG_RD(bp, offset);
5945 /* add GPIO3 to group */
5946 val |= aeu_gpio_mask;
5947 REG_WR(bp, offset, val);
5948 }
5949 break;
5950
35b19ba5 5951 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
f1410647
ET
5952 /* add SPIO 5 to group 0 */
5953 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5954 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5955 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5956 break;
5957
5958 default:
5959 break;
5960 }
5961
c18487ee 5962 bnx2x__link_reset(bp);
a2fbb9ea 5963
34f80b04
EG
5964 return 0;
5965}
5966
5967#define ILT_PER_FUNC (768/2)
5968#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5969/* the phys address is shifted right 12 bits and has an added
5970 1=valid bit added to the 53rd bit
5971 then since this is a wide register(TM)
5972 we split it into two 32 bit writes
5973 */
5974#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5975#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5976#define PXP_ONE_ILT(x) (((x) << 10) | x)
5977#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5978
5979#define CNIC_ILT_LINES 0
5980
5981static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5982{
5983 int reg;
5984
5985 if (CHIP_IS_E1H(bp))
5986 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5987 else /* E1 */
5988 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5989
5990 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5991}
5992
5993static int bnx2x_init_func(struct bnx2x *bp)
5994{
5995 int port = BP_PORT(bp);
5996 int func = BP_FUNC(bp);
8badd27a 5997 u32 addr, val;
34f80b04
EG
5998 int i;
5999
6000 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6001
8badd27a
EG
6002 /* set MSI reconfigure capability */
6003 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6004 val = REG_RD(bp, addr);
6005 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6006 REG_WR(bp, addr, val);
6007
34f80b04
EG
6008 i = FUNC_ILT_BASE(func);
6009
6010 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6011 if (CHIP_IS_E1H(bp)) {
6012 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6013 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6014 } else /* E1 */
6015 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6016 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6017
6018
6019 if (CHIP_IS_E1H(bp)) {
6020 for (i = 0; i < 9; i++)
6021 bnx2x_init_block(bp,
6022 cm_start[func][i], cm_end[func][i]);
6023
6024 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6025 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6026 }
6027
6028 /* HC init per function */
6029 if (CHIP_IS_E1H(bp)) {
6030 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6031
6032 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6033 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6034 }
6035 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
6036
c14423fe 6037 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6038 REG_WR(bp, 0x2114, 0xffffffff);
6039 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6040
34f80b04
EG
6041 return 0;
6042}
6043
6044static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6045{
6046 int i, rc = 0;
a2fbb9ea 6047
34f80b04
EG
6048 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6049 BP_FUNC(bp), load_code);
a2fbb9ea 6050
34f80b04
EG
6051 bp->dmae_ready = 0;
6052 mutex_init(&bp->dmae_mutex);
6053 bnx2x_gunzip_init(bp);
a2fbb9ea 6054
34f80b04
EG
6055 switch (load_code) {
6056 case FW_MSG_CODE_DRV_LOAD_COMMON:
6057 rc = bnx2x_init_common(bp);
6058 if (rc)
6059 goto init_hw_err;
6060 /* no break */
6061
6062 case FW_MSG_CODE_DRV_LOAD_PORT:
6063 bp->dmae_ready = 1;
6064 rc = bnx2x_init_port(bp);
6065 if (rc)
6066 goto init_hw_err;
6067 /* no break */
6068
6069 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6070 bp->dmae_ready = 1;
6071 rc = bnx2x_init_func(bp);
6072 if (rc)
6073 goto init_hw_err;
6074 break;
6075
6076 default:
6077 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6078 break;
6079 }
6080
6081 if (!BP_NOMCP(bp)) {
6082 int func = BP_FUNC(bp);
a2fbb9ea
ET
6083
6084 bp->fw_drv_pulse_wr_seq =
34f80b04 6085 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6086 DRV_PULSE_SEQ_MASK);
34f80b04
EG
6087 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6088 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6089 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6090 } else
6091 bp->func_stx = 0;
a2fbb9ea 6092
34f80b04
EG
6093 /* this needs to be done before gunzip end */
6094 bnx2x_zero_def_sb(bp);
6095 for_each_queue(bp, i)
6096 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6097
6098init_hw_err:
6099 bnx2x_gunzip_end(bp);
6100
6101 return rc;
a2fbb9ea
ET
6102}
6103
c14423fe 6104/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
6105static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6106{
34f80b04 6107 int func = BP_FUNC(bp);
f1410647
ET
6108 u32 seq = ++bp->fw_seq;
6109 u32 rc = 0;
19680c48
EG
6110 u32 cnt = 1;
6111 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6112
34f80b04 6113 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6114 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6115
19680c48
EG
6116 do {
6117 /* let the FW do it's magic ... */
6118 msleep(delay);
a2fbb9ea 6119
19680c48 6120 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6121
19680c48
EG
6122 /* Give the FW up to 2 second (200*10ms) */
6123 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6124
6125 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6126 cnt*delay, rc, seq);
a2fbb9ea
ET
6127
6128 /* is this a reply to our command? */
6129 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6130 rc &= FW_MSG_CODE_MASK;
f1410647 6131
a2fbb9ea
ET
6132 } else {
6133 /* FW BUG! */
6134 BNX2X_ERR("FW failed to respond!\n");
6135 bnx2x_fw_dump(bp);
6136 rc = 0;
6137 }
f1410647 6138
a2fbb9ea
ET
6139 return rc;
6140}
6141
6142static void bnx2x_free_mem(struct bnx2x *bp)
6143{
6144
6145#define BNX2X_PCI_FREE(x, y, size) \
6146 do { \
6147 if (x) { \
6148 pci_free_consistent(bp->pdev, size, x, y); \
6149 x = NULL; \
6150 y = 0; \
6151 } \
6152 } while (0)
6153
6154#define BNX2X_FREE(x) \
6155 do { \
6156 if (x) { \
6157 vfree(x); \
6158 x = NULL; \
6159 } \
6160 } while (0)
6161
6162 int i;
6163
6164 /* fastpath */
555f6c78 6165 /* Common */
a2fbb9ea
ET
6166 for_each_queue(bp, i) {
6167
555f6c78 6168 /* status blocks */
a2fbb9ea
ET
6169 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6170 bnx2x_fp(bp, i, status_blk_mapping),
6171 sizeof(struct host_status_block) +
6172 sizeof(struct eth_tx_db_data));
555f6c78
EG
6173 }
6174 /* Rx */
6175 for_each_rx_queue(bp, i) {
a2fbb9ea 6176
555f6c78 6177 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6178 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6179 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6180 bnx2x_fp(bp, i, rx_desc_mapping),
6181 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6182
6183 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6184 bnx2x_fp(bp, i, rx_comp_mapping),
6185 sizeof(struct eth_fast_path_rx_cqe) *
6186 NUM_RCQ_BD);
a2fbb9ea 6187
7a9b2557 6188 /* SGE ring */
32626230 6189 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557
VZ
6190 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6191 bnx2x_fp(bp, i, rx_sge_mapping),
6192 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6193 }
555f6c78
EG
6194 /* Tx */
6195 for_each_tx_queue(bp, i) {
6196
6197 /* fastpath tx rings: tx_buf tx_desc */
6198 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6199 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6200 bnx2x_fp(bp, i, tx_desc_mapping),
6201 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6202 }
a2fbb9ea
ET
6203 /* end of fastpath */
6204
6205 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6206 sizeof(struct host_def_status_block));
a2fbb9ea
ET
6207
6208 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6209 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
6210
6211#ifdef BCM_ISCSI
6212 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6213 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6214 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6215 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6216#endif
7a9b2557 6217 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea
ET
6218
6219#undef BNX2X_PCI_FREE
6220#undef BNX2X_KFREE
6221}
6222
6223static int bnx2x_alloc_mem(struct bnx2x *bp)
6224{
6225
6226#define BNX2X_PCI_ALLOC(x, y, size) \
6227 do { \
6228 x = pci_alloc_consistent(bp->pdev, size, y); \
6229 if (x == NULL) \
6230 goto alloc_mem_err; \
6231 memset(x, 0, size); \
6232 } while (0)
6233
6234#define BNX2X_ALLOC(x, size) \
6235 do { \
6236 x = vmalloc(size); \
6237 if (x == NULL) \
6238 goto alloc_mem_err; \
6239 memset(x, 0, size); \
6240 } while (0)
6241
6242 int i;
6243
6244 /* fastpath */
555f6c78 6245 /* Common */
a2fbb9ea
ET
6246 for_each_queue(bp, i) {
6247 bnx2x_fp(bp, i, bp) = bp;
6248
555f6c78 6249 /* status blocks */
a2fbb9ea
ET
6250 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6251 &bnx2x_fp(bp, i, status_blk_mapping),
6252 sizeof(struct host_status_block) +
6253 sizeof(struct eth_tx_db_data));
555f6c78
EG
6254 }
6255 /* Rx */
6256 for_each_rx_queue(bp, i) {
a2fbb9ea 6257
555f6c78 6258 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea
ET
6259 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6260 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6261 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6262 &bnx2x_fp(bp, i, rx_desc_mapping),
6263 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6264
6265 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6266 &bnx2x_fp(bp, i, rx_comp_mapping),
6267 sizeof(struct eth_fast_path_rx_cqe) *
6268 NUM_RCQ_BD);
6269
7a9b2557
VZ
6270 /* SGE ring */
6271 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6272 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6273 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6274 &bnx2x_fp(bp, i, rx_sge_mapping),
6275 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6276 }
555f6c78
EG
6277 /* Tx */
6278 for_each_tx_queue(bp, i) {
6279
6280 bnx2x_fp(bp, i, hw_tx_prods) =
6281 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6282
6283 bnx2x_fp(bp, i, tx_prods_mapping) =
6284 bnx2x_fp(bp, i, status_blk_mapping) +
6285 sizeof(struct host_status_block);
6286
6287 /* fastpath tx rings: tx_buf tx_desc */
6288 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6289 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6290 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6291 &bnx2x_fp(bp, i, tx_desc_mapping),
6292 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6293 }
a2fbb9ea
ET
6294 /* end of fastpath */
6295
6296 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6297 sizeof(struct host_def_status_block));
6298
6299 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6300 sizeof(struct bnx2x_slowpath));
6301
6302#ifdef BCM_ISCSI
6303 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6304
6305 /* Initialize T1 */
6306 for (i = 0; i < 64*1024; i += 64) {
6307 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6308 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6309 }
6310
6311 /* allocate searcher T2 table
6312 we allocate 1/4 of alloc num for T2
6313 (which is not entered into the ILT) */
6314 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6315
6316 /* Initialize T2 */
6317 for (i = 0; i < 16*1024; i += 64)
6318 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6319
c14423fe 6320 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
6321 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6322
6323 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6324 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6325
6326 /* QM queues (128*MAX_CONN) */
6327 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6328#endif
6329
6330 /* Slow path ring */
6331 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6332
6333 return 0;
6334
6335alloc_mem_err:
6336 bnx2x_free_mem(bp);
6337 return -ENOMEM;
6338
6339#undef BNX2X_PCI_ALLOC
6340#undef BNX2X_ALLOC
6341}
6342
6343static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6344{
6345 int i;
6346
555f6c78 6347 for_each_tx_queue(bp, i) {
a2fbb9ea
ET
6348 struct bnx2x_fastpath *fp = &bp->fp[i];
6349
6350 u16 bd_cons = fp->tx_bd_cons;
6351 u16 sw_prod = fp->tx_pkt_prod;
6352 u16 sw_cons = fp->tx_pkt_cons;
6353
a2fbb9ea
ET
6354 while (sw_cons != sw_prod) {
6355 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6356 sw_cons++;
6357 }
6358 }
6359}
6360
6361static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6362{
6363 int i, j;
6364
555f6c78 6365 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
6366 struct bnx2x_fastpath *fp = &bp->fp[j];
6367
a2fbb9ea
ET
6368 for (i = 0; i < NUM_RX_BD; i++) {
6369 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6370 struct sk_buff *skb = rx_buf->skb;
6371
6372 if (skb == NULL)
6373 continue;
6374
6375 pci_unmap_single(bp->pdev,
6376 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6377 bp->rx_buf_size,
a2fbb9ea
ET
6378 PCI_DMA_FROMDEVICE);
6379
6380 rx_buf->skb = NULL;
6381 dev_kfree_skb(skb);
6382 }
7a9b2557 6383 if (!fp->disable_tpa)
32626230
EG
6384 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6385 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6386 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea
ET
6387 }
6388}
6389
6390static void bnx2x_free_skbs(struct bnx2x *bp)
6391{
6392 bnx2x_free_tx_skbs(bp);
6393 bnx2x_free_rx_skbs(bp);
6394}
6395
6396static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6397{
34f80b04 6398 int i, offset = 1;
a2fbb9ea
ET
6399
6400 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6401 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
6402 bp->msix_table[0].vector);
6403
6404 for_each_queue(bp, i) {
c14423fe 6405 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6406 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
6407 bnx2x_fp(bp, i, state));
6408
34f80b04 6409 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6410 }
a2fbb9ea
ET
6411}
6412
6413static void bnx2x_free_irq(struct bnx2x *bp)
6414{
a2fbb9ea 6415 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
6416 bnx2x_free_msix_irqs(bp);
6417 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
6418 bp->flags &= ~USING_MSIX_FLAG;
6419
8badd27a
EG
6420 } else if (bp->flags & USING_MSI_FLAG) {
6421 free_irq(bp->pdev->irq, bp->dev);
6422 pci_disable_msi(bp->pdev);
6423 bp->flags &= ~USING_MSI_FLAG;
6424
a2fbb9ea
ET
6425 } else
6426 free_irq(bp->pdev->irq, bp->dev);
6427}
6428
6429static int bnx2x_enable_msix(struct bnx2x *bp)
6430{
8badd27a
EG
6431 int i, rc, offset = 1;
6432 int igu_vec = 0;
a2fbb9ea 6433
8badd27a
EG
6434 bp->msix_table[0].entry = igu_vec;
6435 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6436
34f80b04 6437 for_each_queue(bp, i) {
8badd27a 6438 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04
EG
6439 bp->msix_table[i + offset].entry = igu_vec;
6440 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6441 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
6442 }
6443
34f80b04 6444 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6445 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6446 if (rc) {
8badd27a
EG
6447 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6448 return rc;
34f80b04 6449 }
8badd27a 6450
a2fbb9ea
ET
6451 bp->flags |= USING_MSIX_FLAG;
6452
6453 return 0;
a2fbb9ea
ET
6454}
6455
a2fbb9ea
ET
6456static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6457{
34f80b04 6458 int i, rc, offset = 1;
a2fbb9ea 6459
a2fbb9ea
ET
6460 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6461 bp->dev->name, bp->dev);
a2fbb9ea
ET
6462 if (rc) {
6463 BNX2X_ERR("request sp irq failed\n");
6464 return -EBUSY;
6465 }
6466
6467 for_each_queue(bp, i) {
555f6c78
EG
6468 struct bnx2x_fastpath *fp = &bp->fp[i];
6469
6470 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6471 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6472 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6473 if (rc) {
555f6c78 6474 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea
ET
6475 bnx2x_free_msix_irqs(bp);
6476 return -EBUSY;
6477 }
6478
555f6c78 6479 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
6480 }
6481
555f6c78
EG
6482 i = BNX2X_NUM_QUEUES(bp);
6483 if (is_multi(bp))
6484 printk(KERN_INFO PFX
6485 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6486 bp->dev->name, bp->msix_table[0].vector,
6487 bp->msix_table[offset].vector,
6488 bp->msix_table[offset + i - 1].vector);
6489 else
6490 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6491 bp->dev->name, bp->msix_table[0].vector,
6492 bp->msix_table[offset + i - 1].vector);
6493
a2fbb9ea 6494 return 0;
a2fbb9ea
ET
6495}
6496
8badd27a
EG
6497static int bnx2x_enable_msi(struct bnx2x *bp)
6498{
6499 int rc;
6500
6501 rc = pci_enable_msi(bp->pdev);
6502 if (rc) {
6503 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6504 return -1;
6505 }
6506 bp->flags |= USING_MSI_FLAG;
6507
6508 return 0;
6509}
6510
a2fbb9ea
ET
6511static int bnx2x_req_irq(struct bnx2x *bp)
6512{
8badd27a 6513 unsigned long flags;
34f80b04 6514 int rc;
a2fbb9ea 6515
8badd27a
EG
6516 if (bp->flags & USING_MSI_FLAG)
6517 flags = 0;
6518 else
6519 flags = IRQF_SHARED;
6520
6521 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6522 bp->dev->name, bp->dev);
a2fbb9ea
ET
6523 if (!rc)
6524 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6525
6526 return rc;
a2fbb9ea
ET
6527}
6528
65abd74d
YG
6529static void bnx2x_napi_enable(struct bnx2x *bp)
6530{
6531 int i;
6532
555f6c78 6533 for_each_rx_queue(bp, i)
65abd74d
YG
6534 napi_enable(&bnx2x_fp(bp, i, napi));
6535}
6536
6537static void bnx2x_napi_disable(struct bnx2x *bp)
6538{
6539 int i;
6540
555f6c78 6541 for_each_rx_queue(bp, i)
65abd74d
YG
6542 napi_disable(&bnx2x_fp(bp, i, napi));
6543}
6544
6545static void bnx2x_netif_start(struct bnx2x *bp)
6546{
6547 if (atomic_dec_and_test(&bp->intr_sem)) {
6548 if (netif_running(bp->dev)) {
65abd74d
YG
6549 bnx2x_napi_enable(bp);
6550 bnx2x_int_enable(bp);
555f6c78
EG
6551 if (bp->state == BNX2X_STATE_OPEN)
6552 netif_tx_wake_all_queues(bp->dev);
65abd74d
YG
6553 }
6554 }
6555}
6556
f8ef6e44 6557static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6558{
f8ef6e44 6559 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6560 bnx2x_napi_disable(bp);
65abd74d 6561 if (netif_running(bp->dev)) {
65abd74d
YG
6562 netif_tx_disable(bp->dev);
6563 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6564 }
6565}
6566
a2fbb9ea
ET
6567/*
6568 * Init service functions
6569 */
6570
3101c2bc 6571static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea
ET
6572{
6573 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6574 int port = BP_PORT(bp);
a2fbb9ea
ET
6575
6576 /* CAM allocation
6577 * unicasts 0-31:port0 32-63:port1
6578 * multicast 64-127:port0 128-191:port1
6579 */
8d9c5f34 6580 config->hdr.length = 2;
af246401 6581 config->hdr.offset = port ? 32 : 0;
34f80b04 6582 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6583 config->hdr.reserved1 = 0;
6584
6585 /* primary MAC */
6586 config->config_table[0].cam_entry.msb_mac_addr =
6587 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6588 config->config_table[0].cam_entry.middle_mac_addr =
6589 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6590 config->config_table[0].cam_entry.lsb_mac_addr =
6591 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6592 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6593 if (set)
6594 config->config_table[0].target_table_entry.flags = 0;
6595 else
6596 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea
ET
6597 config->config_table[0].target_table_entry.client_id = 0;
6598 config->config_table[0].target_table_entry.vlan_id = 0;
6599
3101c2bc
YG
6600 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6601 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6602 config->config_table[0].cam_entry.msb_mac_addr,
6603 config->config_table[0].cam_entry.middle_mac_addr,
6604 config->config_table[0].cam_entry.lsb_mac_addr);
6605
6606 /* broadcast */
6607 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6608 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6609 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6610 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc
YG
6611 if (set)
6612 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6613 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc
YG
6614 else
6615 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea
ET
6616 config->config_table[1].target_table_entry.client_id = 0;
6617 config->config_table[1].target_table_entry.vlan_id = 0;
6618
6619 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6620 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6621 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6622}
6623
3101c2bc 6624static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04
EG
6625{
6626 struct mac_configuration_cmd_e1h *config =
6627 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6628
3101c2bc 6629 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04
EG
6630 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6631 return;
6632 }
6633
6634 /* CAM allocation for E1H
6635 * unicasts: by func number
6636 * multicast: 20+FUNC*20, 20 each
6637 */
8d9c5f34 6638 config->hdr.length = 1;
34f80b04
EG
6639 config->hdr.offset = BP_FUNC(bp);
6640 config->hdr.client_id = BP_CL_ID(bp);
6641 config->hdr.reserved1 = 0;
6642
6643 /* primary MAC */
6644 config->config_table[0].msb_mac_addr =
6645 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6646 config->config_table[0].middle_mac_addr =
6647 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6648 config->config_table[0].lsb_mac_addr =
6649 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6650 config->config_table[0].client_id = BP_L_ID(bp);
6651 config->config_table[0].vlan_id = 0;
6652 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc
YG
6653 if (set)
6654 config->config_table[0].flags = BP_PORT(bp);
6655 else
6656 config->config_table[0].flags =
6657 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6658
3101c2bc
YG
6659 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6660 (set ? "setting" : "clearing"),
34f80b04
EG
6661 config->config_table[0].msb_mac_addr,
6662 config->config_table[0].middle_mac_addr,
6663 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6664
6665 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6666 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6667 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6668}
6669
a2fbb9ea
ET
6670static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6671 int *state_p, int poll)
6672{
6673 /* can take a while if any port is running */
8b3a0f0b 6674 int cnt = 5000;
a2fbb9ea 6675
c14423fe
ET
6676 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6677 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
6678
6679 might_sleep();
34f80b04 6680 while (cnt--) {
a2fbb9ea
ET
6681 if (poll) {
6682 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
6683 /* if index is different from 0
6684 * the reply for some commands will
3101c2bc 6685 * be on the non default queue
a2fbb9ea
ET
6686 */
6687 if (idx)
6688 bnx2x_rx_int(&bp->fp[idx], 10);
6689 }
a2fbb9ea 6690
3101c2bc 6691 mb(); /* state is changed by bnx2x_sp_event() */
8b3a0f0b
EG
6692 if (*state_p == state) {
6693#ifdef BNX2X_STOP_ON_ERROR
6694 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6695#endif
a2fbb9ea 6696 return 0;
8b3a0f0b 6697 }
a2fbb9ea 6698
a2fbb9ea 6699 msleep(1);
a2fbb9ea
ET
6700 }
6701
a2fbb9ea 6702 /* timeout! */
49d66772
ET
6703 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6704 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
6705#ifdef BNX2X_STOP_ON_ERROR
6706 bnx2x_panic();
6707#endif
a2fbb9ea 6708
49d66772 6709 return -EBUSY;
a2fbb9ea
ET
6710}
6711
6712static int bnx2x_setup_leading(struct bnx2x *bp)
6713{
34f80b04 6714 int rc;
a2fbb9ea 6715
c14423fe 6716 /* reset IGU state */
34f80b04 6717 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6718
6719 /* SETUP ramrod */
6720 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6721
34f80b04
EG
6722 /* Wait for completion */
6723 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6724
34f80b04 6725 return rc;
a2fbb9ea
ET
6726}
6727
6728static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6729{
555f6c78
EG
6730 struct bnx2x_fastpath *fp = &bp->fp[index];
6731
a2fbb9ea 6732 /* reset IGU state */
555f6c78 6733 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6734
228241eb 6735 /* SETUP ramrod */
555f6c78
EG
6736 fp->state = BNX2X_FP_STATE_OPENING;
6737 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6738 fp->cl_id, 0);
a2fbb9ea
ET
6739
6740 /* Wait for completion */
6741 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6742 &(fp->state), 0);
a2fbb9ea
ET
6743}
6744
a2fbb9ea 6745static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6746
8badd27a 6747static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6748{
555f6c78 6749 int num_queues;
a2fbb9ea 6750
8badd27a
EG
6751 switch (int_mode) {
6752 case INT_MODE_INTx:
6753 case INT_MODE_MSI:
555f6c78
EG
6754 num_queues = 1;
6755 bp->num_rx_queues = num_queues;
6756 bp->num_tx_queues = num_queues;
6757 DP(NETIF_MSG_IFUP,
6758 "set number of queues to %d\n", num_queues);
8badd27a
EG
6759 break;
6760
6761 case INT_MODE_MSIX:
6762 default:
555f6c78
EG
6763 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6764 num_queues = min_t(u32, num_online_cpus(),
6765 BNX2X_MAX_QUEUES(bp));
34f80b04 6766 else
555f6c78
EG
6767 num_queues = 1;
6768 bp->num_rx_queues = num_queues;
6769 bp->num_tx_queues = num_queues;
6770 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6771 " number of tx queues to %d\n",
6772 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f
EG
6773 /* if we can't use MSI-X we only need one fp,
6774 * so try to enable MSI-X with the requested number of fp's
6775 * and fallback to MSI or legacy INTx with one fp
6776 */
8badd27a 6777 if (bnx2x_enable_msix(bp)) {
34f80b04 6778 /* failed to enable MSI-X */
555f6c78
EG
6779 num_queues = 1;
6780 bp->num_rx_queues = num_queues;
6781 bp->num_tx_queues = num_queues;
6782 if (bp->multi_mode)
6783 BNX2X_ERR("Multi requested but failed to "
6784 "enable MSI-X set number of "
6785 "queues to %d\n", num_queues);
a2fbb9ea 6786 }
8badd27a 6787 break;
a2fbb9ea 6788 }
555f6c78 6789 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6790}
6791
6792static void bnx2x_set_rx_mode(struct net_device *dev);
6793
6794/* must be called with rtnl_lock */
6795static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6796{
6797 u32 load_code;
6798 int i, rc = 0;
6799#ifdef BNX2X_STOP_ON_ERROR
6800 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6801 if (unlikely(bp->panic))
6802 return -EPERM;
6803#endif
6804
6805 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6806
6807 bnx2x_set_int_mode(bp);
c14423fe 6808
a2fbb9ea
ET
6809 if (bnx2x_alloc_mem(bp))
6810 return -ENOMEM;
6811
555f6c78 6812 for_each_rx_queue(bp, i)
7a9b2557
VZ
6813 bnx2x_fp(bp, i, disable_tpa) =
6814 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6815
555f6c78 6816 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6817 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6818 bnx2x_poll, 128);
6819
6820#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6821 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6822 struct bnx2x_fastpath *fp = &bp->fp[i];
6823
6824 fp->poll_no_work = 0;
6825 fp->poll_calls = 0;
6826 fp->poll_max_calls = 0;
6827 fp->poll_complete = 0;
6828 fp->poll_exit = 0;
6829 }
6830#endif
6831 bnx2x_napi_enable(bp);
6832
34f80b04
EG
6833 if (bp->flags & USING_MSIX_FLAG) {
6834 rc = bnx2x_req_msix_irqs(bp);
6835 if (rc) {
6836 pci_disable_msix(bp->pdev);
2dfe0e1f 6837 goto load_error1;
34f80b04
EG
6838 }
6839 } else {
8badd27a
EG
6840 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6841 bnx2x_enable_msi(bp);
34f80b04
EG
6842 bnx2x_ack_int(bp);
6843 rc = bnx2x_req_irq(bp);
6844 if (rc) {
2dfe0e1f 6845 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6846 if (bp->flags & USING_MSI_FLAG)
6847 pci_disable_msi(bp->pdev);
2dfe0e1f 6848 goto load_error1;
a2fbb9ea 6849 }
8badd27a
EG
6850 if (bp->flags & USING_MSI_FLAG) {
6851 bp->dev->irq = bp->pdev->irq;
6852 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6853 bp->dev->name, bp->pdev->irq);
6854 }
a2fbb9ea
ET
6855 }
6856
2dfe0e1f
EG
6857 /* Send LOAD_REQUEST command to MCP
6858 Returns the type of LOAD command:
6859 if it is the first port to be initialized
6860 common blocks should be initialized, otherwise - not
6861 */
6862 if (!BP_NOMCP(bp)) {
6863 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6864 if (!load_code) {
6865 BNX2X_ERR("MCP response failure, aborting\n");
6866 rc = -EBUSY;
6867 goto load_error2;
6868 }
6869 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6870 rc = -EBUSY; /* other port in diagnostic mode */
6871 goto load_error2;
6872 }
6873
6874 } else {
6875 int port = BP_PORT(bp);
6876
6877 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6878 load_count[0], load_count[1], load_count[2]);
6879 load_count[0]++;
6880 load_count[1 + port]++;
6881 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6882 load_count[0], load_count[1], load_count[2]);
6883 if (load_count[0] == 1)
6884 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6885 else if (load_count[1 + port] == 1)
6886 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6887 else
6888 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6889 }
6890
6891 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6892 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6893 bp->port.pmf = 1;
6894 else
6895 bp->port.pmf = 0;
6896 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6897
a2fbb9ea 6898 /* Initialize HW */
34f80b04
EG
6899 rc = bnx2x_init_hw(bp, load_code);
6900 if (rc) {
a2fbb9ea 6901 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6902 goto load_error2;
a2fbb9ea
ET
6903 }
6904
a2fbb9ea 6905 /* Setup NIC internals and enable interrupts */
471de716 6906 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6907
6908 /* Send LOAD_DONE command to MCP */
34f80b04 6909 if (!BP_NOMCP(bp)) {
228241eb
ET
6910 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6911 if (!load_code) {
da5a662a 6912 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6913 rc = -EBUSY;
2dfe0e1f 6914 goto load_error3;
a2fbb9ea
ET
6915 }
6916 }
6917
6918 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6919
34f80b04
EG
6920 rc = bnx2x_setup_leading(bp);
6921 if (rc) {
da5a662a 6922 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6923 goto load_error3;
34f80b04 6924 }
a2fbb9ea 6925
34f80b04
EG
6926 if (CHIP_IS_E1H(bp))
6927 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6928 BNX2X_ERR("!!! mf_cfg function disabled\n");
6929 bp->state = BNX2X_STATE_DISABLED;
6930 }
a2fbb9ea 6931
34f80b04
EG
6932 if (bp->state == BNX2X_STATE_OPEN)
6933 for_each_nondefault_queue(bp, i) {
6934 rc = bnx2x_setup_multi(bp, i);
6935 if (rc)
2dfe0e1f 6936 goto load_error3;
34f80b04 6937 }
a2fbb9ea 6938
34f80b04 6939 if (CHIP_IS_E1(bp))
3101c2bc 6940 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6941 else
3101c2bc 6942 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6943
6944 if (bp->port.pmf)
6945 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6946
6947 /* Start fast path */
34f80b04
EG
6948 switch (load_mode) {
6949 case LOAD_NORMAL:
6950 /* Tx queue should be only reenabled */
555f6c78 6951 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6952 /* Initialize the receive filter. */
34f80b04
EG
6953 bnx2x_set_rx_mode(bp->dev);
6954 break;
6955
6956 case LOAD_OPEN:
555f6c78 6957 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6958 /* Initialize the receive filter. */
34f80b04 6959 bnx2x_set_rx_mode(bp->dev);
34f80b04 6960 break;
a2fbb9ea 6961
34f80b04 6962 case LOAD_DIAG:
2dfe0e1f 6963 /* Initialize the receive filter. */
a2fbb9ea 6964 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6965 bp->state = BNX2X_STATE_DIAG;
6966 break;
6967
6968 default:
6969 break;
a2fbb9ea
ET
6970 }
6971
34f80b04
EG
6972 if (!bp->port.pmf)
6973 bnx2x__link_status_update(bp);
6974
a2fbb9ea
ET
6975 /* start the timer */
6976 mod_timer(&bp->timer, jiffies + bp->current_interval);
6977
34f80b04 6978
a2fbb9ea
ET
6979 return 0;
6980
2dfe0e1f
EG
6981load_error3:
6982 bnx2x_int_disable_sync(bp, 1);
6983 if (!BP_NOMCP(bp)) {
6984 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6985 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6986 }
6987 bp->port.pmf = 0;
7a9b2557
VZ
6988 /* Free SKBs, SGEs, TPA pool and driver internals */
6989 bnx2x_free_skbs(bp);
555f6c78 6990 for_each_rx_queue(bp, i)
3196a88a 6991 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6992load_error2:
d1014634
YG
6993 /* Release IRQs */
6994 bnx2x_free_irq(bp);
2dfe0e1f
EG
6995load_error1:
6996 bnx2x_napi_disable(bp);
555f6c78 6997 for_each_rx_queue(bp, i)
7cde1c8b 6998 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6999 bnx2x_free_mem(bp);
7000
7001 /* TBD we really need to reset the chip
7002 if we want to recover from this */
34f80b04 7003 return rc;
a2fbb9ea
ET
7004}
7005
7006static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7007{
555f6c78 7008 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
7009 int rc;
7010
c14423fe 7011 /* halt the connection */
555f6c78
EG
7012 fp->state = BNX2X_FP_STATE_HALTING;
7013 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7014
34f80b04 7015 /* Wait for completion */
a2fbb9ea 7016 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7017 &(fp->state), 1);
c14423fe 7018 if (rc) /* timeout */
a2fbb9ea
ET
7019 return rc;
7020
7021 /* delete cfc entry */
7022 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7023
34f80b04
EG
7024 /* Wait for completion */
7025 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7026 &(fp->state), 1);
34f80b04 7027 return rc;
a2fbb9ea
ET
7028}
7029
da5a662a 7030static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7031{
49d66772 7032 u16 dsb_sp_prod_idx;
c14423fe 7033 /* if the other port is handling traffic,
a2fbb9ea 7034 this can take a lot of time */
34f80b04
EG
7035 int cnt = 500;
7036 int rc;
a2fbb9ea
ET
7037
7038 might_sleep();
7039
7040 /* Send HALT ramrod */
7041 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 7042 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 7043
34f80b04
EG
7044 /* Wait for completion */
7045 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7046 &(bp->fp[0].state), 1);
7047 if (rc) /* timeout */
da5a662a 7048 return rc;
a2fbb9ea 7049
49d66772 7050 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7051
228241eb 7052 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
7053 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7054
49d66772 7055 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
7056 we are going to reset the chip anyway
7057 so there is not much to do if this times out
7058 */
34f80b04 7059 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
7060 if (!cnt) {
7061 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7062 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7063 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7064#ifdef BNX2X_STOP_ON_ERROR
7065 bnx2x_panic();
7066#endif
36e552ab 7067 rc = -EBUSY;
34f80b04
EG
7068 break;
7069 }
7070 cnt--;
da5a662a 7071 msleep(1);
5650d9d4 7072 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
7073 }
7074 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7075 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
7076
7077 return rc;
a2fbb9ea
ET
7078}
7079
34f80b04
EG
7080static void bnx2x_reset_func(struct bnx2x *bp)
7081{
7082 int port = BP_PORT(bp);
7083 int func = BP_FUNC(bp);
7084 int base, i;
7085
7086 /* Configure IGU */
7087 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7088 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7089
34f80b04
EG
7090 /* Clear ILT */
7091 base = FUNC_ILT_BASE(func);
7092 for (i = base; i < base + ILT_PER_FUNC; i++)
7093 bnx2x_ilt_wr(bp, i, 0);
7094}
7095
7096static void bnx2x_reset_port(struct bnx2x *bp)
7097{
7098 int port = BP_PORT(bp);
7099 u32 val;
7100
7101 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7102
7103 /* Do not rcv packets to BRB */
7104 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7105 /* Do not direct rcv packets that are not for MCP to the BRB */
7106 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7107 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7108
7109 /* Configure AEU */
7110 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7111
7112 msleep(100);
7113 /* Check for BRB port occupancy */
7114 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7115 if (val)
7116 DP(NETIF_MSG_IFDOWN,
33471629 7117 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7118
7119 /* TODO: Close Doorbell port? */
7120}
7121
34f80b04
EG
7122static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7123{
7124 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7125 BP_FUNC(bp), reset_code);
7126
7127 switch (reset_code) {
7128 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7129 bnx2x_reset_port(bp);
7130 bnx2x_reset_func(bp);
7131 bnx2x_reset_common(bp);
7132 break;
7133
7134 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7135 bnx2x_reset_port(bp);
7136 bnx2x_reset_func(bp);
7137 break;
7138
7139 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7140 bnx2x_reset_func(bp);
7141 break;
49d66772 7142
34f80b04
EG
7143 default:
7144 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7145 break;
7146 }
7147}
7148
33471629 7149/* must be called with rtnl_lock */
34f80b04 7150static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7151{
da5a662a 7152 int port = BP_PORT(bp);
a2fbb9ea 7153 u32 reset_code = 0;
da5a662a 7154 int i, cnt, rc;
a2fbb9ea
ET
7155
7156 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7157
228241eb
ET
7158 bp->rx_mode = BNX2X_RX_MODE_NONE;
7159 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7160
f8ef6e44 7161 bnx2x_netif_stop(bp, 1);
e94d8af3 7162
34f80b04
EG
7163 del_timer_sync(&bp->timer);
7164 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7165 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7166 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7167
70b9986c
EG
7168 /* Release IRQs */
7169 bnx2x_free_irq(bp);
7170
555f6c78
EG
7171 /* Wait until tx fastpath tasks complete */
7172 for_each_tx_queue(bp, i) {
228241eb
ET
7173 struct bnx2x_fastpath *fp = &bp->fp[i];
7174
34f80b04 7175 cnt = 1000;
3e5b510e 7176 smp_mb();
e8b5fc51 7177 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7178
65abd74d 7179 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7180 if (!cnt) {
7181 BNX2X_ERR("timeout waiting for queue[%d]\n",
7182 i);
7183#ifdef BNX2X_STOP_ON_ERROR
7184 bnx2x_panic();
7185 return -EBUSY;
7186#else
7187 break;
7188#endif
7189 }
7190 cnt--;
da5a662a 7191 msleep(1);
3e5b510e 7192 smp_mb();
34f80b04 7193 }
228241eb 7194 }
da5a662a
VZ
7195 /* Give HW time to discard old tx messages */
7196 msleep(1);
a2fbb9ea 7197
3101c2bc
YG
7198 if (CHIP_IS_E1(bp)) {
7199 struct mac_configuration_cmd *config =
7200 bnx2x_sp(bp, mcast_config);
7201
7202 bnx2x_set_mac_addr_e1(bp, 0);
7203
8d9c5f34 7204 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7205 CAM_INVALIDATE(config->config_table[i]);
7206
8d9c5f34 7207 config->hdr.length = i;
3101c2bc
YG
7208 if (CHIP_REV_IS_SLOW(bp))
7209 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7210 else
7211 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7212 config->hdr.client_id = BP_CL_ID(bp);
7213 config->hdr.reserved1 = 0;
7214
7215 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7216 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7217 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7218
7219 } else { /* E1H */
65abd74d
YG
7220 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7221
3101c2bc
YG
7222 bnx2x_set_mac_addr_e1h(bp, 0);
7223
7224 for (i = 0; i < MC_HASH_SIZE; i++)
7225 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7226 }
7227
65abd74d
YG
7228 if (unload_mode == UNLOAD_NORMAL)
7229 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7230
7231 else if (bp->flags & NO_WOL_FLAG) {
7232 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7233 if (CHIP_IS_E1H(bp))
7234 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7235
7236 } else if (bp->wol) {
7237 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7238 u8 *mac_addr = bp->dev->dev_addr;
7239 u32 val;
7240 /* The mac address is written to entries 1-4 to
7241 preserve entry 0 which is used by the PMF */
7242 u8 entry = (BP_E1HVN(bp) + 1)*8;
7243
7244 val = (mac_addr[0] << 8) | mac_addr[1];
7245 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7246
7247 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7248 (mac_addr[4] << 8) | mac_addr[5];
7249 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7250
7251 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7252
7253 } else
7254 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7255
34f80b04
EG
7256 /* Close multi and leading connections
7257 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7258 for_each_nondefault_queue(bp, i)
7259 if (bnx2x_stop_multi(bp, i))
228241eb 7260 goto unload_error;
a2fbb9ea 7261
da5a662a
VZ
7262 rc = bnx2x_stop_leading(bp);
7263 if (rc) {
34f80b04 7264 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7265#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7266 return -EBUSY;
da5a662a
VZ
7267#else
7268 goto unload_error;
34f80b04 7269#endif
228241eb
ET
7270 }
7271
7272unload_error:
34f80b04 7273 if (!BP_NOMCP(bp))
228241eb 7274 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7275 else {
7276 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7277 load_count[0], load_count[1], load_count[2]);
7278 load_count[0]--;
da5a662a 7279 load_count[1 + port]--;
34f80b04
EG
7280 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7281 load_count[0], load_count[1], load_count[2]);
7282 if (load_count[0] == 0)
7283 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7284 else if (load_count[1 + port] == 0)
34f80b04
EG
7285 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7286 else
7287 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7288 }
a2fbb9ea 7289
34f80b04
EG
7290 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7291 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7292 bnx2x__link_reset(bp);
a2fbb9ea
ET
7293
7294 /* Reset the chip */
228241eb 7295 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7296
7297 /* Report UNLOAD_DONE to MCP */
34f80b04 7298 if (!BP_NOMCP(bp))
a2fbb9ea 7299 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7300 bp->port.pmf = 0;
a2fbb9ea 7301
7a9b2557 7302 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7303 bnx2x_free_skbs(bp);
555f6c78 7304 for_each_rx_queue(bp, i)
3196a88a 7305 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7306 for_each_rx_queue(bp, i)
7cde1c8b 7307 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7308 bnx2x_free_mem(bp);
7309
7310 bp->state = BNX2X_STATE_CLOSED;
228241eb 7311
a2fbb9ea
ET
7312 netif_carrier_off(bp->dev);
7313
7314 return 0;
7315}
7316
34f80b04
EG
7317static void bnx2x_reset_task(struct work_struct *work)
7318{
7319 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7320
7321#ifdef BNX2X_STOP_ON_ERROR
7322 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7323 " so reset not done to allow debug dump,\n"
7324 KERN_ERR " you will need to reboot when done\n");
7325 return;
7326#endif
7327
7328 rtnl_lock();
7329
7330 if (!netif_running(bp->dev))
7331 goto reset_task_exit;
7332
7333 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7334 bnx2x_nic_load(bp, LOAD_NORMAL);
7335
7336reset_task_exit:
7337 rtnl_unlock();
7338}
7339
a2fbb9ea
ET
7340/* end of nic load/unload */
7341
7342/* ethtool_ops */
7343
7344/*
7345 * Init service functions
7346 */
7347
f1ef27ef
EG
7348static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7349{
7350 switch (func) {
7351 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7352 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7353 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7354 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7355 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7356 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7357 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7358 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7359 default:
7360 BNX2X_ERR("Unsupported function index: %d\n", func);
7361 return (u32)(-1);
7362 }
7363}
7364
7365static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7366{
7367 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7368
7369 /* Flush all outstanding writes */
7370 mmiowb();
7371
7372 /* Pretend to be function 0 */
7373 REG_WR(bp, reg, 0);
7374 /* Flush the GRC transaction (in the chip) */
7375 new_val = REG_RD(bp, reg);
7376 if (new_val != 0) {
7377 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7378 new_val);
7379 BUG();
7380 }
7381
7382 /* From now we are in the "like-E1" mode */
7383 bnx2x_int_disable(bp);
7384
7385 /* Flush all outstanding writes */
7386 mmiowb();
7387
7388 /* Restore the original funtion settings */
7389 REG_WR(bp, reg, orig_func);
7390 new_val = REG_RD(bp, reg);
7391 if (new_val != orig_func) {
7392 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7393 orig_func, new_val);
7394 BUG();
7395 }
7396}
7397
7398static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7399{
7400 if (CHIP_IS_E1H(bp))
7401 bnx2x_undi_int_disable_e1h(bp, func);
7402 else
7403 bnx2x_int_disable(bp);
7404}
7405
34f80b04
EG
7406static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7407{
7408 u32 val;
7409
7410 /* Check if there is any driver already loaded */
7411 val = REG_RD(bp, MISC_REG_UNPREPARED);
7412 if (val == 0x1) {
7413 /* Check if it is the UNDI driver
7414 * UNDI driver initializes CID offset for normal bell to 0x7
7415 */
4a37fb66 7416 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7417 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7418 if (val == 0x7) {
7419 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7420 /* save our func */
34f80b04 7421 int func = BP_FUNC(bp);
da5a662a
VZ
7422 u32 swap_en;
7423 u32 swap_val;
34f80b04 7424
b4661739
EG
7425 /* clear the UNDI indication */
7426 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7427
34f80b04
EG
7428 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7429
7430 /* try unload UNDI on port 0 */
7431 bp->func = 0;
da5a662a
VZ
7432 bp->fw_seq =
7433 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7434 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7435 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7436
7437 /* if UNDI is loaded on the other port */
7438 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7439
da5a662a
VZ
7440 /* send "DONE" for previous unload */
7441 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7442
7443 /* unload UNDI on port 1 */
34f80b04 7444 bp->func = 1;
da5a662a
VZ
7445 bp->fw_seq =
7446 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7447 DRV_MSG_SEQ_NUMBER_MASK);
7448 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7449
7450 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7451 }
7452
b4661739
EG
7453 /* now it's safe to release the lock */
7454 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7455
f1ef27ef 7456 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7457
7458 /* close input traffic and wait for it */
7459 /* Do not rcv packets to BRB */
7460 REG_WR(bp,
7461 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7462 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7463 /* Do not direct rcv packets that are not for MCP to
7464 * the BRB */
7465 REG_WR(bp,
7466 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7467 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7468 /* clear AEU */
7469 REG_WR(bp,
7470 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7471 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7472 msleep(10);
7473
7474 /* save NIG port swap info */
7475 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7476 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7477 /* reset device */
7478 REG_WR(bp,
7479 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7480 0xd3ffffff);
34f80b04
EG
7481 REG_WR(bp,
7482 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7483 0x1403);
da5a662a
VZ
7484 /* take the NIG out of reset and restore swap values */
7485 REG_WR(bp,
7486 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7487 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7488 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7489 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7490
7491 /* send unload done to the MCP */
7492 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7493
7494 /* restore our func and fw_seq */
7495 bp->func = func;
7496 bp->fw_seq =
7497 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7498 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7499
7500 } else
7501 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7502 }
7503}
7504
7505static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7506{
7507 u32 val, val2, val3, val4, id;
72ce58c3 7508 u16 pmc;
34f80b04
EG
7509
7510 /* Get the chip revision id and number. */
7511 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7512 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7513 id = ((val & 0xffff) << 16);
7514 val = REG_RD(bp, MISC_REG_CHIP_REV);
7515 id |= ((val & 0xf) << 12);
7516 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7517 id |= ((val & 0xff) << 4);
5a40e08e 7518 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7519 id |= (val & 0xf);
7520 bp->common.chip_id = id;
7521 bp->link_params.chip_id = bp->common.chip_id;
7522 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7523
1c06328c
EG
7524 val = (REG_RD(bp, 0x2874) & 0x55);
7525 if ((bp->common.chip_id & 0x1) ||
7526 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7527 bp->flags |= ONE_PORT_FLAG;
7528 BNX2X_DEV_INFO("single port device\n");
7529 }
7530
34f80b04
EG
7531 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7532 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7533 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7534 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7535 bp->common.flash_size, bp->common.flash_size);
7536
7537 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7538 bp->link_params.shmem_base = bp->common.shmem_base;
7539 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7540
7541 if (!bp->common.shmem_base ||
7542 (bp->common.shmem_base < 0xA0000) ||
7543 (bp->common.shmem_base >= 0xC0000)) {
7544 BNX2X_DEV_INFO("MCP not active\n");
7545 bp->flags |= NO_MCP_FLAG;
7546 return;
7547 }
7548
7549 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7550 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7551 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7552 BNX2X_ERR("BAD MCP validity signature\n");
7553
7554 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7555 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7556
7557 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7558 SHARED_HW_CFG_LED_MODE_MASK) >>
7559 SHARED_HW_CFG_LED_MODE_SHIFT);
7560
c2c8b03e
EG
7561 bp->link_params.feature_config_flags = 0;
7562 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7563 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7564 bp->link_params.feature_config_flags |=
7565 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7566 else
7567 bp->link_params.feature_config_flags &=
7568 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7569
34f80b04
EG
7570 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7571 bp->common.bc_ver = val;
7572 BNX2X_DEV_INFO("bc_ver %X\n", val);
7573 if (val < BNX2X_BC_VER) {
7574 /* for now only warn
7575 * later we might need to enforce this */
7576 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7577 " please upgrade BC\n", BNX2X_BC_VER, val);
7578 }
72ce58c3
EG
7579
7580 if (BP_E1HVN(bp) == 0) {
7581 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7582 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7583 } else {
7584 /* no WOL capability for E1HVN != 0 */
7585 bp->flags |= NO_WOL_FLAG;
7586 }
7587 BNX2X_DEV_INFO("%sWoL capable\n",
7588 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7589
7590 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7591 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7592 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7593 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7594
7595 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7596 val, val2, val3, val4);
7597}
7598
7599static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7600 u32 switch_cfg)
a2fbb9ea 7601{
34f80b04 7602 int port = BP_PORT(bp);
a2fbb9ea
ET
7603 u32 ext_phy_type;
7604
a2fbb9ea
ET
7605 switch (switch_cfg) {
7606 case SWITCH_CFG_1G:
7607 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7608
c18487ee
YR
7609 ext_phy_type =
7610 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7611 switch (ext_phy_type) {
7612 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7613 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7614 ext_phy_type);
7615
34f80b04
EG
7616 bp->port.supported |= (SUPPORTED_10baseT_Half |
7617 SUPPORTED_10baseT_Full |
7618 SUPPORTED_100baseT_Half |
7619 SUPPORTED_100baseT_Full |
7620 SUPPORTED_1000baseT_Full |
7621 SUPPORTED_2500baseX_Full |
7622 SUPPORTED_TP |
7623 SUPPORTED_FIBRE |
7624 SUPPORTED_Autoneg |
7625 SUPPORTED_Pause |
7626 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7627 break;
7628
7629 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7630 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7631 ext_phy_type);
7632
34f80b04
EG
7633 bp->port.supported |= (SUPPORTED_10baseT_Half |
7634 SUPPORTED_10baseT_Full |
7635 SUPPORTED_100baseT_Half |
7636 SUPPORTED_100baseT_Full |
7637 SUPPORTED_1000baseT_Full |
7638 SUPPORTED_TP |
7639 SUPPORTED_FIBRE |
7640 SUPPORTED_Autoneg |
7641 SUPPORTED_Pause |
7642 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7643 break;
7644
7645 default:
7646 BNX2X_ERR("NVRAM config error. "
7647 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7648 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7649 return;
7650 }
7651
34f80b04
EG
7652 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7653 port*0x10);
7654 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7655 break;
7656
7657 case SWITCH_CFG_10G:
7658 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7659
c18487ee
YR
7660 ext_phy_type =
7661 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7662 switch (ext_phy_type) {
7663 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7664 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7665 ext_phy_type);
7666
34f80b04
EG
7667 bp->port.supported |= (SUPPORTED_10baseT_Half |
7668 SUPPORTED_10baseT_Full |
7669 SUPPORTED_100baseT_Half |
7670 SUPPORTED_100baseT_Full |
7671 SUPPORTED_1000baseT_Full |
7672 SUPPORTED_2500baseX_Full |
7673 SUPPORTED_10000baseT_Full |
7674 SUPPORTED_TP |
7675 SUPPORTED_FIBRE |
7676 SUPPORTED_Autoneg |
7677 SUPPORTED_Pause |
7678 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7679 break;
7680
589abe3a
EG
7681 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7682 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 7683 ext_phy_type);
f1410647 7684
34f80b04 7685 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7686 SUPPORTED_1000baseT_Full |
34f80b04 7687 SUPPORTED_FIBRE |
589abe3a 7688 SUPPORTED_Autoneg |
34f80b04
EG
7689 SUPPORTED_Pause |
7690 SUPPORTED_Asym_Pause);
f1410647
ET
7691 break;
7692
589abe3a
EG
7693 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7694 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
f1410647
ET
7695 ext_phy_type);
7696
34f80b04 7697 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 7698 SUPPORTED_2500baseX_Full |
34f80b04 7699 SUPPORTED_1000baseT_Full |
589abe3a
EG
7700 SUPPORTED_FIBRE |
7701 SUPPORTED_Autoneg |
7702 SUPPORTED_Pause |
7703 SUPPORTED_Asym_Pause);
7704 break;
7705
7706 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7707 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7708 ext_phy_type);
7709
7710 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04
EG
7711 SUPPORTED_FIBRE |
7712 SUPPORTED_Pause |
7713 SUPPORTED_Asym_Pause);
f1410647
ET
7714 break;
7715
589abe3a
EG
7716 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7717 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
a2fbb9ea
ET
7718 ext_phy_type);
7719
34f80b04
EG
7720 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7721 SUPPORTED_1000baseT_Full |
7722 SUPPORTED_FIBRE |
34f80b04
EG
7723 SUPPORTED_Pause |
7724 SUPPORTED_Asym_Pause);
f1410647
ET
7725 break;
7726
589abe3a
EG
7727 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7728 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
c18487ee
YR
7729 ext_phy_type);
7730
34f80b04 7731 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 7732 SUPPORTED_1000baseT_Full |
34f80b04 7733 SUPPORTED_Autoneg |
589abe3a 7734 SUPPORTED_FIBRE |
34f80b04
EG
7735 SUPPORTED_Pause |
7736 SUPPORTED_Asym_Pause);
c18487ee
YR
7737 break;
7738
f1410647
ET
7739 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7740 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7741 ext_phy_type);
7742
34f80b04
EG
7743 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7744 SUPPORTED_TP |
7745 SUPPORTED_Autoneg |
7746 SUPPORTED_Pause |
7747 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7748 break;
7749
28577185
EG
7750 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7751 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7752 ext_phy_type);
7753
7754 bp->port.supported |= (SUPPORTED_10baseT_Half |
7755 SUPPORTED_10baseT_Full |
7756 SUPPORTED_100baseT_Half |
7757 SUPPORTED_100baseT_Full |
7758 SUPPORTED_1000baseT_Full |
7759 SUPPORTED_10000baseT_Full |
7760 SUPPORTED_TP |
7761 SUPPORTED_Autoneg |
7762 SUPPORTED_Pause |
7763 SUPPORTED_Asym_Pause);
7764 break;
7765
c18487ee
YR
7766 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7767 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7768 bp->link_params.ext_phy_config);
7769 break;
7770
a2fbb9ea
ET
7771 default:
7772 BNX2X_ERR("NVRAM config error. "
7773 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7774 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7775 return;
7776 }
7777
34f80b04
EG
7778 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7779 port*0x18);
7780 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7781
a2fbb9ea
ET
7782 break;
7783
7784 default:
7785 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7786 bp->port.link_config);
a2fbb9ea
ET
7787 return;
7788 }
34f80b04 7789 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7790
7791 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7792 if (!(bp->link_params.speed_cap_mask &
7793 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7794 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7795
c18487ee
YR
7796 if (!(bp->link_params.speed_cap_mask &
7797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7798 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7799
c18487ee
YR
7800 if (!(bp->link_params.speed_cap_mask &
7801 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7802 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7803
c18487ee
YR
7804 if (!(bp->link_params.speed_cap_mask &
7805 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7806 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7807
c18487ee
YR
7808 if (!(bp->link_params.speed_cap_mask &
7809 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7810 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7811 SUPPORTED_1000baseT_Full);
a2fbb9ea 7812
c18487ee
YR
7813 if (!(bp->link_params.speed_cap_mask &
7814 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7815 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7816
c18487ee
YR
7817 if (!(bp->link_params.speed_cap_mask &
7818 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7819 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7820
34f80b04 7821 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7822}
7823
34f80b04 7824static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7825{
c18487ee 7826 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7827
34f80b04 7828 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7829 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7830 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7831 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7832 bp->port.advertising = bp->port.supported;
a2fbb9ea 7833 } else {
c18487ee
YR
7834 u32 ext_phy_type =
7835 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7836
7837 if ((ext_phy_type ==
7838 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7839 (ext_phy_type ==
7840 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7841 /* force 10G, no AN */
c18487ee 7842 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7843 bp->port.advertising =
a2fbb9ea
ET
7844 (ADVERTISED_10000baseT_Full |
7845 ADVERTISED_FIBRE);
7846 break;
7847 }
7848 BNX2X_ERR("NVRAM config error. "
7849 "Invalid link_config 0x%x"
7850 " Autoneg not supported\n",
34f80b04 7851 bp->port.link_config);
a2fbb9ea
ET
7852 return;
7853 }
7854 break;
7855
7856 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7857 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7858 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7859 bp->port.advertising = (ADVERTISED_10baseT_Full |
7860 ADVERTISED_TP);
a2fbb9ea
ET
7861 } else {
7862 BNX2X_ERR("NVRAM config error. "
7863 "Invalid link_config 0x%x"
7864 " speed_cap_mask 0x%x\n",
34f80b04 7865 bp->port.link_config,
c18487ee 7866 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7867 return;
7868 }
7869 break;
7870
7871 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7872 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7873 bp->link_params.req_line_speed = SPEED_10;
7874 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7875 bp->port.advertising = (ADVERTISED_10baseT_Half |
7876 ADVERTISED_TP);
a2fbb9ea
ET
7877 } else {
7878 BNX2X_ERR("NVRAM config error. "
7879 "Invalid link_config 0x%x"
7880 " speed_cap_mask 0x%x\n",
34f80b04 7881 bp->port.link_config,
c18487ee 7882 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7883 return;
7884 }
7885 break;
7886
7887 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7888 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7889 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7890 bp->port.advertising = (ADVERTISED_100baseT_Full |
7891 ADVERTISED_TP);
a2fbb9ea
ET
7892 } else {
7893 BNX2X_ERR("NVRAM config error. "
7894 "Invalid link_config 0x%x"
7895 " speed_cap_mask 0x%x\n",
34f80b04 7896 bp->port.link_config,
c18487ee 7897 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7898 return;
7899 }
7900 break;
7901
7902 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7903 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7904 bp->link_params.req_line_speed = SPEED_100;
7905 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7906 bp->port.advertising = (ADVERTISED_100baseT_Half |
7907 ADVERTISED_TP);
a2fbb9ea
ET
7908 } else {
7909 BNX2X_ERR("NVRAM config error. "
7910 "Invalid link_config 0x%x"
7911 " speed_cap_mask 0x%x\n",
34f80b04 7912 bp->port.link_config,
c18487ee 7913 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7914 return;
7915 }
7916 break;
7917
7918 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7919 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7920 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7921 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7922 ADVERTISED_TP);
a2fbb9ea
ET
7923 } else {
7924 BNX2X_ERR("NVRAM config error. "
7925 "Invalid link_config 0x%x"
7926 " speed_cap_mask 0x%x\n",
34f80b04 7927 bp->port.link_config,
c18487ee 7928 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7929 return;
7930 }
7931 break;
7932
7933 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7934 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7935 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7936 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7937 ADVERTISED_TP);
a2fbb9ea
ET
7938 } else {
7939 BNX2X_ERR("NVRAM config error. "
7940 "Invalid link_config 0x%x"
7941 " speed_cap_mask 0x%x\n",
34f80b04 7942 bp->port.link_config,
c18487ee 7943 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7944 return;
7945 }
7946 break;
7947
7948 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7949 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7950 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7951 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7952 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7953 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7954 ADVERTISED_FIBRE);
a2fbb9ea
ET
7955 } else {
7956 BNX2X_ERR("NVRAM config error. "
7957 "Invalid link_config 0x%x"
7958 " speed_cap_mask 0x%x\n",
34f80b04 7959 bp->port.link_config,
c18487ee 7960 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7961 return;
7962 }
7963 break;
7964
7965 default:
7966 BNX2X_ERR("NVRAM config error. "
7967 "BAD link speed link_config 0x%x\n",
34f80b04 7968 bp->port.link_config);
c18487ee 7969 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7970 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7971 break;
7972 }
a2fbb9ea 7973
34f80b04
EG
7974 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7975 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7976 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7977 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7978 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7979
c18487ee 7980 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7981 " advertising 0x%x\n",
c18487ee
YR
7982 bp->link_params.req_line_speed,
7983 bp->link_params.req_duplex,
34f80b04 7984 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7985}
7986
34f80b04 7987static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7988{
34f80b04
EG
7989 int port = BP_PORT(bp);
7990 u32 val, val2;
589abe3a 7991 u32 config;
c2c8b03e 7992 u16 i;
a2fbb9ea 7993
c18487ee 7994 bp->link_params.bp = bp;
34f80b04 7995 bp->link_params.port = port;
c18487ee 7996
c18487ee 7997 bp->link_params.lane_config =
a2fbb9ea 7998 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7999 bp->link_params.ext_phy_config =
a2fbb9ea
ET
8000 SHMEM_RD(bp,
8001 dev_info.port_hw_config[port].external_phy_config);
c18487ee 8002 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
8003 SHMEM_RD(bp,
8004 dev_info.port_hw_config[port].speed_capability_mask);
8005
34f80b04 8006 bp->port.link_config =
a2fbb9ea
ET
8007 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8008
c2c8b03e
EG
8009 /* Get the 4 lanes xgxs config rx and tx */
8010 for (i = 0; i < 2; i++) {
8011 val = SHMEM_RD(bp,
8012 dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8013 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8014 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8015
8016 val = SHMEM_RD(bp,
8017 dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8018 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8019 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8020 }
8021
589abe3a
EG
8022 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8023 if (config & PORT_FEAT_CFG_OPT_MDL_ENFRCMNT_ENABLED)
8024 bp->link_params.feature_config_flags |=
8025 FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8026 else
8027 bp->link_params.feature_config_flags &=
8028 ~FEATURE_CONFIG_MODULE_ENFORCMENT_ENABLED;
8029
c2c8b03e
EG
8030 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8031 " speed_cap_mask 0x%08x link_config 0x%08x\n",
c18487ee
YR
8032 bp->link_params.lane_config,
8033 bp->link_params.ext_phy_config,
34f80b04 8034 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 8035
34f80b04 8036 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
8037 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8038 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8039
8040 bnx2x_link_settings_requested(bp);
8041
8042 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8043 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8044 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8045 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8046 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8047 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8048 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8049 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
8050 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8051 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
8052}
8053
8054static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8055{
8056 int func = BP_FUNC(bp);
8057 u32 val, val2;
8058 int rc = 0;
a2fbb9ea 8059
34f80b04 8060 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8061
34f80b04
EG
8062 bp->e1hov = 0;
8063 bp->e1hmf = 0;
8064 if (CHIP_IS_E1H(bp)) {
8065 bp->mf_config =
8066 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 8067
3196a88a
EG
8068 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8069 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 8070 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 8071
34f80b04
EG
8072 bp->e1hov = val;
8073 bp->e1hmf = 1;
8074 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8075 "(0x%04x)\n",
8076 func, bp->e1hov, bp->e1hov);
8077 } else {
8078 BNX2X_DEV_INFO("Single function mode\n");
8079 if (BP_E1HVN(bp)) {
8080 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8081 " aborting\n", func);
8082 rc = -EPERM;
8083 }
8084 }
8085 }
a2fbb9ea 8086
34f80b04
EG
8087 if (!BP_NOMCP(bp)) {
8088 bnx2x_get_port_hwinfo(bp);
8089
8090 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8091 DRV_MSG_SEQ_NUMBER_MASK);
8092 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8093 }
8094
8095 if (IS_E1HMF(bp)) {
8096 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8097 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8098 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8099 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8100 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8101 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8102 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8103 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8104 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8105 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8106 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8107 ETH_ALEN);
8108 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8109 ETH_ALEN);
a2fbb9ea 8110 }
34f80b04
EG
8111
8112 return rc;
a2fbb9ea
ET
8113 }
8114
34f80b04
EG
8115 if (BP_NOMCP(bp)) {
8116 /* only supposed to happen on emulation/FPGA */
33471629 8117 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
8118 random_ether_addr(bp->dev->dev_addr);
8119 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8120 }
a2fbb9ea 8121
34f80b04
EG
8122 return rc;
8123}
8124
8125static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8126{
8127 int func = BP_FUNC(bp);
87942b46 8128 int timer_interval;
34f80b04
EG
8129 int rc;
8130
da5a662a
VZ
8131 /* Disable interrupt handling until HW is initialized */
8132 atomic_set(&bp->intr_sem, 1);
8133
34f80b04 8134 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 8135
1cf167f2 8136 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
8137 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8138
8139 rc = bnx2x_get_hwinfo(bp);
8140
8141 /* need to reset chip if undi was active */
8142 if (!BP_NOMCP(bp))
8143 bnx2x_undi_unload(bp);
8144
8145 if (CHIP_REV_IS_FPGA(bp))
8146 printk(KERN_ERR PFX "FPGA detected\n");
8147
8148 if (BP_NOMCP(bp) && (func == 0))
8149 printk(KERN_ERR PFX
8150 "MCP disabled, must load devices in order!\n");
8151
555f6c78 8152 /* Set multi queue mode */
8badd27a
EG
8153 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8154 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 8155 printk(KERN_ERR PFX
8badd27a 8156 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
8157 multi_mode = ETH_RSS_MODE_DISABLED;
8158 }
8159 bp->multi_mode = multi_mode;
8160
8161
7a9b2557
VZ
8162 /* Set TPA flags */
8163 if (disable_tpa) {
8164 bp->flags &= ~TPA_ENABLE_FLAG;
8165 bp->dev->features &= ~NETIF_F_LRO;
8166 } else {
8167 bp->flags |= TPA_ENABLE_FLAG;
8168 bp->dev->features |= NETIF_F_LRO;
8169 }
8170
8d5726c4 8171 bp->mrrs = mrrs;
7a9b2557 8172
34f80b04
EG
8173 bp->tx_ring_size = MAX_TX_AVAIL;
8174 bp->rx_ring_size = MAX_RX_AVAIL;
8175
8176 bp->rx_csum = 1;
34f80b04
EG
8177
8178 bp->tx_ticks = 50;
8179 bp->rx_ticks = 25;
8180
87942b46
EG
8181 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8182 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8183
8184 init_timer(&bp->timer);
8185 bp->timer.expires = jiffies + bp->current_interval;
8186 bp->timer.data = (unsigned long) bp;
8187 bp->timer.function = bnx2x_timer;
8188
8189 return rc;
a2fbb9ea
ET
8190}
8191
8192/*
8193 * ethtool service functions
8194 */
8195
8196/* All ethtool functions called with rtnl_lock */
8197
8198static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8199{
8200 struct bnx2x *bp = netdev_priv(dev);
8201
34f80b04
EG
8202 cmd->supported = bp->port.supported;
8203 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8204
8205 if (netif_carrier_ok(dev)) {
c18487ee
YR
8206 cmd->speed = bp->link_vars.line_speed;
8207 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8208 } else {
c18487ee
YR
8209 cmd->speed = bp->link_params.req_line_speed;
8210 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8211 }
34f80b04
EG
8212 if (IS_E1HMF(bp)) {
8213 u16 vn_max_rate;
8214
8215 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8216 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8217 if (vn_max_rate < cmd->speed)
8218 cmd->speed = vn_max_rate;
8219 }
a2fbb9ea 8220
c18487ee
YR
8221 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8222 u32 ext_phy_type =
8223 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8224
8225 switch (ext_phy_type) {
8226 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
f1410647 8227 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8228 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
589abe3a
EG
8229 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8230 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8231 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
f1410647
ET
8232 cmd->port = PORT_FIBRE;
8233 break;
8234
8235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
28577185 8236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
f1410647
ET
8237 cmd->port = PORT_TP;
8238 break;
8239
c18487ee
YR
8240 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8241 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8242 bp->link_params.ext_phy_config);
8243 break;
8244
f1410647
ET
8245 default:
8246 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8247 bp->link_params.ext_phy_config);
8248 break;
f1410647
ET
8249 }
8250 } else
a2fbb9ea 8251 cmd->port = PORT_TP;
a2fbb9ea 8252
34f80b04 8253 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8254 cmd->transceiver = XCVR_INTERNAL;
8255
c18487ee 8256 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8257 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8258 else
a2fbb9ea 8259 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8260
8261 cmd->maxtxpkt = 0;
8262 cmd->maxrxpkt = 0;
8263
8264 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8265 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8266 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8267 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8268 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8269 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8270 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8271
8272 return 0;
8273}
8274
8275static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8276{
8277 struct bnx2x *bp = netdev_priv(dev);
8278 u32 advertising;
8279
34f80b04
EG
8280 if (IS_E1HMF(bp))
8281 return 0;
8282
a2fbb9ea
ET
8283 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8284 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8285 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8286 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8287 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8288 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8289 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8290
a2fbb9ea 8291 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8292 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8293 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8294 return -EINVAL;
f1410647 8295 }
a2fbb9ea
ET
8296
8297 /* advertise the requested speed and duplex if supported */
34f80b04 8298 cmd->advertising &= bp->port.supported;
a2fbb9ea 8299
c18487ee
YR
8300 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8301 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8302 bp->port.advertising |= (ADVERTISED_Autoneg |
8303 cmd->advertising);
a2fbb9ea
ET
8304
8305 } else { /* forced speed */
8306 /* advertise the requested speed and duplex if supported */
8307 switch (cmd->speed) {
8308 case SPEED_10:
8309 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8310 if (!(bp->port.supported &
f1410647
ET
8311 SUPPORTED_10baseT_Full)) {
8312 DP(NETIF_MSG_LINK,
8313 "10M full not supported\n");
a2fbb9ea 8314 return -EINVAL;
f1410647 8315 }
a2fbb9ea
ET
8316
8317 advertising = (ADVERTISED_10baseT_Full |
8318 ADVERTISED_TP);
8319 } else {
34f80b04 8320 if (!(bp->port.supported &
f1410647
ET
8321 SUPPORTED_10baseT_Half)) {
8322 DP(NETIF_MSG_LINK,
8323 "10M half not supported\n");
a2fbb9ea 8324 return -EINVAL;
f1410647 8325 }
a2fbb9ea
ET
8326
8327 advertising = (ADVERTISED_10baseT_Half |
8328 ADVERTISED_TP);
8329 }
8330 break;
8331
8332 case SPEED_100:
8333 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8334 if (!(bp->port.supported &
f1410647
ET
8335 SUPPORTED_100baseT_Full)) {
8336 DP(NETIF_MSG_LINK,
8337 "100M full not supported\n");
a2fbb9ea 8338 return -EINVAL;
f1410647 8339 }
a2fbb9ea
ET
8340
8341 advertising = (ADVERTISED_100baseT_Full |
8342 ADVERTISED_TP);
8343 } else {
34f80b04 8344 if (!(bp->port.supported &
f1410647
ET
8345 SUPPORTED_100baseT_Half)) {
8346 DP(NETIF_MSG_LINK,
8347 "100M half not supported\n");
a2fbb9ea 8348 return -EINVAL;
f1410647 8349 }
a2fbb9ea
ET
8350
8351 advertising = (ADVERTISED_100baseT_Half |
8352 ADVERTISED_TP);
8353 }
8354 break;
8355
8356 case SPEED_1000:
f1410647
ET
8357 if (cmd->duplex != DUPLEX_FULL) {
8358 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8359 return -EINVAL;
f1410647 8360 }
a2fbb9ea 8361
34f80b04 8362 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8363 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8364 return -EINVAL;
f1410647 8365 }
a2fbb9ea
ET
8366
8367 advertising = (ADVERTISED_1000baseT_Full |
8368 ADVERTISED_TP);
8369 break;
8370
8371 case SPEED_2500:
f1410647
ET
8372 if (cmd->duplex != DUPLEX_FULL) {
8373 DP(NETIF_MSG_LINK,
8374 "2.5G half not supported\n");
a2fbb9ea 8375 return -EINVAL;
f1410647 8376 }
a2fbb9ea 8377
34f80b04 8378 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8379 DP(NETIF_MSG_LINK,
8380 "2.5G full not supported\n");
a2fbb9ea 8381 return -EINVAL;
f1410647 8382 }
a2fbb9ea 8383
f1410647 8384 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8385 ADVERTISED_TP);
8386 break;
8387
8388 case SPEED_10000:
f1410647
ET
8389 if (cmd->duplex != DUPLEX_FULL) {
8390 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8391 return -EINVAL;
f1410647 8392 }
a2fbb9ea 8393
34f80b04 8394 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8395 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8396 return -EINVAL;
f1410647 8397 }
a2fbb9ea
ET
8398
8399 advertising = (ADVERTISED_10000baseT_Full |
8400 ADVERTISED_FIBRE);
8401 break;
8402
8403 default:
f1410647 8404 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8405 return -EINVAL;
8406 }
8407
c18487ee
YR
8408 bp->link_params.req_line_speed = cmd->speed;
8409 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8410 bp->port.advertising = advertising;
a2fbb9ea
ET
8411 }
8412
c18487ee 8413 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8414 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8415 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8416 bp->port.advertising);
a2fbb9ea 8417
34f80b04 8418 if (netif_running(dev)) {
bb2a0f7a 8419 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8420 bnx2x_link_set(bp);
8421 }
a2fbb9ea
ET
8422
8423 return 0;
8424}
8425
c18487ee
YR
8426#define PHY_FW_VER_LEN 10
8427
a2fbb9ea
ET
8428static void bnx2x_get_drvinfo(struct net_device *dev,
8429 struct ethtool_drvinfo *info)
8430{
8431 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8432 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8433
8434 strcpy(info->driver, DRV_MODULE_NAME);
8435 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8436
8437 phy_fw_ver[0] = '\0';
34f80b04 8438 if (bp->port.pmf) {
4a37fb66 8439 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8440 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8441 (bp->state != BNX2X_STATE_CLOSED),
8442 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8443 bnx2x_release_phy_lock(bp);
34f80b04 8444 }
c18487ee 8445
f0e53a84
EG
8446 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8447 (bp->common.bc_ver & 0xff0000) >> 16,
8448 (bp->common.bc_ver & 0xff00) >> 8,
8449 (bp->common.bc_ver & 0xff),
8450 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8451 strcpy(info->bus_info, pci_name(bp->pdev));
8452 info->n_stats = BNX2X_NUM_STATS;
8453 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8454 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8455 info->regdump_len = 0;
8456}
8457
8458static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8459{
8460 struct bnx2x *bp = netdev_priv(dev);
8461
8462 if (bp->flags & NO_WOL_FLAG) {
8463 wol->supported = 0;
8464 wol->wolopts = 0;
8465 } else {
8466 wol->supported = WAKE_MAGIC;
8467 if (bp->wol)
8468 wol->wolopts = WAKE_MAGIC;
8469 else
8470 wol->wolopts = 0;
8471 }
8472 memset(&wol->sopass, 0, sizeof(wol->sopass));
8473}
8474
8475static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8476{
8477 struct bnx2x *bp = netdev_priv(dev);
8478
8479 if (wol->wolopts & ~WAKE_MAGIC)
8480 return -EINVAL;
8481
8482 if (wol->wolopts & WAKE_MAGIC) {
8483 if (bp->flags & NO_WOL_FLAG)
8484 return -EINVAL;
8485
8486 bp->wol = 1;
34f80b04 8487 } else
a2fbb9ea 8488 bp->wol = 0;
34f80b04 8489
a2fbb9ea
ET
8490 return 0;
8491}
8492
8493static u32 bnx2x_get_msglevel(struct net_device *dev)
8494{
8495 struct bnx2x *bp = netdev_priv(dev);
8496
8497 return bp->msglevel;
8498}
8499
8500static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8501{
8502 struct bnx2x *bp = netdev_priv(dev);
8503
8504 if (capable(CAP_NET_ADMIN))
8505 bp->msglevel = level;
8506}
8507
8508static int bnx2x_nway_reset(struct net_device *dev)
8509{
8510 struct bnx2x *bp = netdev_priv(dev);
8511
34f80b04
EG
8512 if (!bp->port.pmf)
8513 return 0;
a2fbb9ea 8514
34f80b04 8515 if (netif_running(dev)) {
bb2a0f7a 8516 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8517 bnx2x_link_set(bp);
8518 }
a2fbb9ea
ET
8519
8520 return 0;
8521}
8522
8523static int bnx2x_get_eeprom_len(struct net_device *dev)
8524{
8525 struct bnx2x *bp = netdev_priv(dev);
8526
34f80b04 8527 return bp->common.flash_size;
a2fbb9ea
ET
8528}
8529
8530static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8531{
34f80b04 8532 int port = BP_PORT(bp);
a2fbb9ea
ET
8533 int count, i;
8534 u32 val = 0;
8535
8536 /* adjust timeout for emulation/FPGA */
8537 count = NVRAM_TIMEOUT_COUNT;
8538 if (CHIP_REV_IS_SLOW(bp))
8539 count *= 100;
8540
8541 /* request access to nvram interface */
8542 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8543 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8544
8545 for (i = 0; i < count*10; i++) {
8546 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8547 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8548 break;
8549
8550 udelay(5);
8551 }
8552
8553 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8554 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8555 return -EBUSY;
8556 }
8557
8558 return 0;
8559}
8560
8561static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8562{
34f80b04 8563 int port = BP_PORT(bp);
a2fbb9ea
ET
8564 int count, i;
8565 u32 val = 0;
8566
8567 /* adjust timeout for emulation/FPGA */
8568 count = NVRAM_TIMEOUT_COUNT;
8569 if (CHIP_REV_IS_SLOW(bp))
8570 count *= 100;
8571
8572 /* relinquish nvram interface */
8573 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8574 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8575
8576 for (i = 0; i < count*10; i++) {
8577 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8578 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8579 break;
8580
8581 udelay(5);
8582 }
8583
8584 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8585 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8586 return -EBUSY;
8587 }
8588
8589 return 0;
8590}
8591
8592static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8593{
8594 u32 val;
8595
8596 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8597
8598 /* enable both bits, even on read */
8599 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8600 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8601 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8602}
8603
8604static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8605{
8606 u32 val;
8607
8608 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8609
8610 /* disable both bits, even after read */
8611 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8612 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8613 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8614}
8615
8616static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8617 u32 cmd_flags)
8618{
f1410647 8619 int count, i, rc;
a2fbb9ea
ET
8620 u32 val;
8621
8622 /* build the command word */
8623 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8624
8625 /* need to clear DONE bit separately */
8626 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8627
8628 /* address of the NVRAM to read from */
8629 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8630 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8631
8632 /* issue a read command */
8633 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8634
8635 /* adjust timeout for emulation/FPGA */
8636 count = NVRAM_TIMEOUT_COUNT;
8637 if (CHIP_REV_IS_SLOW(bp))
8638 count *= 100;
8639
8640 /* wait for completion */
8641 *ret_val = 0;
8642 rc = -EBUSY;
8643 for (i = 0; i < count; i++) {
8644 udelay(5);
8645 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8646
8647 if (val & MCPR_NVM_COMMAND_DONE) {
8648 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8649 /* we read nvram data in cpu order
8650 * but ethtool sees it as an array of bytes
8651 * converting to big-endian will do the work */
8652 val = cpu_to_be32(val);
8653 *ret_val = val;
8654 rc = 0;
8655 break;
8656 }
8657 }
8658
8659 return rc;
8660}
8661
8662static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8663 int buf_size)
8664{
8665 int rc;
8666 u32 cmd_flags;
8667 u32 val;
8668
8669 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8670 DP(BNX2X_MSG_NVM,
c14423fe 8671 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8672 offset, buf_size);
8673 return -EINVAL;
8674 }
8675
34f80b04
EG
8676 if (offset + buf_size > bp->common.flash_size) {
8677 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8678 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8679 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8680 return -EINVAL;
8681 }
8682
8683 /* request access to nvram interface */
8684 rc = bnx2x_acquire_nvram_lock(bp);
8685 if (rc)
8686 return rc;
8687
8688 /* enable access to nvram interface */
8689 bnx2x_enable_nvram_access(bp);
8690
8691 /* read the first word(s) */
8692 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8693 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8694 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8695 memcpy(ret_buf, &val, 4);
8696
8697 /* advance to the next dword */
8698 offset += sizeof(u32);
8699 ret_buf += sizeof(u32);
8700 buf_size -= sizeof(u32);
8701 cmd_flags = 0;
8702 }
8703
8704 if (rc == 0) {
8705 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8706 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8707 memcpy(ret_buf, &val, 4);
8708 }
8709
8710 /* disable access to nvram interface */
8711 bnx2x_disable_nvram_access(bp);
8712 bnx2x_release_nvram_lock(bp);
8713
8714 return rc;
8715}
8716
8717static int bnx2x_get_eeprom(struct net_device *dev,
8718 struct ethtool_eeprom *eeprom, u8 *eebuf)
8719{
8720 struct bnx2x *bp = netdev_priv(dev);
8721 int rc;
8722
2add3acb
EG
8723 if (!netif_running(dev))
8724 return -EAGAIN;
8725
34f80b04 8726 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8727 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8728 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8729 eeprom->len, eeprom->len);
8730
8731 /* parameters already validated in ethtool_get_eeprom */
8732
8733 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8734
8735 return rc;
8736}
8737
8738static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8739 u32 cmd_flags)
8740{
f1410647 8741 int count, i, rc;
a2fbb9ea
ET
8742
8743 /* build the command word */
8744 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8745
8746 /* need to clear DONE bit separately */
8747 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8748
8749 /* write the data */
8750 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8751
8752 /* address of the NVRAM to write to */
8753 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8754 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8755
8756 /* issue the write command */
8757 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8758
8759 /* adjust timeout for emulation/FPGA */
8760 count = NVRAM_TIMEOUT_COUNT;
8761 if (CHIP_REV_IS_SLOW(bp))
8762 count *= 100;
8763
8764 /* wait for completion */
8765 rc = -EBUSY;
8766 for (i = 0; i < count; i++) {
8767 udelay(5);
8768 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8769 if (val & MCPR_NVM_COMMAND_DONE) {
8770 rc = 0;
8771 break;
8772 }
8773 }
8774
8775 return rc;
8776}
8777
f1410647 8778#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
8779
8780static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8781 int buf_size)
8782{
8783 int rc;
8784 u32 cmd_flags;
8785 u32 align_offset;
8786 u32 val;
8787
34f80b04
EG
8788 if (offset + buf_size > bp->common.flash_size) {
8789 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8790 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8791 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8792 return -EINVAL;
8793 }
8794
8795 /* request access to nvram interface */
8796 rc = bnx2x_acquire_nvram_lock(bp);
8797 if (rc)
8798 return rc;
8799
8800 /* enable access to nvram interface */
8801 bnx2x_enable_nvram_access(bp);
8802
8803 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8804 align_offset = (offset & ~0x03);
8805 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8806
8807 if (rc == 0) {
8808 val &= ~(0xff << BYTE_OFFSET(offset));
8809 val |= (*data_buf << BYTE_OFFSET(offset));
8810
8811 /* nvram data is returned as an array of bytes
8812 * convert it back to cpu order */
8813 val = be32_to_cpu(val);
8814
a2fbb9ea
ET
8815 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8816 cmd_flags);
8817 }
8818
8819 /* disable access to nvram interface */
8820 bnx2x_disable_nvram_access(bp);
8821 bnx2x_release_nvram_lock(bp);
8822
8823 return rc;
8824}
8825
8826static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8827 int buf_size)
8828{
8829 int rc;
8830 u32 cmd_flags;
8831 u32 val;
8832 u32 written_so_far;
8833
34f80b04 8834 if (buf_size == 1) /* ethtool */
a2fbb9ea 8835 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
a2fbb9ea
ET
8836
8837 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8838 DP(BNX2X_MSG_NVM,
c14423fe 8839 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
8840 offset, buf_size);
8841 return -EINVAL;
8842 }
8843
34f80b04
EG
8844 if (offset + buf_size > bp->common.flash_size) {
8845 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8846 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8847 offset, buf_size, bp->common.flash_size);
a2fbb9ea
ET
8848 return -EINVAL;
8849 }
8850
8851 /* request access to nvram interface */
8852 rc = bnx2x_acquire_nvram_lock(bp);
8853 if (rc)
8854 return rc;
8855
8856 /* enable access to nvram interface */
8857 bnx2x_enable_nvram_access(bp);
8858
8859 written_so_far = 0;
8860 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8861 while ((written_so_far < buf_size) && (rc == 0)) {
8862 if (written_so_far == (buf_size - sizeof(u32)))
8863 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8864 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8865 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8866 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8867 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8868
8869 memcpy(&val, data_buf, 4);
a2fbb9ea
ET
8870
8871 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8872
8873 /* advance to the next dword */
8874 offset += sizeof(u32);
8875 data_buf += sizeof(u32);
8876 written_so_far += sizeof(u32);
8877 cmd_flags = 0;
8878 }
8879
8880 /* disable access to nvram interface */
8881 bnx2x_disable_nvram_access(bp);
8882 bnx2x_release_nvram_lock(bp);
8883
8884 return rc;
8885}
8886
8887static int bnx2x_set_eeprom(struct net_device *dev,
8888 struct ethtool_eeprom *eeprom, u8 *eebuf)
8889{
8890 struct bnx2x *bp = netdev_priv(dev);
8891 int rc;
8892
9f4c9583
EG
8893 if (!netif_running(dev))
8894 return -EAGAIN;
8895
34f80b04 8896 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
a2fbb9ea
ET
8897 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8898 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8899 eeprom->len, eeprom->len);
8900
8901 /* parameters already validated in ethtool_set_eeprom */
8902
c18487ee 8903 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
34f80b04
EG
8904 if (eeprom->magic == 0x00504859)
8905 if (bp->port.pmf) {
8906
4a37fb66 8907 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8908 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8909 bp->link_params.ext_phy_config,
8910 (bp->state != BNX2X_STATE_CLOSED),
8911 eebuf, eeprom->len);
bb2a0f7a
YG
8912 if ((bp->state == BNX2X_STATE_OPEN) ||
8913 (bp->state == BNX2X_STATE_DISABLED)) {
34f80b04 8914 rc |= bnx2x_link_reset(&bp->link_params,
589abe3a 8915 &bp->link_vars, 1);
34f80b04
EG
8916 rc |= bnx2x_phy_init(&bp->link_params,
8917 &bp->link_vars);
bb2a0f7a 8918 }
4a37fb66 8919 bnx2x_release_phy_lock(bp);
34f80b04
EG
8920
8921 } else /* Only the PMF can access the PHY */
8922 return -EINVAL;
8923 else
c18487ee 8924 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
8925
8926 return rc;
8927}
8928
8929static int bnx2x_get_coalesce(struct net_device *dev,
8930 struct ethtool_coalesce *coal)
8931{
8932 struct bnx2x *bp = netdev_priv(dev);
8933
8934 memset(coal, 0, sizeof(struct ethtool_coalesce));
8935
8936 coal->rx_coalesce_usecs = bp->rx_ticks;
8937 coal->tx_coalesce_usecs = bp->tx_ticks;
a2fbb9ea
ET
8938
8939 return 0;
8940}
8941
8942static int bnx2x_set_coalesce(struct net_device *dev,
8943 struct ethtool_coalesce *coal)
8944{
8945 struct bnx2x *bp = netdev_priv(dev);
8946
8947 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8948 if (bp->rx_ticks > 3000)
8949 bp->rx_ticks = 3000;
8950
8951 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8952 if (bp->tx_ticks > 0x3000)
8953 bp->tx_ticks = 0x3000;
8954
34f80b04 8955 if (netif_running(dev))
a2fbb9ea
ET
8956 bnx2x_update_coalesce(bp);
8957
8958 return 0;
8959}
8960
8961static void bnx2x_get_ringparam(struct net_device *dev,
8962 struct ethtool_ringparam *ering)
8963{
8964 struct bnx2x *bp = netdev_priv(dev);
8965
8966 ering->rx_max_pending = MAX_RX_AVAIL;
8967 ering->rx_mini_max_pending = 0;
8968 ering->rx_jumbo_max_pending = 0;
8969
8970 ering->rx_pending = bp->rx_ring_size;
8971 ering->rx_mini_pending = 0;
8972 ering->rx_jumbo_pending = 0;
8973
8974 ering->tx_max_pending = MAX_TX_AVAIL;
8975 ering->tx_pending = bp->tx_ring_size;
8976}
8977
8978static int bnx2x_set_ringparam(struct net_device *dev,
8979 struct ethtool_ringparam *ering)
8980{
8981 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8982 int rc = 0;
a2fbb9ea
ET
8983
8984 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8985 (ering->tx_pending > MAX_TX_AVAIL) ||
8986 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8987 return -EINVAL;
8988
8989 bp->rx_ring_size = ering->rx_pending;
8990 bp->tx_ring_size = ering->tx_pending;
8991
34f80b04
EG
8992 if (netif_running(dev)) {
8993 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8994 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea
ET
8995 }
8996
34f80b04 8997 return rc;
a2fbb9ea
ET
8998}
8999
9000static void bnx2x_get_pauseparam(struct net_device *dev,
9001 struct ethtool_pauseparam *epause)
9002{
9003 struct bnx2x *bp = netdev_priv(dev);
9004
c0700f90 9005 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
c18487ee
YR
9006 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9007
c0700f90
DM
9008 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9009 BNX2X_FLOW_CTRL_RX);
9010 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9011 BNX2X_FLOW_CTRL_TX);
a2fbb9ea
ET
9012
9013 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9014 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9015 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9016}
9017
9018static int bnx2x_set_pauseparam(struct net_device *dev,
9019 struct ethtool_pauseparam *epause)
9020{
9021 struct bnx2x *bp = netdev_priv(dev);
9022
34f80b04
EG
9023 if (IS_E1HMF(bp))
9024 return 0;
9025
a2fbb9ea
ET
9026 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9027 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9028 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9029
c0700f90 9030 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 9031
f1410647 9032 if (epause->rx_pause)
c0700f90 9033 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 9034
f1410647 9035 if (epause->tx_pause)
c0700f90 9036 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 9037
c0700f90
DM
9038 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9039 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 9040
c18487ee 9041 if (epause->autoneg) {
34f80b04 9042 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 9043 DP(NETIF_MSG_LINK, "autoneg not supported\n");
c18487ee
YR
9044 return -EINVAL;
9045 }
a2fbb9ea 9046
c18487ee 9047 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 9048 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 9049 }
a2fbb9ea 9050
c18487ee
YR
9051 DP(NETIF_MSG_LINK,
9052 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
34f80b04
EG
9053
9054 if (netif_running(dev)) {
bb2a0f7a 9055 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
9056 bnx2x_link_set(bp);
9057 }
a2fbb9ea
ET
9058
9059 return 0;
9060}
9061
df0f2343
VZ
9062static int bnx2x_set_flags(struct net_device *dev, u32 data)
9063{
9064 struct bnx2x *bp = netdev_priv(dev);
9065 int changed = 0;
9066 int rc = 0;
9067
9068 /* TPA requires Rx CSUM offloading */
9069 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9070 if (!(dev->features & NETIF_F_LRO)) {
9071 dev->features |= NETIF_F_LRO;
9072 bp->flags |= TPA_ENABLE_FLAG;
9073 changed = 1;
9074 }
9075
9076 } else if (dev->features & NETIF_F_LRO) {
9077 dev->features &= ~NETIF_F_LRO;
9078 bp->flags &= ~TPA_ENABLE_FLAG;
9079 changed = 1;
9080 }
9081
9082 if (changed && netif_running(dev)) {
9083 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9084 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9085 }
9086
9087 return rc;
9088}
9089
a2fbb9ea
ET
9090static u32 bnx2x_get_rx_csum(struct net_device *dev)
9091{
9092 struct bnx2x *bp = netdev_priv(dev);
9093
9094 return bp->rx_csum;
9095}
9096
9097static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9098{
9099 struct bnx2x *bp = netdev_priv(dev);
df0f2343 9100 int rc = 0;
a2fbb9ea
ET
9101
9102 bp->rx_csum = data;
df0f2343
VZ
9103
9104 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
9105 TPA'ed packets will be discarded due to wrong TCP CSUM */
9106 if (!data) {
9107 u32 flags = ethtool_op_get_flags(dev);
9108
9109 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9110 }
9111
9112 return rc;
a2fbb9ea
ET
9113}
9114
9115static int bnx2x_set_tso(struct net_device *dev, u32 data)
9116{
755735eb 9117 if (data) {
a2fbb9ea 9118 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9119 dev->features |= NETIF_F_TSO6;
9120 } else {
a2fbb9ea 9121 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb
EG
9122 dev->features &= ~NETIF_F_TSO6;
9123 }
9124
a2fbb9ea
ET
9125 return 0;
9126}
9127
f3c87cdd 9128static const struct {
a2fbb9ea
ET
9129 char string[ETH_GSTRING_LEN];
9130} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9131 { "register_test (offline)" },
9132 { "memory_test (offline)" },
9133 { "loopback_test (offline)" },
9134 { "nvram_test (online)" },
9135 { "interrupt_test (online)" },
9136 { "link_test (online)" },
d3d4f495 9137 { "idle check (online)" }
a2fbb9ea
ET
9138};
9139
9140static int bnx2x_self_test_count(struct net_device *dev)
9141{
9142 return BNX2X_NUM_TESTS;
9143}
9144
f3c87cdd
YG
9145static int bnx2x_test_registers(struct bnx2x *bp)
9146{
9147 int idx, i, rc = -ENODEV;
9148 u32 wr_val = 0;
9dabc424 9149 int port = BP_PORT(bp);
f3c87cdd
YG
9150 static const struct {
9151 u32 offset0;
9152 u32 offset1;
9153 u32 mask;
9154 } reg_tbl[] = {
9155/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9156 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9157 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9158 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9159 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9160 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9161 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9162 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9163 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9164 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9165/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9166 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9167 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9168 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9169 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9170 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9171 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9172 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9173 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9174 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9175/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9176 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9177 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9178 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9179 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9180 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9181 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9182 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9183 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9184 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9185/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9186 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9187 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9188 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9189 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9190 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9191 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9192 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9193
9194 { 0xffffffff, 0, 0x00000000 }
9195 };
9196
9197 if (!netif_running(bp->dev))
9198 return rc;
9199
9200 /* Repeat the test twice:
9201 First by writing 0x00000000, second by writing 0xffffffff */
9202 for (idx = 0; idx < 2; idx++) {
9203
9204 switch (idx) {
9205 case 0:
9206 wr_val = 0;
9207 break;
9208 case 1:
9209 wr_val = 0xffffffff;
9210 break;
9211 }
9212
9213 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9214 u32 offset, mask, save_val, val;
f3c87cdd
YG
9215
9216 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9217 mask = reg_tbl[i].mask;
9218
9219 save_val = REG_RD(bp, offset);
9220
9221 REG_WR(bp, offset, wr_val);
9222 val = REG_RD(bp, offset);
9223
9224 /* Restore the original register's value */
9225 REG_WR(bp, offset, save_val);
9226
9227 /* verify that value is as expected value */
9228 if ((val & mask) != (wr_val & mask))
9229 goto test_reg_exit;
9230 }
9231 }
9232
9233 rc = 0;
9234
9235test_reg_exit:
9236 return rc;
9237}
9238
9239static int bnx2x_test_memory(struct bnx2x *bp)
9240{
9241 int i, j, rc = -ENODEV;
9242 u32 val;
9243 static const struct {
9244 u32 offset;
9245 int size;
9246 } mem_tbl[] = {
9247 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9248 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9249 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9250 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9251 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9252 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9253 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9254
9255 { 0xffffffff, 0 }
9256 };
9257 static const struct {
9258 char *name;
9259 u32 offset;
9dabc424
YG
9260 u32 e1_mask;
9261 u32 e1h_mask;
f3c87cdd 9262 } prty_tbl[] = {
9dabc424
YG
9263 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9264 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9265 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9266 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9267 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9268 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9269
9270 { NULL, 0xffffffff, 0, 0 }
f3c87cdd
YG
9271 };
9272
9273 if (!netif_running(bp->dev))
9274 return rc;
9275
9276 /* Go through all the memories */
9277 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9278 for (j = 0; j < mem_tbl[i].size; j++)
9279 REG_RD(bp, mem_tbl[i].offset + j*4);
9280
9281 /* Check the parity status */
9282 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9283 val = REG_RD(bp, prty_tbl[i].offset);
9dabc424
YG
9284 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9285 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
f3c87cdd
YG
9286 DP(NETIF_MSG_HW,
9287 "%s is 0x%x\n", prty_tbl[i].name, val);
9288 goto test_mem_exit;
9289 }
9290 }
9291
9292 rc = 0;
9293
9294test_mem_exit:
9295 return rc;
9296}
9297
f3c87cdd
YG
9298static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9299{
9300 int cnt = 1000;
9301
9302 if (link_up)
9303 while (bnx2x_link_test(bp) && cnt--)
9304 msleep(10);
9305}
9306
9307static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9308{
9309 unsigned int pkt_size, num_pkts, i;
9310 struct sk_buff *skb;
9311 unsigned char *packet;
9312 struct bnx2x_fastpath *fp = &bp->fp[0];
9313 u16 tx_start_idx, tx_idx;
9314 u16 rx_start_idx, rx_idx;
9315 u16 pkt_prod;
9316 struct sw_tx_bd *tx_buf;
9317 struct eth_tx_bd *tx_bd;
9318 dma_addr_t mapping;
9319 union eth_rx_cqe *cqe;
9320 u8 cqe_fp_flags;
9321 struct sw_rx_bd *rx_buf;
9322 u16 len;
9323 int rc = -ENODEV;
9324
9325 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9326 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9327 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd
YG
9328
9329 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
3910c8ae 9330 u16 cnt = 1000;
f3c87cdd 9331 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
f3c87cdd 9332 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd 9333 /* wait until link state is restored */
3910c8ae
EG
9334 if (link_up)
9335 while (cnt-- && bnx2x_test_link(&bp->link_params,
9336 &bp->link_vars))
9337 msleep(10);
f3c87cdd
YG
9338 } else
9339 return -EINVAL;
9340
9341 pkt_size = 1514;
9342 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9343 if (!skb) {
9344 rc = -ENOMEM;
9345 goto test_loopback_exit;
9346 }
9347 packet = skb_put(skb, pkt_size);
9348 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9349 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9350 for (i = ETH_HLEN; i < pkt_size; i++)
9351 packet[i] = (unsigned char) (i & 0xff);
9352
9353 num_pkts = 0;
9354 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9355 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9356
9357 pkt_prod = fp->tx_pkt_prod++;
9358 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9359 tx_buf->first_bd = fp->tx_bd_prod;
9360 tx_buf->skb = skb;
9361
9362 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9363 mapping = pci_map_single(bp->pdev, skb->data,
9364 skb_headlen(skb), PCI_DMA_TODEVICE);
9365 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9366 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9367 tx_bd->nbd = cpu_to_le16(1);
9368 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9369 tx_bd->vlan = cpu_to_le16(pkt_prod);
9370 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9371 ETH_TX_BD_FLAGS_END_BD);
9372 tx_bd->general_data = ((UNICAST_ADDRESS <<
9373 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9374
58f4c4cf
EG
9375 wmb();
9376
f3c87cdd
YG
9377 fp->hw_tx_prods->bds_prod =
9378 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9379 mb(); /* FW restriction: must not reorder writing nbd and packets */
9380 fp->hw_tx_prods->packets_prod =
9381 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9382 DOORBELL(bp, FP_IDX(fp), 0);
9383
9384 mmiowb();
9385
9386 num_pkts++;
9387 fp->tx_bd_prod++;
9388 bp->dev->trans_start = jiffies;
9389
9390 udelay(100);
9391
9392 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9393 if (tx_idx != tx_start_idx + num_pkts)
9394 goto test_loopback_exit;
9395
9396 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9397 if (rx_idx != rx_start_idx + num_pkts)
9398 goto test_loopback_exit;
9399
9400 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9401 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9402 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9403 goto test_loopback_rx_exit;
9404
9405 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9406 if (len != pkt_size)
9407 goto test_loopback_rx_exit;
9408
9409 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9410 skb = rx_buf->skb;
9411 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9412 for (i = ETH_HLEN; i < pkt_size; i++)
9413 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9414 goto test_loopback_rx_exit;
9415
9416 rc = 0;
9417
9418test_loopback_rx_exit:
f3c87cdd
YG
9419
9420 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9421 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9422 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9423 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9424
9425 /* Update producers */
9426 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9427 fp->rx_sge_prod);
f3c87cdd
YG
9428
9429test_loopback_exit:
9430 bp->link_params.loopback_mode = LOOPBACK_NONE;
9431
9432 return rc;
9433}
9434
9435static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9436{
9437 int rc = 0;
9438
9439 if (!netif_running(bp->dev))
9440 return BNX2X_LOOPBACK_FAILED;
9441
f8ef6e44 9442 bnx2x_netif_stop(bp, 1);
3910c8ae 9443 bnx2x_acquire_phy_lock(bp);
f3c87cdd
YG
9444
9445 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9446 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9447 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9448 }
9449
9450 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9451 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9452 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9453 }
9454
3910c8ae 9455 bnx2x_release_phy_lock(bp);
f3c87cdd
YG
9456 bnx2x_netif_start(bp);
9457
9458 return rc;
9459}
9460
9461#define CRC32_RESIDUAL 0xdebb20e3
9462
9463static int bnx2x_test_nvram(struct bnx2x *bp)
9464{
9465 static const struct {
9466 int offset;
9467 int size;
9468 } nvram_tbl[] = {
9469 { 0, 0x14 }, /* bootstrap */
9470 { 0x14, 0xec }, /* dir */
9471 { 0x100, 0x350 }, /* manuf_info */
9472 { 0x450, 0xf0 }, /* feature_info */
9473 { 0x640, 0x64 }, /* upgrade_key_info */
9474 { 0x6a4, 0x64 },
9475 { 0x708, 0x70 }, /* manuf_key_info */
9476 { 0x778, 0x70 },
9477 { 0, 0 }
9478 };
9479 u32 buf[0x350 / 4];
9480 u8 *data = (u8 *)buf;
9481 int i, rc;
9482 u32 magic, csum;
9483
9484 rc = bnx2x_nvram_read(bp, 0, data, 4);
9485 if (rc) {
9486 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9487 goto test_nvram_exit;
9488 }
9489
9490 magic = be32_to_cpu(buf[0]);
9491 if (magic != 0x669955aa) {
9492 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9493 rc = -ENODEV;
9494 goto test_nvram_exit;
9495 }
9496
9497 for (i = 0; nvram_tbl[i].size; i++) {
9498
9499 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9500 nvram_tbl[i].size);
9501 if (rc) {
9502 DP(NETIF_MSG_PROBE,
9503 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9504 goto test_nvram_exit;
9505 }
9506
9507 csum = ether_crc_le(nvram_tbl[i].size, data);
9508 if (csum != CRC32_RESIDUAL) {
9509 DP(NETIF_MSG_PROBE,
9510 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9511 rc = -ENODEV;
9512 goto test_nvram_exit;
9513 }
9514 }
9515
9516test_nvram_exit:
9517 return rc;
9518}
9519
9520static int bnx2x_test_intr(struct bnx2x *bp)
9521{
9522 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9523 int i, rc;
9524
9525 if (!netif_running(bp->dev))
9526 return -ENODEV;
9527
8d9c5f34 9528 config->hdr.length = 0;
af246401
EG
9529 if (CHIP_IS_E1(bp))
9530 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9531 else
9532 config->hdr.offset = BP_FUNC(bp);
f3c87cdd
YG
9533 config->hdr.client_id = BP_CL_ID(bp);
9534 config->hdr.reserved1 = 0;
9535
9536 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9537 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9538 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9539 if (rc == 0) {
9540 bp->set_mac_pending++;
9541 for (i = 0; i < 10; i++) {
9542 if (!bp->set_mac_pending)
9543 break;
9544 msleep_interruptible(10);
9545 }
9546 if (i == 10)
9547 rc = -ENODEV;
9548 }
9549
9550 return rc;
9551}
9552
a2fbb9ea
ET
9553static void bnx2x_self_test(struct net_device *dev,
9554 struct ethtool_test *etest, u64 *buf)
9555{
9556 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea
ET
9557
9558 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9559
f3c87cdd 9560 if (!netif_running(dev))
a2fbb9ea 9561 return;
a2fbb9ea 9562
33471629 9563 /* offline tests are not supported in MF mode */
f3c87cdd
YG
9564 if (IS_E1HMF(bp))
9565 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9566
9567 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9568 u8 link_up;
9569
9570 link_up = bp->link_vars.link_up;
9571 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9572 bnx2x_nic_load(bp, LOAD_DIAG);
9573 /* wait until link state is restored */
9574 bnx2x_wait_for_link(bp, link_up);
9575
9576 if (bnx2x_test_registers(bp) != 0) {
9577 buf[0] = 1;
9578 etest->flags |= ETH_TEST_FL_FAILED;
9579 }
9580 if (bnx2x_test_memory(bp) != 0) {
9581 buf[1] = 1;
9582 etest->flags |= ETH_TEST_FL_FAILED;
9583 }
9584 buf[2] = bnx2x_test_loopback(bp, link_up);
9585 if (buf[2] != 0)
9586 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9587
f3c87cdd
YG
9588 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9589 bnx2x_nic_load(bp, LOAD_NORMAL);
9590 /* wait until link state is restored */
9591 bnx2x_wait_for_link(bp, link_up);
9592 }
9593 if (bnx2x_test_nvram(bp) != 0) {
9594 buf[3] = 1;
a2fbb9ea
ET
9595 etest->flags |= ETH_TEST_FL_FAILED;
9596 }
f3c87cdd
YG
9597 if (bnx2x_test_intr(bp) != 0) {
9598 buf[4] = 1;
9599 etest->flags |= ETH_TEST_FL_FAILED;
9600 }
9601 if (bp->port.pmf)
9602 if (bnx2x_link_test(bp) != 0) {
9603 buf[5] = 1;
9604 etest->flags |= ETH_TEST_FL_FAILED;
9605 }
f3c87cdd
YG
9606
9607#ifdef BNX2X_EXTRA_DEBUG
9608 bnx2x_panic_dump(bp);
9609#endif
a2fbb9ea
ET
9610}
9611
de832a55
EG
9612static const struct {
9613 long offset;
9614 int size;
9615 u8 string[ETH_GSTRING_LEN];
9616} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9617/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9618 { Q_STATS_OFFSET32(error_bytes_received_hi),
9619 8, "[%d]: rx_error_bytes" },
9620 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9621 8, "[%d]: rx_ucast_packets" },
9622 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9623 8, "[%d]: rx_mcast_packets" },
9624 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9625 8, "[%d]: rx_bcast_packets" },
9626 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9627 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9628 4, "[%d]: rx_phy_ip_err_discards"},
9629 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9630 4, "[%d]: rx_skb_alloc_discard" },
9631 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9632
9633/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9634 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9635 8, "[%d]: tx_packets" }
9636};
9637
bb2a0f7a
YG
9638static const struct {
9639 long offset;
9640 int size;
9641 u32 flags;
66e855f3
YG
9642#define STATS_FLAGS_PORT 1
9643#define STATS_FLAGS_FUNC 2
de832a55 9644#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9645 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9646} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55
EG
9647/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9648 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9649 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9650 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9651 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9652 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9653 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9654 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9655 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9656 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9657 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9658 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9659 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9660 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55
EG
9661 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9662 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9663 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9664 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9665/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9666 8, STATS_FLAGS_PORT, "rx_fragments" },
9667 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9668 8, STATS_FLAGS_PORT, "rx_jabbers" },
9669 { STATS_OFFSET32(no_buff_discard_hi),
9670 8, STATS_FLAGS_BOTH, "rx_discards" },
9671 { STATS_OFFSET32(mac_filter_discard),
9672 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9673 { STATS_OFFSET32(xxoverflow_discard),
9674 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9675 { STATS_OFFSET32(brb_drop_hi),
9676 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9677 { STATS_OFFSET32(brb_truncate_hi),
9678 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9679 { STATS_OFFSET32(pause_frames_received_hi),
9680 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9681 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9682 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9683 { STATS_OFFSET32(nig_timer_max),
9684 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9685/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9686 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9687 { STATS_OFFSET32(rx_skb_alloc_failed),
9688 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9689 { STATS_OFFSET32(hw_csum_err),
9690 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9691
9692 { STATS_OFFSET32(total_bytes_transmitted_hi),
9693 8, STATS_FLAGS_BOTH, "tx_bytes" },
9694 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9695 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9696 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9697 8, STATS_FLAGS_BOTH, "tx_packets" },
9698 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9699 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9700 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9701 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9702 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9703 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9704 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9705 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9706/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9707 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9708 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9709 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9710 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9711 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9712 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9713 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9714 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9715 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9716 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9717 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9718 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9719 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9720 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9721 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9722 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9723 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9724 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9725 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9726/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9727 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55
EG
9728 { STATS_OFFSET32(pause_frames_sent_hi),
9729 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea
ET
9730};
9731
de832a55
EG
9732#define IS_PORT_STAT(i) \
9733 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9734#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9735#define IS_E1HMF_MODE_STAT(bp) \
9736 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9737
a2fbb9ea
ET
9738static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9739{
bb2a0f7a 9740 struct bnx2x *bp = netdev_priv(dev);
de832a55 9741 int i, j, k;
bb2a0f7a 9742
a2fbb9ea
ET
9743 switch (stringset) {
9744 case ETH_SS_STATS:
de832a55
EG
9745 if (is_multi(bp)) {
9746 k = 0;
9747 for_each_queue(bp, i) {
9748 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9749 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9750 bnx2x_q_stats_arr[j].string, i);
9751 k += BNX2X_NUM_Q_STATS;
9752 }
9753 if (IS_E1HMF_MODE_STAT(bp))
9754 break;
9755 for (j = 0; j < BNX2X_NUM_STATS; j++)
9756 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9757 bnx2x_stats_arr[j].string);
9758 } else {
9759 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9760 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9761 continue;
9762 strcpy(buf + j*ETH_GSTRING_LEN,
9763 bnx2x_stats_arr[i].string);
9764 j++;
9765 }
bb2a0f7a 9766 }
a2fbb9ea
ET
9767 break;
9768
9769 case ETH_SS_TEST:
9770 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9771 break;
9772 }
9773}
9774
9775static int bnx2x_get_stats_count(struct net_device *dev)
9776{
bb2a0f7a 9777 struct bnx2x *bp = netdev_priv(dev);
de832a55 9778 int i, num_stats;
bb2a0f7a 9779
de832a55
EG
9780 if (is_multi(bp)) {
9781 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9782 if (!IS_E1HMF_MODE_STAT(bp))
9783 num_stats += BNX2X_NUM_STATS;
9784 } else {
9785 if (IS_E1HMF_MODE_STAT(bp)) {
9786 num_stats = 0;
9787 for (i = 0; i < BNX2X_NUM_STATS; i++)
9788 if (IS_FUNC_STAT(i))
9789 num_stats++;
9790 } else
9791 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9792 }
de832a55 9793
bb2a0f7a 9794 return num_stats;
a2fbb9ea
ET
9795}
9796
9797static void bnx2x_get_ethtool_stats(struct net_device *dev,
9798 struct ethtool_stats *stats, u64 *buf)
9799{
9800 struct bnx2x *bp = netdev_priv(dev);
de832a55
EG
9801 u32 *hw_stats, *offset;
9802 int i, j, k;
bb2a0f7a 9803
de832a55
EG
9804 if (is_multi(bp)) {
9805 k = 0;
9806 for_each_queue(bp, i) {
9807 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9808 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9809 if (bnx2x_q_stats_arr[j].size == 0) {
9810 /* skip this counter */
9811 buf[k + j] = 0;
9812 continue;
9813 }
9814 offset = (hw_stats +
9815 bnx2x_q_stats_arr[j].offset);
9816 if (bnx2x_q_stats_arr[j].size == 4) {
9817 /* 4-byte counter */
9818 buf[k + j] = (u64) *offset;
9819 continue;
9820 }
9821 /* 8-byte counter */
9822 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9823 }
9824 k += BNX2X_NUM_Q_STATS;
9825 }
9826 if (IS_E1HMF_MODE_STAT(bp))
9827 return;
9828 hw_stats = (u32 *)&bp->eth_stats;
9829 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9830 if (bnx2x_stats_arr[j].size == 0) {
9831 /* skip this counter */
9832 buf[k + j] = 0;
9833 continue;
9834 }
9835 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9836 if (bnx2x_stats_arr[j].size == 4) {
9837 /* 4-byte counter */
9838 buf[k + j] = (u64) *offset;
9839 continue;
9840 }
9841 /* 8-byte counter */
9842 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9843 }
de832a55
EG
9844 } else {
9845 hw_stats = (u32 *)&bp->eth_stats;
9846 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9847 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9848 continue;
9849 if (bnx2x_stats_arr[i].size == 0) {
9850 /* skip this counter */
9851 buf[j] = 0;
9852 j++;
9853 continue;
9854 }
9855 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9856 if (bnx2x_stats_arr[i].size == 4) {
9857 /* 4-byte counter */
9858 buf[j] = (u64) *offset;
9859 j++;
9860 continue;
9861 }
9862 /* 8-byte counter */
9863 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9864 j++;
a2fbb9ea 9865 }
a2fbb9ea
ET
9866 }
9867}
9868
9869static int bnx2x_phys_id(struct net_device *dev, u32 data)
9870{
9871 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9872 int port = BP_PORT(bp);
a2fbb9ea
ET
9873 int i;
9874
34f80b04
EG
9875 if (!netif_running(dev))
9876 return 0;
9877
9878 if (!bp->port.pmf)
9879 return 0;
9880
a2fbb9ea
ET
9881 if (data == 0)
9882 data = 2;
9883
9884 for (i = 0; i < (data * 2); i++) {
c18487ee 9885 if ((i % 2) == 0)
34f80b04 9886 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee
YR
9887 bp->link_params.hw_led_mode,
9888 bp->link_params.chip_id);
9889 else
34f80b04 9890 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee
YR
9891 bp->link_params.hw_led_mode,
9892 bp->link_params.chip_id);
9893
a2fbb9ea
ET
9894 msleep_interruptible(500);
9895 if (signal_pending(current))
9896 break;
9897 }
9898
c18487ee 9899 if (bp->link_vars.link_up)
34f80b04 9900 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee
YR
9901 bp->link_vars.line_speed,
9902 bp->link_params.hw_led_mode,
9903 bp->link_params.chip_id);
a2fbb9ea
ET
9904
9905 return 0;
9906}
9907
9908static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557
VZ
9909 .get_settings = bnx2x_get_settings,
9910 .set_settings = bnx2x_set_settings,
9911 .get_drvinfo = bnx2x_get_drvinfo,
a2fbb9ea
ET
9912 .get_wol = bnx2x_get_wol,
9913 .set_wol = bnx2x_set_wol,
7a9b2557
VZ
9914 .get_msglevel = bnx2x_get_msglevel,
9915 .set_msglevel = bnx2x_set_msglevel,
9916 .nway_reset = bnx2x_nway_reset,
9917 .get_link = ethtool_op_get_link,
9918 .get_eeprom_len = bnx2x_get_eeprom_len,
9919 .get_eeprom = bnx2x_get_eeprom,
9920 .set_eeprom = bnx2x_set_eeprom,
9921 .get_coalesce = bnx2x_get_coalesce,
9922 .set_coalesce = bnx2x_set_coalesce,
9923 .get_ringparam = bnx2x_get_ringparam,
9924 .set_ringparam = bnx2x_set_ringparam,
9925 .get_pauseparam = bnx2x_get_pauseparam,
9926 .set_pauseparam = bnx2x_set_pauseparam,
9927 .get_rx_csum = bnx2x_get_rx_csum,
9928 .set_rx_csum = bnx2x_set_rx_csum,
9929 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9930 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557
VZ
9931 .set_flags = bnx2x_set_flags,
9932 .get_flags = ethtool_op_get_flags,
9933 .get_sg = ethtool_op_get_sg,
9934 .set_sg = ethtool_op_set_sg,
a2fbb9ea
ET
9935 .get_tso = ethtool_op_get_tso,
9936 .set_tso = bnx2x_set_tso,
9937 .self_test_count = bnx2x_self_test_count,
7a9b2557
VZ
9938 .self_test = bnx2x_self_test,
9939 .get_strings = bnx2x_get_strings,
a2fbb9ea
ET
9940 .phys_id = bnx2x_phys_id,
9941 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9942 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea
ET
9943};
9944
9945/* end of ethtool_ops */
9946
9947/****************************************************************************
9948* General service functions
9949****************************************************************************/
9950
9951static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9952{
9953 u16 pmcsr;
9954
9955 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9956
9957 switch (state) {
9958 case PCI_D0:
34f80b04 9959 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
9960 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9961 PCI_PM_CTRL_PME_STATUS));
9962
9963 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9964 /* delay required during transition out of D3hot */
a2fbb9ea 9965 msleep(20);
34f80b04 9966 break;
a2fbb9ea 9967
34f80b04
EG
9968 case PCI_D3hot:
9969 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9970 pmcsr |= 3;
a2fbb9ea 9971
34f80b04
EG
9972 if (bp->wol)
9973 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9974
34f80b04
EG
9975 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9976 pmcsr);
a2fbb9ea 9977
34f80b04
EG
9978 /* No more memory access after this point until
9979 * device is brought back to D0.
9980 */
9981 break;
9982
9983 default:
9984 return -EINVAL;
9985 }
9986 return 0;
a2fbb9ea
ET
9987}
9988
237907c1
EG
9989static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9990{
9991 u16 rx_cons_sb;
9992
9993 /* Tell compiler that status block fields can change */
9994 barrier();
9995 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
9996 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9997 rx_cons_sb++;
9998 return (fp->rx_comp_cons != rx_cons_sb);
9999}
10000
34f80b04
EG
10001/*
10002 * net_device service functions
10003 */
10004
a2fbb9ea
ET
10005static int bnx2x_poll(struct napi_struct *napi, int budget)
10006{
10007 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10008 napi);
10009 struct bnx2x *bp = fp->bp;
10010 int work_done = 0;
10011
10012#ifdef BNX2X_STOP_ON_ERROR
10013 if (unlikely(bp->panic))
34f80b04 10014 goto poll_panic;
a2fbb9ea
ET
10015#endif
10016
10017 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10018 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10019 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10020
10021 bnx2x_update_fpsb_idx(fp);
10022
237907c1 10023 if (bnx2x_has_tx_work(fp))
a2fbb9ea
ET
10024 bnx2x_tx_int(fp, budget);
10025
237907c1 10026 if (bnx2x_has_rx_work(fp))
a2fbb9ea 10027 work_done = bnx2x_rx_int(fp, budget);
da5a662a 10028 rmb(); /* BNX2X_HAS_WORK() reads the status block */
a2fbb9ea
ET
10029
10030 /* must not complete if we consumed full budget */
da5a662a 10031 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
a2fbb9ea
ET
10032
10033#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10034poll_panic:
a2fbb9ea 10035#endif
288379f0 10036 napi_complete(napi);
a2fbb9ea 10037
34f80b04 10038 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 10039 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 10040 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
a2fbb9ea
ET
10041 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10042 }
a2fbb9ea
ET
10043 return work_done;
10044}
10045
755735eb
EG
10046
10047/* we split the first BD into headers and data BDs
33471629 10048 * to ease the pain of our fellow microcode engineers
755735eb
EG
10049 * we use one mapping for both BDs
10050 * So far this has only been observed to happen
10051 * in Other Operating Systems(TM)
10052 */
10053static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10054 struct bnx2x_fastpath *fp,
10055 struct eth_tx_bd **tx_bd, u16 hlen,
10056 u16 bd_prod, int nbd)
10057{
10058 struct eth_tx_bd *h_tx_bd = *tx_bd;
10059 struct eth_tx_bd *d_tx_bd;
10060 dma_addr_t mapping;
10061 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10062
10063 /* first fix first BD */
10064 h_tx_bd->nbd = cpu_to_le16(nbd);
10065 h_tx_bd->nbytes = cpu_to_le16(hlen);
10066
10067 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10068 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10069 h_tx_bd->addr_lo, h_tx_bd->nbd);
10070
10071 /* now get a new data BD
10072 * (after the pbd) and fill it */
10073 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10074 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10075
10076 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10077 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10078
10079 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10080 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10081 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10082 d_tx_bd->vlan = 0;
10083 /* this marks the BD as one that has no individual mapping
10084 * the FW ignores this flag in a BD not marked start
10085 */
10086 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10087 DP(NETIF_MSG_TX_QUEUED,
10088 "TSO split data size is %d (%x:%x)\n",
10089 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10090
10091 /* update tx_bd for marking the last BD flag */
10092 *tx_bd = d_tx_bd;
10093
10094 return bd_prod;
10095}
10096
10097static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10098{
10099 if (fix > 0)
10100 csum = (u16) ~csum_fold(csum_sub(csum,
10101 csum_partial(t_header - fix, fix, 0)));
10102
10103 else if (fix < 0)
10104 csum = (u16) ~csum_fold(csum_add(csum,
10105 csum_partial(t_header, -fix, 0)));
10106
10107 return swab16(csum);
10108}
10109
10110static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10111{
10112 u32 rc;
10113
10114 if (skb->ip_summed != CHECKSUM_PARTIAL)
10115 rc = XMIT_PLAIN;
10116
10117 else {
10118 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10119 rc = XMIT_CSUM_V6;
10120 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10121 rc |= XMIT_CSUM_TCP;
10122
10123 } else {
10124 rc = XMIT_CSUM_V4;
10125 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10126 rc |= XMIT_CSUM_TCP;
10127 }
10128 }
10129
10130 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10131 rc |= XMIT_GSO_V4;
10132
10133 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10134 rc |= XMIT_GSO_V6;
10135
10136 return rc;
10137}
10138
632da4d6 10139#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
755735eb
EG
10140/* check if packet requires linearization (packet is too fragmented) */
10141static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10142 u32 xmit_type)
10143{
10144 int to_copy = 0;
10145 int hlen = 0;
10146 int first_bd_sz = 0;
10147
10148 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10149 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10150
10151 if (xmit_type & XMIT_GSO) {
10152 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10153 /* Check if LSO packet needs to be copied:
10154 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10155 int wnd_size = MAX_FETCH_BD - 3;
33471629 10156 /* Number of windows to check */
755735eb
EG
10157 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10158 int wnd_idx = 0;
10159 int frag_idx = 0;
10160 u32 wnd_sum = 0;
10161
10162 /* Headers length */
10163 hlen = (int)(skb_transport_header(skb) - skb->data) +
10164 tcp_hdrlen(skb);
10165
10166 /* Amount of data (w/o headers) on linear part of SKB*/
10167 first_bd_sz = skb_headlen(skb) - hlen;
10168
10169 wnd_sum = first_bd_sz;
10170
10171 /* Calculate the first sum - it's special */
10172 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10173 wnd_sum +=
10174 skb_shinfo(skb)->frags[frag_idx].size;
10175
10176 /* If there was data on linear skb data - check it */
10177 if (first_bd_sz > 0) {
10178 if (unlikely(wnd_sum < lso_mss)) {
10179 to_copy = 1;
10180 goto exit_lbl;
10181 }
10182
10183 wnd_sum -= first_bd_sz;
10184 }
10185
10186 /* Others are easier: run through the frag list and
10187 check all windows */
10188 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10189 wnd_sum +=
10190 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10191
10192 if (unlikely(wnd_sum < lso_mss)) {
10193 to_copy = 1;
10194 break;
10195 }
10196 wnd_sum -=
10197 skb_shinfo(skb)->frags[wnd_idx].size;
10198 }
10199
10200 } else {
10201 /* in non-LSO too fragmented packet should always
10202 be linearized */
10203 to_copy = 1;
10204 }
10205 }
10206
10207exit_lbl:
10208 if (unlikely(to_copy))
10209 DP(NETIF_MSG_TX_QUEUED,
10210 "Linearization IS REQUIRED for %s packet. "
10211 "num_frags %d hlen %d first_bd_sz %d\n",
10212 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10213 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10214
10215 return to_copy;
10216}
632da4d6 10217#endif
755735eb
EG
10218
10219/* called with netif_tx_lock
a2fbb9ea 10220 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10221 * netif_wake_queue()
a2fbb9ea
ET
10222 */
10223static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10224{
10225 struct bnx2x *bp = netdev_priv(dev);
10226 struct bnx2x_fastpath *fp;
555f6c78 10227 struct netdev_queue *txq;
a2fbb9ea
ET
10228 struct sw_tx_bd *tx_buf;
10229 struct eth_tx_bd *tx_bd;
10230 struct eth_tx_parse_bd *pbd = NULL;
10231 u16 pkt_prod, bd_prod;
755735eb 10232 int nbd, fp_index;
a2fbb9ea 10233 dma_addr_t mapping;
755735eb
EG
10234 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10235 int vlan_off = (bp->e1hov ? 4 : 0);
10236 int i;
10237 u8 hlen = 0;
a2fbb9ea
ET
10238
10239#ifdef BNX2X_STOP_ON_ERROR
10240 if (unlikely(bp->panic))
10241 return NETDEV_TX_BUSY;
10242#endif
10243
555f6c78
EG
10244 fp_index = skb_get_queue_mapping(skb);
10245 txq = netdev_get_tx_queue(dev, fp_index);
10246
a2fbb9ea 10247 fp = &bp->fp[fp_index];
755735eb 10248
231fd58a 10249 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10250 fp->eth_q_stats.driver_xoff++,
555f6c78 10251 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10252 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10253 return NETDEV_TX_BUSY;
10254 }
10255
755735eb
EG
10256 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10257 " gso type %x xmit_type %x\n",
10258 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10259 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10260
632da4d6 10261#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10262 /* First, check if we need to linearize the skb
755735eb
EG
10263 (due to FW restrictions) */
10264 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10265 /* Statistics of linearization */
10266 bp->lin_cnt++;
10267 if (skb_linearize(skb) != 0) {
10268 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10269 "silently dropping this SKB\n");
10270 dev_kfree_skb_any(skb);
da5a662a 10271 return NETDEV_TX_OK;
755735eb
EG
10272 }
10273 }
632da4d6 10274#endif
755735eb 10275
a2fbb9ea 10276 /*
755735eb 10277 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10278 then for TSO or xsum we have a parsing info BD,
755735eb 10279 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10280 (don't forget to mark the last one as last,
10281 and to unmap only AFTER you write to the BD ...)
755735eb 10282 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10283 */
10284
10285 pkt_prod = fp->tx_pkt_prod++;
755735eb 10286 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10287
755735eb 10288 /* get a tx_buf and first BD */
a2fbb9ea
ET
10289 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10290 tx_bd = &fp->tx_desc_ring[bd_prod];
10291
10292 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10293 tx_bd->general_data = (UNICAST_ADDRESS <<
10294 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10295 /* header nbd */
10296 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10297
755735eb
EG
10298 /* remember the first BD of the packet */
10299 tx_buf->first_bd = fp->tx_bd_prod;
10300 tx_buf->skb = skb;
a2fbb9ea
ET
10301
10302 DP(NETIF_MSG_TX_QUEUED,
10303 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10304 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10305
0c6671b0
EG
10306#ifdef BCM_VLAN
10307 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10308 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10309 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10310 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10311 vlan_off += 4;
10312 } else
0c6671b0 10313#endif
755735eb 10314 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10315
755735eb 10316 if (xmit_type) {
755735eb 10317 /* turn on parsing and get a BD */
a2fbb9ea
ET
10318 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10319 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10320
10321 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10322 }
10323
10324 if (xmit_type & XMIT_CSUM) {
10325 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10326
10327 /* for now NS flag is not used in Linux */
755735eb 10328 pbd->global_data = (hlen |
96fc1784 10329 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10330 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10331
755735eb
EG
10332 pbd->ip_hlen = (skb_transport_header(skb) -
10333 skb_network_header(skb)) / 2;
10334
10335 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10336
755735eb
EG
10337 pbd->total_hlen = cpu_to_le16(hlen);
10338 hlen = hlen*2 - vlan_off;
a2fbb9ea 10339
755735eb
EG
10340 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10341
10342 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10343 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10344 ETH_TX_BD_FLAGS_IP_CSUM;
10345 else
10346 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10347
10348 if (xmit_type & XMIT_CSUM_TCP) {
10349 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10350
10351 } else {
10352 s8 fix = SKB_CS_OFF(skb); /* signed! */
10353
a2fbb9ea 10354 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10355 pbd->cs_offset = fix / 2;
a2fbb9ea 10356
755735eb
EG
10357 DP(NETIF_MSG_TX_QUEUED,
10358 "hlen %d offset %d fix %d csum before fix %x\n",
10359 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10360 SKB_CS(skb));
10361
10362 /* HW bug: fixup the CSUM */
10363 pbd->tcp_pseudo_csum =
10364 bnx2x_csum_fix(skb_transport_header(skb),
10365 SKB_CS(skb), fix);
10366
10367 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10368 pbd->tcp_pseudo_csum);
10369 }
a2fbb9ea
ET
10370 }
10371
10372 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10373 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10374
10375 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10376 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10377 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10378 tx_bd->nbd = cpu_to_le16(nbd);
10379 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10380
10381 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10382 " nbytes %d flags %x vlan %x\n",
10383 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10384 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10385 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10386
755735eb 10387 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10388
10389 DP(NETIF_MSG_TX_QUEUED,
10390 "TSO packet len %d hlen %d total len %d tso size %d\n",
10391 skb->len, hlen, skb_headlen(skb),
10392 skb_shinfo(skb)->gso_size);
10393
10394 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10395
755735eb
EG
10396 if (unlikely(skb_headlen(skb) > hlen))
10397 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10398 bd_prod, ++nbd);
a2fbb9ea
ET
10399
10400 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10401 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10402 pbd->tcp_flags = pbd_tcp_flags(skb);
10403
10404 if (xmit_type & XMIT_GSO_V4) {
10405 pbd->ip_id = swab16(ip_hdr(skb)->id);
10406 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10407 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10408 ip_hdr(skb)->daddr,
10409 0, IPPROTO_TCP, 0));
755735eb
EG
10410
10411 } else
10412 pbd->tcp_pseudo_csum =
10413 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10414 &ipv6_hdr(skb)->daddr,
10415 0, IPPROTO_TCP, 0));
10416
a2fbb9ea
ET
10417 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10418 }
10419
755735eb
EG
10420 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10421 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10422
755735eb
EG
10423 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10424 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10425
755735eb
EG
10426 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10427 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10428
755735eb
EG
10429 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10430 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10431 tx_bd->nbytes = cpu_to_le16(frag->size);
10432 tx_bd->vlan = cpu_to_le16(pkt_prod);
10433 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10434
755735eb
EG
10435 DP(NETIF_MSG_TX_QUEUED,
10436 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10437 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10438 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10439 }
10440
755735eb 10441 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10442 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10443
10444 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10445 tx_bd, tx_bd->bd_flags.as_bitfield);
10446
a2fbb9ea
ET
10447 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10448
755735eb 10449 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10450 * if the packet contains or ends with it
10451 */
10452 if (TX_BD_POFF(bd_prod) < nbd)
10453 nbd++;
10454
10455 if (pbd)
10456 DP(NETIF_MSG_TX_QUEUED,
10457 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10458 " tcp_flags %x xsum %x seq %u hlen %u\n",
10459 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10460 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10461 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10462
755735eb 10463 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10464
58f4c4cf
EG
10465 /*
10466 * Make sure that the BD data is updated before updating the producer
10467 * since FW might read the BD right after the producer is updated.
10468 * This is only applicable for weak-ordered memory model archs such
10469 * as IA-64. The following barrier is also mandatory since FW will
10470 * assumes packets must have BDs.
10471 */
10472 wmb();
10473
96fc1784
ET
10474 fp->hw_tx_prods->bds_prod =
10475 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 10476 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
10477 fp->hw_tx_prods->packets_prod =
10478 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 10479 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
10480
10481 mmiowb();
10482
755735eb 10483 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10484 dev->trans_start = jiffies;
10485
10486 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10487 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10488 if we put Tx into XOFF state. */
10489 smp_mb();
555f6c78 10490 netif_tx_stop_queue(txq);
de832a55 10491 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10492 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10493 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10494 }
10495 fp->tx_pkt++;
10496
10497 return NETDEV_TX_OK;
10498}
10499
bb2a0f7a 10500/* called with rtnl_lock */
a2fbb9ea
ET
10501static int bnx2x_open(struct net_device *dev)
10502{
10503 struct bnx2x *bp = netdev_priv(dev);
10504
6eccabb3
EG
10505 netif_carrier_off(dev);
10506
a2fbb9ea
ET
10507 bnx2x_set_power_state(bp, PCI_D0);
10508
bb2a0f7a 10509 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10510}
10511
bb2a0f7a 10512/* called with rtnl_lock */
a2fbb9ea
ET
10513static int bnx2x_close(struct net_device *dev)
10514{
a2fbb9ea
ET
10515 struct bnx2x *bp = netdev_priv(dev);
10516
10517 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10518 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10519 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10520 if (!CHIP_REV_IS_SLOW(bp))
10521 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10522
10523 return 0;
10524}
10525
34f80b04
EG
10526/* called with netif_tx_lock from set_multicast */
10527static void bnx2x_set_rx_mode(struct net_device *dev)
10528{
10529 struct bnx2x *bp = netdev_priv(dev);
10530 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10531 int port = BP_PORT(bp);
10532
10533 if (bp->state != BNX2X_STATE_OPEN) {
10534 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10535 return;
10536 }
10537
10538 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10539
10540 if (dev->flags & IFF_PROMISC)
10541 rx_mode = BNX2X_RX_MODE_PROMISC;
10542
10543 else if ((dev->flags & IFF_ALLMULTI) ||
10544 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10545 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10546
10547 else { /* some multicasts */
10548 if (CHIP_IS_E1(bp)) {
10549 int i, old, offset;
10550 struct dev_mc_list *mclist;
10551 struct mac_configuration_cmd *config =
10552 bnx2x_sp(bp, mcast_config);
10553
10554 for (i = 0, mclist = dev->mc_list;
10555 mclist && (i < dev->mc_count);
10556 i++, mclist = mclist->next) {
10557
10558 config->config_table[i].
10559 cam_entry.msb_mac_addr =
10560 swab16(*(u16 *)&mclist->dmi_addr[0]);
10561 config->config_table[i].
10562 cam_entry.middle_mac_addr =
10563 swab16(*(u16 *)&mclist->dmi_addr[2]);
10564 config->config_table[i].
10565 cam_entry.lsb_mac_addr =
10566 swab16(*(u16 *)&mclist->dmi_addr[4]);
10567 config->config_table[i].cam_entry.flags =
10568 cpu_to_le16(port);
10569 config->config_table[i].
10570 target_table_entry.flags = 0;
10571 config->config_table[i].
10572 target_table_entry.client_id = 0;
10573 config->config_table[i].
10574 target_table_entry.vlan_id = 0;
10575
10576 DP(NETIF_MSG_IFUP,
10577 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10578 config->config_table[i].
10579 cam_entry.msb_mac_addr,
10580 config->config_table[i].
10581 cam_entry.middle_mac_addr,
10582 config->config_table[i].
10583 cam_entry.lsb_mac_addr);
10584 }
8d9c5f34 10585 old = config->hdr.length;
34f80b04
EG
10586 if (old > i) {
10587 for (; i < old; i++) {
10588 if (CAM_IS_INVALID(config->
10589 config_table[i])) {
af246401 10590 /* already invalidated */
34f80b04
EG
10591 break;
10592 }
10593 /* invalidate */
10594 CAM_INVALIDATE(config->
10595 config_table[i]);
10596 }
10597 }
10598
10599 if (CHIP_REV_IS_SLOW(bp))
10600 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10601 else
10602 offset = BNX2X_MAX_MULTICAST*(1 + port);
10603
8d9c5f34 10604 config->hdr.length = i;
34f80b04 10605 config->hdr.offset = offset;
8d9c5f34 10606 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10607 config->hdr.reserved1 = 0;
10608
10609 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10610 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10611 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10612 0);
10613 } else { /* E1H */
10614 /* Accept one or more multicasts */
10615 struct dev_mc_list *mclist;
10616 u32 mc_filter[MC_HASH_SIZE];
10617 u32 crc, bit, regidx;
10618 int i;
10619
10620 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10621
10622 for (i = 0, mclist = dev->mc_list;
10623 mclist && (i < dev->mc_count);
10624 i++, mclist = mclist->next) {
10625
7c510e4b
JB
10626 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10627 mclist->dmi_addr);
34f80b04
EG
10628
10629 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10630 bit = (crc >> 24) & 0xff;
10631 regidx = bit >> 5;
10632 bit &= 0x1f;
10633 mc_filter[regidx] |= (1 << bit);
10634 }
10635
10636 for (i = 0; i < MC_HASH_SIZE; i++)
10637 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10638 mc_filter[i]);
10639 }
10640 }
10641
10642 bp->rx_mode = rx_mode;
10643 bnx2x_set_storm_rx_mode(bp);
10644}
10645
10646/* called with rtnl_lock */
a2fbb9ea
ET
10647static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10648{
10649 struct sockaddr *addr = p;
10650 struct bnx2x *bp = netdev_priv(dev);
10651
34f80b04 10652 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10653 return -EINVAL;
10654
10655 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10656 if (netif_running(dev)) {
10657 if (CHIP_IS_E1(bp))
3101c2bc 10658 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10659 else
3101c2bc 10660 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10661 }
a2fbb9ea
ET
10662
10663 return 0;
10664}
10665
c18487ee 10666/* called with rtnl_lock */
a2fbb9ea
ET
10667static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10668{
10669 struct mii_ioctl_data *data = if_mii(ifr);
10670 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10671 int port = BP_PORT(bp);
a2fbb9ea
ET
10672 int err;
10673
10674 switch (cmd) {
10675 case SIOCGMIIPHY:
34f80b04 10676 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10677
c14423fe 10678 /* fallthrough */
c18487ee 10679
a2fbb9ea 10680 case SIOCGMIIREG: {
c18487ee 10681 u16 mii_regval;
a2fbb9ea 10682
c18487ee
YR
10683 if (!netif_running(dev))
10684 return -EAGAIN;
a2fbb9ea 10685
34f80b04 10686 mutex_lock(&bp->port.phy_mutex);
3196a88a 10687 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10688 DEFAULT_PHY_DEV_ADDR,
10689 (data->reg_num & 0x1f), &mii_regval);
10690 data->val_out = mii_regval;
34f80b04 10691 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10692 return err;
10693 }
10694
10695 case SIOCSMIIREG:
10696 if (!capable(CAP_NET_ADMIN))
10697 return -EPERM;
10698
c18487ee
YR
10699 if (!netif_running(dev))
10700 return -EAGAIN;
10701
34f80b04 10702 mutex_lock(&bp->port.phy_mutex);
3196a88a 10703 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10704 DEFAULT_PHY_DEV_ADDR,
10705 (data->reg_num & 0x1f), data->val_in);
34f80b04 10706 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10707 return err;
10708
10709 default:
10710 /* do nothing */
10711 break;
10712 }
10713
10714 return -EOPNOTSUPP;
10715}
10716
34f80b04 10717/* called with rtnl_lock */
a2fbb9ea
ET
10718static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10719{
10720 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10721 int rc = 0;
a2fbb9ea
ET
10722
10723 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10724 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10725 return -EINVAL;
10726
10727 /* This does not race with packet allocation
c14423fe 10728 * because the actual alloc size is
a2fbb9ea
ET
10729 * only updated as part of load
10730 */
10731 dev->mtu = new_mtu;
10732
10733 if (netif_running(dev)) {
34f80b04
EG
10734 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10735 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10736 }
34f80b04
EG
10737
10738 return rc;
a2fbb9ea
ET
10739}
10740
10741static void bnx2x_tx_timeout(struct net_device *dev)
10742{
10743 struct bnx2x *bp = netdev_priv(dev);
10744
10745#ifdef BNX2X_STOP_ON_ERROR
10746 if (!bp->panic)
10747 bnx2x_panic();
10748#endif
10749 /* This allows the netif to be shutdown gracefully before resetting */
10750 schedule_work(&bp->reset_task);
10751}
10752
10753#ifdef BCM_VLAN
34f80b04 10754/* called with rtnl_lock */
a2fbb9ea
ET
10755static void bnx2x_vlan_rx_register(struct net_device *dev,
10756 struct vlan_group *vlgrp)
10757{
10758 struct bnx2x *bp = netdev_priv(dev);
10759
10760 bp->vlgrp = vlgrp;
0c6671b0
EG
10761
10762 /* Set flags according to the required capabilities */
10763 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10764
10765 if (dev->features & NETIF_F_HW_VLAN_TX)
10766 bp->flags |= HW_VLAN_TX_FLAG;
10767
10768 if (dev->features & NETIF_F_HW_VLAN_RX)
10769 bp->flags |= HW_VLAN_RX_FLAG;
10770
a2fbb9ea 10771 if (netif_running(dev))
49d66772 10772 bnx2x_set_client_config(bp);
a2fbb9ea 10773}
34f80b04 10774
a2fbb9ea
ET
10775#endif
10776
10777#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10778static void poll_bnx2x(struct net_device *dev)
10779{
10780 struct bnx2x *bp = netdev_priv(dev);
10781
10782 disable_irq(bp->pdev->irq);
10783 bnx2x_interrupt(bp->pdev->irq, dev);
10784 enable_irq(bp->pdev->irq);
10785}
10786#endif
10787
c64213cd
SH
10788static const struct net_device_ops bnx2x_netdev_ops = {
10789 .ndo_open = bnx2x_open,
10790 .ndo_stop = bnx2x_close,
10791 .ndo_start_xmit = bnx2x_start_xmit,
10792 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10793 .ndo_set_mac_address = bnx2x_change_mac_addr,
10794 .ndo_validate_addr = eth_validate_addr,
10795 .ndo_do_ioctl = bnx2x_ioctl,
10796 .ndo_change_mtu = bnx2x_change_mtu,
10797 .ndo_tx_timeout = bnx2x_tx_timeout,
10798#ifdef BCM_VLAN
10799 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10800#endif
10801#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10802 .ndo_poll_controller = poll_bnx2x,
10803#endif
10804};
10805
10806
34f80b04
EG
10807static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10808 struct net_device *dev)
a2fbb9ea
ET
10809{
10810 struct bnx2x *bp;
10811 int rc;
10812
10813 SET_NETDEV_DEV(dev, &pdev->dev);
10814 bp = netdev_priv(dev);
10815
34f80b04
EG
10816 bp->dev = dev;
10817 bp->pdev = pdev;
a2fbb9ea 10818 bp->flags = 0;
34f80b04 10819 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10820
10821 rc = pci_enable_device(pdev);
10822 if (rc) {
10823 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10824 goto err_out;
10825 }
10826
10827 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10828 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10829 " aborting\n");
10830 rc = -ENODEV;
10831 goto err_out_disable;
10832 }
10833
10834 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10835 printk(KERN_ERR PFX "Cannot find second PCI device"
10836 " base address, aborting\n");
10837 rc = -ENODEV;
10838 goto err_out_disable;
10839 }
10840
34f80b04
EG
10841 if (atomic_read(&pdev->enable_cnt) == 1) {
10842 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10843 if (rc) {
10844 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10845 " aborting\n");
10846 goto err_out_disable;
10847 }
a2fbb9ea 10848
34f80b04
EG
10849 pci_set_master(pdev);
10850 pci_save_state(pdev);
10851 }
a2fbb9ea
ET
10852
10853 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10854 if (bp->pm_cap == 0) {
10855 printk(KERN_ERR PFX "Cannot find power management"
10856 " capability, aborting\n");
10857 rc = -EIO;
10858 goto err_out_release;
10859 }
10860
10861 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10862 if (bp->pcie_cap == 0) {
10863 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10864 " aborting\n");
10865 rc = -EIO;
10866 goto err_out_release;
10867 }
10868
10869 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10870 bp->flags |= USING_DAC_FLAG;
10871 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10872 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10873 " failed, aborting\n");
10874 rc = -EIO;
10875 goto err_out_release;
10876 }
10877
10878 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10879 printk(KERN_ERR PFX "System does not support DMA,"
10880 " aborting\n");
10881 rc = -EIO;
10882 goto err_out_release;
10883 }
10884
34f80b04
EG
10885 dev->mem_start = pci_resource_start(pdev, 0);
10886 dev->base_addr = dev->mem_start;
10887 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10888
10889 dev->irq = pdev->irq;
10890
275f165f 10891 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10892 if (!bp->regview) {
10893 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10894 rc = -ENOMEM;
10895 goto err_out_release;
10896 }
10897
34f80b04
EG
10898 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10899 min_t(u64, BNX2X_DB_SIZE,
10900 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10901 if (!bp->doorbells) {
10902 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10903 rc = -ENOMEM;
10904 goto err_out_unmap;
10905 }
10906
10907 bnx2x_set_power_state(bp, PCI_D0);
10908
34f80b04
EG
10909 /* clean indirect addresses */
10910 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10911 PCICFG_VENDOR_ID_OFFSET);
10912 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10913 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10914 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10915 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10916
34f80b04 10917 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10918
c64213cd 10919 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10920 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10921 dev->features |= NETIF_F_SG;
10922 dev->features |= NETIF_F_HW_CSUM;
10923 if (bp->flags & USING_DAC_FLAG)
10924 dev->features |= NETIF_F_HIGHDMA;
10925#ifdef BCM_VLAN
10926 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10927 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10928#endif
10929 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10930 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10931
10932 return 0;
10933
10934err_out_unmap:
10935 if (bp->regview) {
10936 iounmap(bp->regview);
10937 bp->regview = NULL;
10938 }
a2fbb9ea
ET
10939 if (bp->doorbells) {
10940 iounmap(bp->doorbells);
10941 bp->doorbells = NULL;
10942 }
10943
10944err_out_release:
34f80b04
EG
10945 if (atomic_read(&pdev->enable_cnt) == 1)
10946 pci_release_regions(pdev);
a2fbb9ea
ET
10947
10948err_out_disable:
10949 pci_disable_device(pdev);
10950 pci_set_drvdata(pdev, NULL);
10951
10952err_out:
10953 return rc;
10954}
10955
25047950
ET
10956static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10957{
10958 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10959
10960 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10961 return val;
10962}
10963
10964/* return value of 1=2.5GHz 2=5GHz */
10965static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10966{
10967 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10968
10969 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10970 return val;
10971}
10972
a2fbb9ea
ET
10973static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10974 const struct pci_device_id *ent)
10975{
10976 static int version_printed;
10977 struct net_device *dev = NULL;
10978 struct bnx2x *bp;
25047950 10979 int rc;
a2fbb9ea
ET
10980
10981 if (version_printed++ == 0)
10982 printk(KERN_INFO "%s", version);
10983
10984 /* dev zeroed in init_etherdev */
555f6c78 10985 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10986 if (!dev) {
10987 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10988 return -ENOMEM;
34f80b04 10989 }
a2fbb9ea 10990
a2fbb9ea
ET
10991 bp = netdev_priv(dev);
10992 bp->msglevel = debug;
10993
34f80b04 10994 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10995 if (rc < 0) {
10996 free_netdev(dev);
10997 return rc;
10998 }
10999
a2fbb9ea
ET
11000 pci_set_drvdata(pdev, dev);
11001
34f80b04 11002 rc = bnx2x_init_bp(bp);
693fc0d1
EG
11003 if (rc)
11004 goto init_one_exit;
11005
11006 rc = register_netdev(dev);
34f80b04 11007 if (rc) {
693fc0d1 11008 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
11009 goto init_one_exit;
11010 }
11011
25047950 11012 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
87942b46 11013 " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
34f80b04 11014 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
11015 bnx2x_get_pcie_width(bp),
11016 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
11017 dev->base_addr, bp->pdev->irq);
e174961c 11018 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 11019 return 0;
34f80b04
EG
11020
11021init_one_exit:
11022 if (bp->regview)
11023 iounmap(bp->regview);
11024
11025 if (bp->doorbells)
11026 iounmap(bp->doorbells);
11027
11028 free_netdev(dev);
11029
11030 if (atomic_read(&pdev->enable_cnt) == 1)
11031 pci_release_regions(pdev);
11032
11033 pci_disable_device(pdev);
11034 pci_set_drvdata(pdev, NULL);
11035
11036 return rc;
a2fbb9ea
ET
11037}
11038
11039static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
11040{
11041 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11042 struct bnx2x *bp;
11043
11044 if (!dev) {
228241eb
ET
11045 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11046 return;
11047 }
228241eb 11048 bp = netdev_priv(dev);
a2fbb9ea 11049
a2fbb9ea
ET
11050 unregister_netdev(dev);
11051
11052 if (bp->regview)
11053 iounmap(bp->regview);
11054
11055 if (bp->doorbells)
11056 iounmap(bp->doorbells);
11057
11058 free_netdev(dev);
34f80b04
EG
11059
11060 if (atomic_read(&pdev->enable_cnt) == 1)
11061 pci_release_regions(pdev);
11062
a2fbb9ea
ET
11063 pci_disable_device(pdev);
11064 pci_set_drvdata(pdev, NULL);
11065}
11066
11067static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
11068{
11069 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
11070 struct bnx2x *bp;
11071
34f80b04
EG
11072 if (!dev) {
11073 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11074 return -ENODEV;
11075 }
11076 bp = netdev_priv(dev);
a2fbb9ea 11077
34f80b04 11078 rtnl_lock();
a2fbb9ea 11079
34f80b04 11080 pci_save_state(pdev);
228241eb 11081
34f80b04
EG
11082 if (!netif_running(dev)) {
11083 rtnl_unlock();
11084 return 0;
11085 }
a2fbb9ea
ET
11086
11087 netif_device_detach(dev);
a2fbb9ea 11088
da5a662a 11089 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 11090
a2fbb9ea 11091 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 11092
34f80b04
EG
11093 rtnl_unlock();
11094
a2fbb9ea
ET
11095 return 0;
11096}
11097
11098static int bnx2x_resume(struct pci_dev *pdev)
11099{
11100 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 11101 struct bnx2x *bp;
a2fbb9ea
ET
11102 int rc;
11103
228241eb
ET
11104 if (!dev) {
11105 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
11106 return -ENODEV;
11107 }
228241eb 11108 bp = netdev_priv(dev);
a2fbb9ea 11109
34f80b04
EG
11110 rtnl_lock();
11111
228241eb 11112 pci_restore_state(pdev);
34f80b04
EG
11113
11114 if (!netif_running(dev)) {
11115 rtnl_unlock();
11116 return 0;
11117 }
11118
a2fbb9ea
ET
11119 bnx2x_set_power_state(bp, PCI_D0);
11120 netif_device_attach(dev);
11121
da5a662a 11122 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11123
34f80b04
EG
11124 rtnl_unlock();
11125
11126 return rc;
a2fbb9ea
ET
11127}
11128
f8ef6e44
YG
11129static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
11130{
11131 int i;
11132
11133 bp->state = BNX2X_STATE_ERROR;
11134
11135 bp->rx_mode = BNX2X_RX_MODE_NONE;
11136
11137 bnx2x_netif_stop(bp, 0);
11138
11139 del_timer_sync(&bp->timer);
11140 bp->stats_state = STATS_STATE_DISABLED;
11141 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
11142
11143 /* Release IRQs */
11144 bnx2x_free_irq(bp);
11145
11146 if (CHIP_IS_E1(bp)) {
11147 struct mac_configuration_cmd *config =
11148 bnx2x_sp(bp, mcast_config);
11149
8d9c5f34 11150 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
11151 CAM_INVALIDATE(config->config_table[i]);
11152 }
11153
11154 /* Free SKBs, SGEs, TPA pool and driver internals */
11155 bnx2x_free_skbs(bp);
555f6c78 11156 for_each_rx_queue(bp, i)
f8ef6e44 11157 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 11158 for_each_rx_queue(bp, i)
7cde1c8b 11159 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
11160 bnx2x_free_mem(bp);
11161
11162 bp->state = BNX2X_STATE_CLOSED;
11163
11164 netif_carrier_off(bp->dev);
11165
11166 return 0;
11167}
11168
11169static void bnx2x_eeh_recover(struct bnx2x *bp)
11170{
11171 u32 val;
11172
11173 mutex_init(&bp->port.phy_mutex);
11174
11175 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11176 bp->link_params.shmem_base = bp->common.shmem_base;
11177 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11178
11179 if (!bp->common.shmem_base ||
11180 (bp->common.shmem_base < 0xA0000) ||
11181 (bp->common.shmem_base >= 0xC0000)) {
11182 BNX2X_DEV_INFO("MCP not active\n");
11183 bp->flags |= NO_MCP_FLAG;
11184 return;
11185 }
11186
11187 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11188 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11189 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11190 BNX2X_ERR("BAD MCP validity signature\n");
11191
11192 if (!BP_NOMCP(bp)) {
11193 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11194 & DRV_MSG_SEQ_NUMBER_MASK);
11195 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11196 }
11197}
11198
493adb1f
WX
11199/**
11200 * bnx2x_io_error_detected - called when PCI error is detected
11201 * @pdev: Pointer to PCI device
11202 * @state: The current pci connection state
11203 *
11204 * This function is called after a PCI bus error affecting
11205 * this device has been detected.
11206 */
11207static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11208 pci_channel_state_t state)
11209{
11210 struct net_device *dev = pci_get_drvdata(pdev);
11211 struct bnx2x *bp = netdev_priv(dev);
11212
11213 rtnl_lock();
11214
11215 netif_device_detach(dev);
11216
11217 if (netif_running(dev))
f8ef6e44 11218 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11219
11220 pci_disable_device(pdev);
11221
11222 rtnl_unlock();
11223
11224 /* Request a slot reset */
11225 return PCI_ERS_RESULT_NEED_RESET;
11226}
11227
11228/**
11229 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11230 * @pdev: Pointer to PCI device
11231 *
11232 * Restart the card from scratch, as if from a cold-boot.
11233 */
11234static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11235{
11236 struct net_device *dev = pci_get_drvdata(pdev);
11237 struct bnx2x *bp = netdev_priv(dev);
11238
11239 rtnl_lock();
11240
11241 if (pci_enable_device(pdev)) {
11242 dev_err(&pdev->dev,
11243 "Cannot re-enable PCI device after reset\n");
11244 rtnl_unlock();
11245 return PCI_ERS_RESULT_DISCONNECT;
11246 }
11247
11248 pci_set_master(pdev);
11249 pci_restore_state(pdev);
11250
11251 if (netif_running(dev))
11252 bnx2x_set_power_state(bp, PCI_D0);
11253
11254 rtnl_unlock();
11255
11256 return PCI_ERS_RESULT_RECOVERED;
11257}
11258
11259/**
11260 * bnx2x_io_resume - called when traffic can start flowing again
11261 * @pdev: Pointer to PCI device
11262 *
11263 * This callback is called when the error recovery driver tells us that
11264 * its OK to resume normal operation.
11265 */
11266static void bnx2x_io_resume(struct pci_dev *pdev)
11267{
11268 struct net_device *dev = pci_get_drvdata(pdev);
11269 struct bnx2x *bp = netdev_priv(dev);
11270
11271 rtnl_lock();
11272
f8ef6e44
YG
11273 bnx2x_eeh_recover(bp);
11274
493adb1f 11275 if (netif_running(dev))
f8ef6e44 11276 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11277
11278 netif_device_attach(dev);
11279
11280 rtnl_unlock();
11281}
11282
11283static struct pci_error_handlers bnx2x_err_handler = {
11284 .error_detected = bnx2x_io_error_detected,
11285 .slot_reset = bnx2x_io_slot_reset,
11286 .resume = bnx2x_io_resume,
11287};
11288
a2fbb9ea 11289static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11290 .name = DRV_MODULE_NAME,
11291 .id_table = bnx2x_pci_tbl,
11292 .probe = bnx2x_init_one,
11293 .remove = __devexit_p(bnx2x_remove_one),
11294 .suspend = bnx2x_suspend,
11295 .resume = bnx2x_resume,
11296 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11297};
11298
11299static int __init bnx2x_init(void)
11300{
1cf167f2
EG
11301 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11302 if (bnx2x_wq == NULL) {
11303 printk(KERN_ERR PFX "Cannot create workqueue\n");
11304 return -ENOMEM;
11305 }
11306
a2fbb9ea
ET
11307 return pci_register_driver(&bnx2x_pci_driver);
11308}
11309
11310static void __exit bnx2x_cleanup(void)
11311{
11312 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11313
11314 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11315}
11316
11317module_init(bnx2x_init);
11318module_exit(bnx2x_cleanup);
11319