/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.23"
#define DRV_MODULE_RELDATE	"2008/11/03"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int disable_tpa;
static int use_inta;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(disable_tpa, int, 0);
module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

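/* companion to bnx2x_reg_wr_ind: reads a register through the same
 * PCI config-space GRC window; used only at init, locking is done
 * by the MCP
 */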
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

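/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr.
 * While the DMAE block is not yet ready, falls back to indirect register
 * writes.  Completion is polled through the wb_comp word in the slowpath
 * buffer.
 */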
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

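/* read len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer via DMAE, using indirect register reads while DMAE is not
 * ready
 */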
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

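/* walk the XSTORM/TSTORM/CSTORM/USTORM assert lists in internal memory
 * and print every valid entry; returns the number of asserts found
 */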
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

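/* dump the firmware trace: read the text buffer out of the MCP
 * scratchpad, starting at the last-written mark, and print it
 */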
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_c_idx(%x)  *sb_c_idx(%x)  fp_u_idx(%x)"
			  "  *sb_u_idx(%x)  bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

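/* pick up the status block indices the chip last wrote; the return
 * value flags which of the CSTORM (bit 0) / USTORM (bit 1) indices
 * changed
 */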
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

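/* free tx BDs remaining on the ring; the NUM_TX_RINGS "next-page"
 * entries are counted as used so they are never handed out
 */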
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

826
827static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
828{
829 struct bnx2x *bp = fp->bp;
830 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
831 int done = 0;
832
833#ifdef BNX2X_STOP_ON_ERROR
834 if (unlikely(bp->panic))
835 return;
836#endif
837
838 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
839 sw_cons = fp->tx_pkt_cons;
840
841 while (sw_cons != hw_cons) {
842 u16 pkt_cons;
843
844 pkt_cons = TX_BD(sw_cons);
845
846 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
847
34f80b04 848 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
849 hw_cons, sw_cons, pkt_cons);
850
34f80b04 851/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
852 rmb();
853 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
854 }
855*/
856 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
857 sw_cons++;
858 done++;
859
860 if (done == work)
861 break;
862 }
863
864 fp->tx_pkt_cons = sw_cons;
865 fp->tx_bd_cons = bd_cons;
866
867 /* Need to make the tx_cons update visible to start_xmit()
868 * before checking for netif_queue_stopped(). Without the
869 * memory barrier, there is a small possibility that start_xmit()
870 * will miss it and cause the queue to be stopped forever.
871 */
872 smp_mb();
873
874 /* TBD need a thresh? */
875 if (unlikely(netif_queue_stopped(bp->dev))) {
876
877 netif_tx_lock(bp->dev);
878
879 if (netif_queue_stopped(bp->dev) &&
da5a662a 880 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea
ET
881 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
882 netif_wake_queue(bp->dev);
883
884 netif_tx_unlock(bp->dev);
a2fbb9ea
ET
885 }
886}
887
3196a88a 888
a2fbb9ea
ET
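/* ramrod (slowpath command) completion: advance the fastpath or
 * driver state machine that is waiting on this command
 */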
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = BCM_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      BCM_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

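/* open a TPA aggregation bin: hand the spare skb from the pool to the
 * producer BD and park the partially received skb in the pool until
 * the aggregation is stopped
 */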
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	struct page *sge;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = BCM_PAGE_ALIGN(frag_size) >> BCM_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)BCM_PAGE_SIZE,
						max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages > 8*PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(BCM_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		sge = rx_pg->page;
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			bp->eth_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       BCM_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) &&
			    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			     PARSING_FLAGS_VLAN))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		bp->dev->last_rx = jiffies;

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		bp->eth_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct tstorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	for (i = 0; i < sizeof(struct tstorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	DP(NETIF_MSG_RX_STATUS,
	   "Wrote: bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   bd_prod, rx_comp_prod, rx_sge_prod);
}

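/* main RX completion loop: consume CQEs until the hardware completion
 * consumer is reached or the NAPI budget is exhausted; slowpath CQEs
 * are routed to bnx2x_sp_event(), TPA CQEs to the tpa_start/tpa_stop
 * handlers
 */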
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				bp->eth_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					bp->eth_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				bp->eth_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					bp->eth_stats.hw_csum_err++;
			}
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

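/* acquire one of the shared HW resource locks through the MISC driver
 * control registers, retrying every 5ms for up to 5 seconds
 */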
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

1769/* HW Lock for shared dual port PHYs */
4a37fb66 1770static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1771{
1772 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1773
34f80b04 1774 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1775
c18487ee
YR
1776 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1777 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1778 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1779}
a2fbb9ea 1780
4a37fb66 1781static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1782{
1783 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1784
c18487ee
YR
1785 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1786 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1787 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1788
34f80b04 1789 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1790}
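
/* Added commentary (an inference from the code above): the phy_mutex
 * serializes PHY access between contexts of this driver instance, while
 * the 8072/8073 MDIO HW lock additionally serializes against the driver
 * instance running on the other port of a shared dual port PHY.
 */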

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
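
/* Added commentary (a sketch of the mechanism, inferred from the code
 * above): MISC_REG_GPIO packs per-pin SET, CLR and FLOAT bit groups at
 * distinct positions.  The read keeps only the FLOAT bits, so writing
 * back with CLR set drives the pin low, with SET set drives it high,
 * and with only FLOAT set releases it to hi-Z input.
 */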

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		bnx2x_calc_fc_adv(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
	 In the latter case fairness algorithm should be deactivated.
	 If not all min_rates are zero then those that are zeroes will
	 be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}
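
/* Added worked example (illustrative numbers, not from the source): with
 * four visible vn's whose MIN_BW fields are 1, 0, 3 and 4, the per-vn
 * rates become 100, DEF_MIN_RATE, 300 and 400, so the function returns
 * 800 + DEF_MIN_RATE; only if all fields are 0 does it return 0 and
 * fairness is disabled.
 */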

static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes, "
			   "fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will
		   occur.  The 1.25 coefficient makes the threshold a little
		   bigger than the real time, to compensate for timer
		   inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}
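
/* Added worked example (derived from the formulas above, using the 10G
 * figure from the t_fair comment): at port_rate = 10000 Mbps,
 * r_param = 10000/8 = 1250 bytes/usec and t_fair = T_FAIR_COEF/10000 =
 * 1000 usec, giving upper_bound = 1250 * 1000 * FAIR_MEM credits.
 */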

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
				 u32 wsum, u16 port_rate,
				 struct cmng_struct_per_port *m_cmng_port)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If FAIRNESS is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if ((vn_min_rate == 0) && wsum)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d  vn_max_rate=%d  "
	   "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

#ifdef BNX2X_PER_PROT_QOS
	/* per protocol counter */
	for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
		/* maximal Mbps for this protocol */
		m_rs_vn.protocol_counters[protocol].rate =
						protocol_max_rate[protocol];
		/* the quota in each timer period -
		   number of bytes transmitted in this period */
		m_rs_vn.protocol_counters[protocol].quota =
			(u32)(rs_periodic_timeout_usec *
			      ((double)m_rs_vn.
			       protocol_counters[protocol].rate/8));
	}
#endif

	if (wsum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   wsum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * wsum) will always be greater than zero */
		m_fair_vn.vn_credit_delta =
			max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
			    (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

#ifdef BNX2X_PER_PROT_QOS
	do {
		u32 protocolWeightSum = 0;

		for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
			protocolWeightSum +=
					drvInit.protocol_min_rate[protocol];
		/* per protocol counter -
		   NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
		if (protocolWeightSum > 0) {
			for (protocol = 0;
			     protocol < NUM_OF_PROTOCOLS; protocol++)
				/* credit for each period of the
				   fairness algorithm - number of bytes in
				   T_FAIR (the protocol share the vn rate) */
				m_fair_vn.protocol_credit_delta[protocol] =
					(u32)((vn_min_rate / 8) * t_fair *
					protocol_min_rate / protocolWeightSum);
		}
	} while (0);
#endif

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
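
/* Added worked example (illustrative numbers, not from the source): with
 * wsum = 1000 and vn_min_rate = 200, vn_credit_delta =
 * max(200 * (T_FAIR_COEF / 8000), 2 * fair_threshold) bytes per fairness
 * period, i.e. each vn earns credit in proportion to its share of wsum.
 */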

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	int vn;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_acquire_phy_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	if (bp->link_vars.link_up) {

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int func;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | BP_PORT(bp));

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}
	}

	if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
		struct cmng_struct_per_port m_cmng_port;
		u32 wsum;
		int port = BP_PORT(bp);

		/* Init RATE SHAPING and FAIRNESS contexts */
		wsum = bnx2x_calc_vn_wsum(bp);
		bnx2x_init_port_minmax(bp, (int)wsum,
					bp->link_vars.line_speed,
					&m_cmng_port);
		if (IS_E1HMF(bp))
			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port,
					wsum, bp->link_vars.line_speed,
					&m_cmng_port);
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1UL << 31))
			break;

		msleep(5);
	}
	if (!(val & (1UL << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
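
/* Added commentary (derived from the code above and its use in
 * bnx2x_sp_task()): the returned bitmask flags which default status block
 * indices changed - bit 0 attention bits, bit 1 CStorm, bit 2 UStorm,
 * bit 3 XStorm, bit 4 TStorm.
 */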

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shut down the card to prevent permanent"
			       " damage.  Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
	u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
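
/* Added worked example of the edge detection above (illustrative values):
 * with attn_bits = 0101b, attn_ack = 0011b and attn_state = 0011b,
 * asserted = 0100b (newly raised) and deasserted = 0010b (newly cleared);
 * a bit where attn_bits and attn_ack agree but differ from attn_state
 * triggers the "BAD attention state" error.
 */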

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	/* CStorm events: query_stats, port delete ramrod */
	if (status & 0x2)
		bp->stats_pending = 0;

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	schedule_work(&bp->sp_task);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo]
   (note: the carry term must be parenthesized - without the inner
   parentheses the conditional operator would swallow a_hi) */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
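
/* Added worked example (illustrative values): with s = 0x1:0xFFFFFFFF and
 * a = 0x0:0x1, s_lo wraps to 0, (s_lo < a_lo) detects the wrap and carries
 * 1 into s_hi, giving s = 0x2:0x00000000.
 */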

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
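
/* Added worked example (illustrative values): m = 0x2:0x0 minus
 * s = 0x1:0x1 takes the underflow branch; d_hi = 2 - 1 = 1 is decremented
 * to 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xFFFFFFFF, the expected
 * 64-bit difference.  A negative result clamps to zero.
 */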

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
		ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
	memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats to finish\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3441
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
}

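/* Same idea for the EMAC: its counters are plain 32-bit reads, so
 * UPDATE_EXTEND_STAT only has to extend them against 32-bit wraparound.
 */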
static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
}

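/* Consume a completed hardware statistics DMA: dispatch to the updater
 * for whichever MAC is currently active, fold in the NIG counters and
 * mirror the result into the driver's eth_stats.
 */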
static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	return 0;
}

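/* Pull the per-client counters written by the tstorm (Rx) and xstorm
 * (Tx) firmware.  Each storm stamps its block with a sequence number;
 * a stamp that does not match the expected bp->stats_counter means the
 * block is stale and the whole update is rejected.
 */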
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	int cl_id = BP_CL_ID(bp);
	struct tstorm_per_port_stats *tport =
				&stats->tstorm_common.port_statistics;
	struct tstorm_per_client_stats *tclient =
			&stats->tstorm_common.client_statistics[cl_id];
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct xstorm_per_client_stats *xclient =
			&stats->xstorm_common.client_statistics[cl_id];
	struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	u32 diff;

	/* are storm stats valid? */
	if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
		   " tstorm counter (%d) != stats_counter (%d)\n",
		   tclient->stats_counter, bp->stats_counter);
		return -1;
	}
	if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
		DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
		   " xstorm counter (%d) != stats_counter (%d)\n",
		   xclient->stats_counter, bp->stats_counter);
		return -2;
	}

	fstats->total_bytes_received_hi =
	fstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
	fstats->total_bytes_received_lo =
	fstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

	estats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
	estats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	ADD_64(fstats->total_bytes_received_hi,
	       estats->error_bytes_received_hi,
	       fstats->total_bytes_received_lo,
	       estats->error_bytes_received_lo);

	UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
	UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);

	fstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
	fstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

	UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
				total_unicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
				total_multicast_packets_transmitted);
	UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
				total_broadcast_packets_transmitted);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
	estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
	estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
	estats->mac_discard = le32_to_cpu(tport->mac_discard);

	old_tclient->rcv_unicast_bytes.hi =
				le32_to_cpu(tclient->rcv_unicast_bytes.hi);
	old_tclient->rcv_unicast_bytes.lo =
				le32_to_cpu(tclient->rcv_unicast_bytes.lo);
	old_tclient->rcv_broadcast_bytes.hi =
				le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
	old_tclient->rcv_broadcast_bytes.lo =
				le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
	old_tclient->rcv_multicast_bytes.hi =
				le32_to_cpu(tclient->rcv_multicast_bytes.hi);
	old_tclient->rcv_multicast_bytes.lo =
				le32_to_cpu(tclient->rcv_multicast_bytes.lo);
	old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);

	old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
	old_tclient->packets_too_big_discard =
				le32_to_cpu(tclient->packets_too_big_discard);
	estats->no_buff_discard =
	old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
	old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);

	old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
	old_xclient->unicast_bytes_sent.hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
	old_xclient->unicast_bytes_sent.lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);
	old_xclient->multicast_bytes_sent.hi =
				le32_to_cpu(xclient->multicast_bytes_sent.hi);
	old_xclient->multicast_bytes_sent.lo =
				le32_to_cpu(xclient->multicast_bytes_sent.lo);
	old_xclient->broadcast_bytes_sent.hi =
				le32_to_cpu(xclient->broadcast_bytes_sent.hi);
	old_xclient->broadcast_bytes_sent.lo =
				le32_to_cpu(xclient->broadcast_bytes_sent.lo);

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	return 0;
}

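/* Translate the accumulated 64-bit driver counters into the classic
 * struct net_device_stats reported to the stack.
 */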
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = old_tclient->checksum_discard +
			     estats->mac_discard;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);

	nstats->collisions =
			estats->tx_stat_dot3statssinglecollisionframes_lo +
			estats->tx_stat_dot3statsmultiplecollisionframes_lo +
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;

	estats->jabber_packets_received =
				old_tclient->packets_too_big_discard +
				estats->rx_stat_dot3statsframestoolong_lo;

	nstats->rx_length_errors =
				estats->rx_stat_etherstatsundersizepkts_lo +
				estats->jabber_packets_received;
	nstats->rx_over_errors = estats->brb_drop_lo + estats->brb_truncate_lo;
	nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
	nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
	nstats->rx_fifo_errors = old_tclient->no_buff_discard;
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
			estats->tx_stat_dot3statslatecollisions_lo +
			estats->tx_stat_dot3statsexcessivecollisions_lo;
	nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors;
}

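/* Periodic statistics work: runs only once the previous DMAE pass has
 * completed, merges the HW (PMF only) and storm updates into the
 * netdev stats and re-posts the next query.  Three consecutive stale
 * storm updates are treated as a firmware failure.
 */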
static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int update = 0;

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update)
		bnx2x_net_stats_update(bp);

	else {
		if (bp->stats_pending) {
			bp->stats_pending++;
			if (bp->stats_pending == 3) {
				BNX2X_ERR("stats not updated for 3 consecutive times\n");
				bnx2x_panic();
				return;
			}
		}
	}

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
		       netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
		       estats->driver_xoff, estats->brb_drop_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %u no_buff_discard %u "
			"mac_discard %u mac_filter_discard %u "
			"xxoverflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       old_tclient->packets_too_big_discard,
		       old_tclient->no_buff_discard, estats->mac_discard,
		       estats->mac_filter_discard, estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

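/* Build the final DMAE sequence that flushes the port and function
 * statistics blocks out to the device before collection stops.
 */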
static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

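/* Statistics state machine: indexed by the current state (DISABLED or
 * ENABLED) and the incoming event, each entry names the handler to run
 * and the state to move to.  bnx2x_stats_handle() below is the single
 * entry point; the periodic timer, for example, feeds it
 * STATS_EVENT_UPDATE.
 */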
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

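/* Periodic (current_interval) timer: optionally services the rings in
 * poll mode, exchanges the driver/MCP heartbeat pulse, and kicks a
 * statistics update while the device is up.
 */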
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

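/* Point the USTORM and CSTORM halves of a fastpath status block at the
 * host buffer, disable every host-coalescing index, and ack the block
 * to enable its IGU interrupt line.
 */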
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

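/* The default status block handles slow-path events: it caches the
 * per-group AEU attention masks for all MAX_DYNAMIC_ATTN_GRPS groups
 * and carries one index section per storm (U/C/T/X), each registered
 * with its storm and with every host-coalescing index initially
 * disabled.
 */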
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

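/* Program interrupt coalescing for every queue.  rx_ticks/tx_ticks are
 * presumably kept in microseconds with the HC timeout registers
 * counting in 12-usec units (hence the /12); a tick value of 0 instead
 * disables the index through its HC_DISABLE flag.
 */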
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_BD_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

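/* Build the Rx side of every queue: pre-allocate the TPA (LRO) skb
 * pool when TPA is enabled, chain the "next page" elements of the SGE,
 * BD and CQ rings, fill the rings with buffers, and finally publish
 * the producers to the chip.  TPA-related allocation failures disable
 * TPA on that queue rather than failing the load.
 */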
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu;
	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD +
			   BCM_RX_ETH_PAYLOAD_ALIGN;

	if (bp->flags & TPA_ENABLE_FLAG) {
		DP(NETIF_MSG_IFUP,
		   "rx_buf_size %d effective_mtu %d\n",
		   bp->rx_buf_size, bp->dev->mtu + ETH_OVREHEAD);

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs\n", i);
				bp->eth_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

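/* Chain each queue's Tx BD pages into a ring through their last
 * ("next page") descriptor and reset the Tx producer/consumer state.
 */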
static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

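/* Fill the per-connection (CID) context for every fastpath: the xstorm
 * section gets the Tx BD ring and doorbell addresses, the ustorm
 * section the Rx BD/SGE rings and buffer sizes (plus TPA flags when
 * enabled), and the cstorm section the Tx completion index mapping.
 */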
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 sb_id = FP_SB_ID(fp);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
		context->ustorm_st_context.common.mc_alignment_size =
						BCM_RX_ETH_PAYLOAD_ALIGN;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
					(u16)(BCM_PAGE_SIZE*PAGES_PER_SGE);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	if (!is_multi(bp))
		return;

	DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
			i % bp->num_queues);

	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
}

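/* Push the per-client Tstorm configuration (effective MTU, statistics
 * counter id, optional VLAN stripping and the TPA SGE limits) to every
 * queue's client slot.
 */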
static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
	tstorm_client.statistics_counter_id = BP_CL_ID(bp);
	tstorm_client.config_flags =
				TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			BCM_PAGE_ALIGN(tstorm_client.mtu) >> BCM_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

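/* Translate the driver rx_mode into the Tstorm MAC filter masks.  Each
 * accept/drop field is a per-function bit mask, so only this
 * function's bit (BP_L_ID) is touched.
 */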
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

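/* Per-function internal-memory setup: RSS configuration, initial
 * (closed) rx mode, zeroed per-client statistics, the statistics query
 * DMA addresses for each storm, E1H multi-function flags, and the
 * per-client CQ base and TPA aggregation size limits.
 */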
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS;
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bnx2x_set_storm_rx_mode(bp);

	/* reset xstorm per client statistics */
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}
	/* reset tstorm per client statistics */
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, BP_CL_ID(bp)) +
		       i*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size */
	max_agg_size = min((u32)(bp->rx_buf_size +
				 8*BCM_PAGE_SIZE*PAGES_PER_SGE),
			   (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
		       U64_HI(fp->rx_comp_mapping));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
			 max_agg_size);
	}
}

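/* Run the internal-memory init appropriate to the load level reported
 * by the MCP.  The switch intentionally falls through: a COMMON load
 * also performs the PORT and FUNCTION steps, and a PORT load the
 * FUNCTION step.
 */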
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

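/* Top-level NIC init: set up each fastpath and its status block, then
 * the default status block, coalescing, all rings, contexts and
 * internal memory, and only then enable interrupts.
 */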
static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
		fp->sb_id = fp->cl_id;
		DP(NETIF_MSG_IFUP,
		   "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
		   bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      FP_SB_ID(fp));
		bnx2x_update_fpsb_idx(fp);
	}

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_int_enable(bp);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
					      &bp->gunzip_mapping);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
			    bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
	       " decompression\n", bp->dev->name);
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
				    bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

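/* Inflate a gzip-wrapped firmware blob into gunzip_buf: validate the
 * gzip magic, skip the 10-byte header (plus the optional NUL-terminated
 * file name when FNAME is set), then run a raw zlib inflate
 * (-MAX_WBITS) over the deflate payload.
 */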
static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
		return -EINVAL;

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
		       bp->dev->name, bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		printk(KERN_ERR PFX "%s: Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
		       bp->dev->name, bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

4915/* some of the internal memories
4916 * are not directly readable from the driver
4917 * to test them we send debug packets
4918 */
4919static int bnx2x_int_mem_test(struct bnx2x *bp)
4920{
4921 int factor;
4922 int count, i;
4923 u32 val = 0;
4924
ad8d3948 4925 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4926 factor = 120;
ad8d3948
EG
4927 else if (CHIP_REV_IS_EMUL(bp))
4928 factor = 200;
4929 else
a2fbb9ea 4930 factor = 1;
a2fbb9ea
ET
4931
4932 DP(NETIF_MSG_HW, "start part1\n");
4933
4934 /* Disable inputs of parser neighbor blocks */
4935 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4936 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4937 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4938 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4939
4940 /* Write 0 to parser credits for CFC search request */
4941 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4942
4943 /* send Ethernet packet */
4944 bnx2x_lb_pckt(bp);
4945
4946 /* TODO do i reset NIG statistic? */
4947 /* Wait until NIG register shows 1 packet of size 0x10 */
4948 count = 1000 * factor;
4949 while (count) {
34f80b04 4950
a2fbb9ea
ET
4951 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4952 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4953 if (val == 0x10)
4954 break;
4955
4956 msleep(10);
4957 count--;
4958 }
4959 if (val != 0x10) {
4960 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4961 return -1;
4962 }
4963
4964 /* Wait until PRS register shows 1 packet */
4965 count = 1000 * factor;
4966 while (count) {
4967 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4968 if (val == 1)
4969 break;
4970
4971 msleep(10);
4972 count--;
4973 }
4974 if (val != 0x1) {
4975 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4976 return -2;
4977 }
4978
4979 /* Reset and init BRB, PRS */
34f80b04 4980 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4981 msleep(50);
34f80b04 4982 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4983 msleep(50);
4984 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4985 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4986
4987 DP(NETIF_MSG_HW, "part2\n");
4988
4989 /* Disable inputs of parser neighbor blocks */
4990 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4991 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4992 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4993 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4994
4995 /* Write 0 to parser credits for CFC search request */
4996 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4997
4998 /* send 10 Ethernet packets */
4999 for (i = 0; i < 10; i++)
5000 bnx2x_lb_pckt(bp);
5001
5002 /* Wait until NIG register shows 10 + 1
5003 packets of size 11*0x10 = 0xb0 */
5004 count = 1000 * factor;
5005 while (count) {
34f80b04 5006
a2fbb9ea
ET
5007 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5008 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5009 if (val == 0xb0)
5010 break;
5011
5012 msleep(10);
5013 count--;
5014 }
5015 if (val != 0xb0) {
5016 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5017 return -3;
5018 }
5019
5020 /* Wait until PRS register shows 2 packets */
5021 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5022 if (val != 2)
5023 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5024
5025 /* Write 1 to parser credits for CFC search request */
5026 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5027
5028 /* Wait until PRS register shows 3 packets */
5029 msleep(10 * factor);
5030 /* Wait until NIG register shows 1 packet of size 0x10 */
5031 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5032 if (val != 3)
5033 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5034
5035 /* clear NIG EOP FIFO */
5036 for (i = 0; i < 11; i++)
5037 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5038 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5039 if (val != 1) {
5040 BNX2X_ERR("clear of NIG failed\n");
5041 return -4;
5042 }
5043
5044 /* Reset and init BRB, PRS, NIG */
5045 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5046 msleep(50);
5047 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5048 msleep(50);
5049 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5050 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5051#ifndef BCM_ISCSI
5052 /* set NIC mode */
5053 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5054#endif
5055
5056 /* Enable inputs of parser neighbor blocks */
5057 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5058 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5059 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5060 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5061
5062 DP(NETIF_MSG_HW, "done\n");
5063
5064 return 0; /* OK */
5065}
5066
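/* Editorial note: the polling loops in bnx2x_int_mem_test() above all follow
 * one pattern: read a wide-bus counter via DMAE until it reaches an expected
 * value, sleeping 10ms between reads.  Below is a minimal sketch of that
 * pattern as a helper; bnx2x_poll_dmae_val() is a hypothetical name and is
 * not part of this driver (plain GRC registers use the existing reg_poll()
 * instead).
 */
static int bnx2x_poll_dmae_val(struct bnx2x *bp, u32 reg, u32 expected,
			       int count)
{
	u32 val = ~expected;

	while (count-- > 0) {
		/* wide-bus statistics must be read via DMAE, not REG_RD */
		bnx2x_read_dmae(bp, reg, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == expected)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout: reg 0x%x val 0x%x\n", reg, val);
	return -EBUSY;
}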
5067static void enable_blocks_attention(struct bnx2x *bp)
5068{
5069 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5070 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5071 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5072 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5073 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5074 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5075 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5076 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5077 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5078/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5079/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5080 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5081 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5082 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5083/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5084/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5085 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5086 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5087 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5088 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5089/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5090/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5091 if (CHIP_REV_IS_FPGA(bp))
5092 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5093 else
5094 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5095 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5096 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5097 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5098/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5099/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5100 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5101 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5102/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5103	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bits 3,4 masked */
5104}
5105
5106
5107static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5108{
a2fbb9ea 5109 u32 val, i;
a2fbb9ea 5110
34f80b04 5111 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5112
5113 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5114 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5115
5116 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5117 if (CHIP_IS_E1H(bp))
5118 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5119
5120 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5121 msleep(30);
5122 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5123
5124 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5125 if (CHIP_IS_E1(bp)) {
5126 /* enable HW interrupt from PXP on USDM overflow
5127 bit 16 on INT_MASK_0 */
5128 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5129 }
a2fbb9ea 5130
5131 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5132 bnx2x_init_pxp(bp);
5133
5134#ifdef __BIG_ENDIAN
5135 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5136 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5137 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5138 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5139 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5140 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
5141
5142/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5143 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5144 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5145 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5146 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5147#endif
5148
34f80b04 5149 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5150#ifdef BCM_ISCSI
5151 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5152 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5153 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5154#endif
5155
5156 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5157 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5158
5159	/* let the HW do its magic ... */
5160 msleep(100);
5161 /* finish PXP init */
5162 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5163 if (val != 1) {
5164 BNX2X_ERR("PXP2 CFG failed\n");
5165 return -EBUSY;
5166 }
5167 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5168 if (val != 1) {
5169 BNX2X_ERR("PXP2 RD_INIT failed\n");
5170 return -EBUSY;
5171 }
a2fbb9ea 5172
5173 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5174 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5175
34f80b04 5176 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5177
5178 /* clean the DMAE memory */
5179 bp->dmae_ready = 1;
5180 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5181
5182 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5183 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5184 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5185 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5186
5187 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5188 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5189 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5190 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5191
5192 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5193 /* soft reset pulse */
5194 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5195 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5196
5197#ifdef BCM_ISCSI
34f80b04 5198 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5199#endif
a2fbb9ea 5200
5201 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5202 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5203 if (!CHIP_REV_IS_SLOW(bp)) {
5204 /* enable hw interrupt from doorbell Q */
5205 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5206 }
a2fbb9ea 5207
5208 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5209 if (CHIP_REV_IS_SLOW(bp)) {
5210 /* fix for emulation and FPGA for no pause */
5211 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5212 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5213 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5214 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5215 }
a2fbb9ea 5216
34f80b04 5217 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5218 /* set NIC mode */
5219 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5220 if (CHIP_IS_E1H(bp))
5221 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5222
5223 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5224 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5225 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5226 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5227
5228 if (CHIP_IS_E1H(bp)) {
5229 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5230 STORM_INTMEM_SIZE_E1H/2);
5231 bnx2x_init_fill(bp,
5232 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5233 0, STORM_INTMEM_SIZE_E1H/2);
5234 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5235 STORM_INTMEM_SIZE_E1H/2);
5236 bnx2x_init_fill(bp,
5237 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5238 0, STORM_INTMEM_SIZE_E1H/2);
5239 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5240 STORM_INTMEM_SIZE_E1H/2);
5241 bnx2x_init_fill(bp,
5242 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5243 0, STORM_INTMEM_SIZE_E1H/2);
5244 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5245 STORM_INTMEM_SIZE_E1H/2);
5246 bnx2x_init_fill(bp,
5247 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5248 0, STORM_INTMEM_SIZE_E1H/2);
5249 } else { /* E1 */
5250 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5251 STORM_INTMEM_SIZE_E1);
5252 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5253 STORM_INTMEM_SIZE_E1);
5254 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5255 STORM_INTMEM_SIZE_E1);
5256 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5257 STORM_INTMEM_SIZE_E1);
34f80b04 5258 }
a2fbb9ea 5259
5260 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5261 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5262 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5263 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5264
5265 /* sync semi rtc */
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5267 0x80000000);
5268 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5269 0x80000000);
a2fbb9ea 5270
5271 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5272 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5273 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5274
5275 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5276 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5277 REG_WR(bp, i, 0xc0cac01a);
5278 /* TODO: replace with something meaningful */
5279 }
5280 if (CHIP_IS_E1H(bp))
5281 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
5282 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5283
5284 if (sizeof(union cdu_context) != 1024)
5285 /* we currently assume that a context is 1024 bytes */
5286 printk(KERN_ALERT PFX "please adjust the size of"
5287 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5288
5289 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5290 val = (4 << 24) + (0 << 12) + 1024;
5291 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5292 if (CHIP_IS_E1(bp)) {
5293	/* !!! fix PXP client credit until the Excel init values are updated */
5294 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5295 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5296 }
a2fbb9ea 5297
5298 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5299 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 5300
5301 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5302 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5303
5304 /* PXPCS COMMON comes here */
5305 /* Reset PCIE errors for debug */
5306 REG_WR(bp, 0x2814, 0xffffffff);
5307 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5308
5309 /* EMAC0 COMMON comes here */
5310 /* EMAC1 COMMON comes here */
5311 /* DBU COMMON comes here */
5312 /* DBG COMMON comes here */
5313
5314 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5315 if (CHIP_IS_E1H(bp)) {
5316 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5317 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5318 }
5319
5320 if (CHIP_REV_IS_SLOW(bp))
5321 msleep(200);
5322
5323 /* finish CFC init */
5324 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5325 if (val != 1) {
5326 BNX2X_ERR("CFC LL_INIT failed\n");
5327 return -EBUSY;
5328 }
5329 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5330 if (val != 1) {
5331 BNX2X_ERR("CFC AC_INIT failed\n");
5332 return -EBUSY;
5333 }
5334 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5335 if (val != 1) {
5336 BNX2X_ERR("CFC CAM_INIT failed\n");
5337 return -EBUSY;
5338 }
5339 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5340
5341 /* read NIG statistic
5342 to see if this is our first up since powerup */
5343 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5344 val = *bnx2x_sp(bp, wb_data[0]);
5345
5346 /* do internal memory self test */
5347 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5348 BNX2X_ERR("internal mem self test failed\n");
5349 return -EBUSY;
5350 }
5351
5352 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5353 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5354 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5355 /* Fan failure is indicated by SPIO 5 */
5356 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5357 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5358
5359 /* set to active low mode */
5360 val = REG_RD(bp, MISC_REG_SPIO_INT);
5361 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5362 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5363 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5364
5365 /* enable interrupt to signal the IGU */
5366 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5367 val |= (1 << MISC_REGISTERS_SPIO_5);
5368 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5369 break;
f1410647 5370
5371 default:
5372 break;
5373 }
f1410647 5374
5375 /* clear PXP2 attentions */
5376 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5377
34f80b04 5378 enable_blocks_attention(bp);
a2fbb9ea 5379
5380 if (!BP_NOMCP(bp)) {
5381 bnx2x_acquire_phy_lock(bp);
5382 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5383 bnx2x_release_phy_lock(bp);
5384 } else
5385 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5386
5387 return 0;
5388}
a2fbb9ea 5389
5390static int bnx2x_init_port(struct bnx2x *bp)
5391{
5392 int port = BP_PORT(bp);
5393 u32 val;
a2fbb9ea 5394
5395 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5396
5397 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5398
5399 /* Port PXP comes here */
5400 /* Port PXP2 comes here */
5401#ifdef BCM_ISCSI
5402 /* Port0 1
5403 * Port1 385 */
5404 i++;
5405 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5406 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5407 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5408 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5409
5410 /* Port0 2
5411 * Port1 386 */
5412 i++;
5413 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5414 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5415 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5416 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5417
5418 /* Port0 3
5419 * Port1 387 */
5420 i++;
5421 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5422 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5423 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5424 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5425#endif
34f80b04 5426 /* Port CMs come here */
5427
5428 /* Port QM comes here */
5429#ifdef BCM_ISCSI
5430 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5431 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5432
5433 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5434 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5435#endif
5436 /* Port DQ comes here */
5437 /* Port BRB1 comes here */
ad8d3948 5438 /* Port PRS comes here */
5439 /* Port TSDM comes here */
5440 /* Port CSDM comes here */
5441 /* Port USDM comes here */
5442 /* Port XSDM comes here */
5443 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5444 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5445 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5446 port ? USEM_PORT1_END : USEM_PORT0_END);
5447 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5448 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5449 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5450 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5451 /* Port UPB comes here */
5452 /* Port XPB comes here */
5453
5454 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5455 port ? PBF_PORT1_END : PBF_PORT0_END);
5456
5457	/* configure PBF to work without PAUSE, MTU 9000 */
34f80b04 5458 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5459
5460 /* update threshold */
34f80b04 5461 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5462 /* update init credit */
34f80b04 5463 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5464
5465 /* probe changes */
34f80b04 5466 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5467 msleep(5);
34f80b04 5468 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5469
5470#ifdef BCM_ISCSI
5471 /* tell the searcher where the T2 table is */
5472 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5473
5474 wb_write[0] = U64_LO(bp->t2_mapping);
5475 wb_write[1] = U64_HI(bp->t2_mapping);
5476 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5477 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5478 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5479 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5480
5481 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5482 /* Port SRCH comes here */
5483#endif
5484 /* Port CDU comes here */
5485 /* Port CFC comes here */
5486
5487 if (CHIP_IS_E1(bp)) {
5488 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5489 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5490 }
5491 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5492 port ? HC_PORT1_END : HC_PORT0_END);
5493
5494 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5495 MISC_AEU_PORT0_START,
5496 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5497 /* init aeu_mask_attn_func_0/1:
5498 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5499 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5500 * bits 4-7 are used for "per vn group attention" */
5501 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5502 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5503
5504 /* Port PXPCS comes here */
5505 /* Port EMAC0 comes here */
5506 /* Port EMAC1 comes here */
5507 /* Port DBU comes here */
5508 /* Port DBG comes here */
5509 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5510 port ? NIG_PORT1_END : NIG_PORT0_END);
5511
5512 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5513
5514 if (CHIP_IS_E1H(bp)) {
5515 u32 wsum;
5516 struct cmng_struct_per_port m_cmng_port;
5517 int vn;
5518
5519 /* 0x2 disable e1hov, 0x1 enable */
5520 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5521 (IS_E1HMF(bp) ? 0x1 : 0x2));
5522
5523 /* Init RATE SHAPING and FAIRNESS contexts.
5524	   Initialize as if there is a 10G link. */
5525 wsum = bnx2x_calc_vn_wsum(bp);
5526 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
5527 if (IS_E1HMF(bp))
5528 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5529 bnx2x_init_vn_minmax(bp, 2*vn + port,
5530 wsum, 10000, &m_cmng_port);
5531 }
5532
5533 /* Port MCP comes here */
5534 /* Port DMAE comes here */
5535
34f80b04 5536 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5537 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5538 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5539 /* add SPIO 5 to group 0 */
5540 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5541 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5542 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5543 break;
5544
5545 default:
5546 break;
5547 }
5548
c18487ee 5549 bnx2x__link_reset(bp);
a2fbb9ea 5550
5551 return 0;
5552}
5553
5554#define ILT_PER_FUNC (768/2)
5555#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5556/* the physical address is shifted right 12 bits, and a valid bit (1)
5557   is added as the 53rd bit;
5558   since this is a wide register(TM)
5559   we split it into two 32-bit writes
5560 */
5561#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5562#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5563#define PXP_ONE_ILT(x) (((x) << 10) | x)
5564#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
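/* Worked example (editorial): for a DMA address of 0x0000123456789000,
 * ONCHIP_ADDR1() keeps the low 32 bits of (addr >> 12), i.e. 0x23456789,
 * and ONCHIP_ADDR2() packs the remaining high bits (addr >> 44 = 0x1)
 * together with the valid bit (1 << 20), giving 0x00100001.  Written as
 * the low/high halves of the wide register, this yields the page-aligned
 * address plus the valid bit in the 53rd bit position.
 */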
5565
5566#define CNIC_ILT_LINES 0
5567
5568static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5569{
5570 int reg;
5571
5572 if (CHIP_IS_E1H(bp))
5573 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5574 else /* E1 */
5575 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5576
5577 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5578}
5579
5580static int bnx2x_init_func(struct bnx2x *bp)
5581{
5582 int port = BP_PORT(bp);
5583 int func = BP_FUNC(bp);
5584 int i;
5585
5586 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5587
5588 i = FUNC_ILT_BASE(func);
5589
5590 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5591 if (CHIP_IS_E1H(bp)) {
5592 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5593 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5594 } else /* E1 */
5595 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5596 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5597
5598
5599 if (CHIP_IS_E1H(bp)) {
5600 for (i = 0; i < 9; i++)
5601 bnx2x_init_block(bp,
5602 cm_start[func][i], cm_end[func][i]);
5603
5604 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5605 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5606 }
5607
5608 /* HC init per function */
5609 if (CHIP_IS_E1H(bp)) {
5610 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5611
5612 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5613 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5614 }
5615 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5616
5617 if (CHIP_IS_E1H(bp))
5618 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5619
c14423fe 5620 /* Reset PCIE errors for debug */
5621 REG_WR(bp, 0x2114, 0xffffffff);
5622 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5623
5624 return 0;
5625}
5626
5627static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5628{
5629 int i, rc = 0;
a2fbb9ea 5630
5631 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5632 BP_FUNC(bp), load_code);
a2fbb9ea 5633
5634 bp->dmae_ready = 0;
5635 mutex_init(&bp->dmae_mutex);
5636 bnx2x_gunzip_init(bp);
a2fbb9ea 5637
5638 switch (load_code) {
5639 case FW_MSG_CODE_DRV_LOAD_COMMON:
5640 rc = bnx2x_init_common(bp);
5641 if (rc)
5642 goto init_hw_err;
5643 /* no break */
5644
5645 case FW_MSG_CODE_DRV_LOAD_PORT:
5646 bp->dmae_ready = 1;
5647 rc = bnx2x_init_port(bp);
5648 if (rc)
5649 goto init_hw_err;
5650 /* no break */
5651
5652 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5653 bp->dmae_ready = 1;
5654 rc = bnx2x_init_func(bp);
5655 if (rc)
5656 goto init_hw_err;
5657 break;
5658
5659 default:
5660 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5661 break;
5662 }
5663
5664 if (!BP_NOMCP(bp)) {
5665 int func = BP_FUNC(bp);
5666
5667 bp->fw_drv_pulse_wr_seq =
34f80b04 5668 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5669 DRV_PULSE_SEQ_MASK);
5670 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5671 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5672 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5673 } else
5674 bp->func_stx = 0;
a2fbb9ea 5675
5676 /* this needs to be done before gunzip end */
5677 bnx2x_zero_def_sb(bp);
5678 for_each_queue(bp, i)
5679 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5680
5681init_hw_err:
5682 bnx2x_gunzip_end(bp);
5683
5684 return rc;
5685}
5686
c14423fe 5687/* send the MCP a request, block until there is a reply */
5688static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5689{
34f80b04 5690 int func = BP_FUNC(bp);
5691 u32 seq = ++bp->fw_seq;
5692 u32 rc = 0;
5693 u32 cnt = 1;
5694 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5695
34f80b04 5696 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5697 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5698
5699 do {
5700		/* let the FW do its magic ... */
5701 msleep(delay);
a2fbb9ea 5702
19680c48 5703 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5704
5705	/* Give the FW up to 2 seconds (200*10ms) */
5706 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5707
5708 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5709 cnt*delay, rc, seq);
5710
5711 /* is this a reply to our command? */
5712 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5713 rc &= FW_MSG_CODE_MASK;
f1410647 5714
5715 } else {
5716 /* FW BUG! */
5717 BNX2X_ERR("FW failed to respond!\n");
5718 bnx2x_fw_dump(bp);
5719 rc = 0;
5720 }
f1410647 5721
5722 return rc;
5723}
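/* Editorial note: a typical bnx2x_fw_command() caller checks the masked
 * reply code; a return of 0 means the MCP never echoed our sequence number.
 * Minimal usage sketch, mirroring the load path later in this file:
 */
#if 0	/* illustrative only */
	u32 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);

	if (!load_code) {
		BNX2X_ERR("MCP response failure, aborting\n");
		return -EBUSY;
	}
	/* otherwise load_code is one of the FW_MSG_CODE_DRV_LOAD_* values */
#endif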
5724
5725static void bnx2x_free_mem(struct bnx2x *bp)
5726{
5727
5728#define BNX2X_PCI_FREE(x, y, size) \
5729 do { \
5730 if (x) { \
5731 pci_free_consistent(bp->pdev, size, x, y); \
5732 x = NULL; \
5733 y = 0; \
5734 } \
5735 } while (0)
5736
5737#define BNX2X_FREE(x) \
5738 do { \
5739 if (x) { \
5740 vfree(x); \
5741 x = NULL; \
5742 } \
5743 } while (0)
5744
5745 int i;
5746
5747 /* fastpath */
5748 for_each_queue(bp, i) {
5749
5750 /* Status blocks */
5751 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5752 bnx2x_fp(bp, i, status_blk_mapping),
5753 sizeof(struct host_status_block) +
5754 sizeof(struct eth_tx_db_data));
5755
5756 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5757 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5758 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5759 bnx2x_fp(bp, i, tx_desc_mapping),
5760 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5761
5762 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5763 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5764 bnx2x_fp(bp, i, rx_desc_mapping),
5765 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5766
5767 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5768 bnx2x_fp(bp, i, rx_comp_mapping),
5769 sizeof(struct eth_fast_path_rx_cqe) *
5770 NUM_RCQ_BD);
a2fbb9ea 5771
7a9b2557 5772 /* SGE ring */
32626230 5773 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
5774 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5775 bnx2x_fp(bp, i, rx_sge_mapping),
5776 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5777 }
5778 /* end of fastpath */
5779
5780 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5781 sizeof(struct host_def_status_block));
5782
5783 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5784 sizeof(struct bnx2x_slowpath));
5785
5786#ifdef BCM_ISCSI
5787 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5788 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5789 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5790 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5791#endif
7a9b2557 5792 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
5793
5794#undef BNX2X_PCI_FREE
5795#undef BNX2X_FREE
5796}
5797
5798static int bnx2x_alloc_mem(struct bnx2x *bp)
5799{
5800
5801#define BNX2X_PCI_ALLOC(x, y, size) \
5802 do { \
5803 x = pci_alloc_consistent(bp->pdev, size, y); \
5804 if (x == NULL) \
5805 goto alloc_mem_err; \
5806 memset(x, 0, size); \
5807 } while (0)
5808
5809#define BNX2X_ALLOC(x, size) \
5810 do { \
5811 x = vmalloc(size); \
5812 if (x == NULL) \
5813 goto alloc_mem_err; \
5814 memset(x, 0, size); \
5815 } while (0)
5816
5817 int i;
5818
5819 /* fastpath */
5820 for_each_queue(bp, i) {
5821 bnx2x_fp(bp, i, bp) = bp;
5822
5823 /* Status blocks */
5824 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5825 &bnx2x_fp(bp, i, status_blk_mapping),
5826 sizeof(struct host_status_block) +
5827 sizeof(struct eth_tx_db_data));
5828
5829 bnx2x_fp(bp, i, hw_tx_prods) =
5830 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5831
5832 bnx2x_fp(bp, i, tx_prods_mapping) =
5833 bnx2x_fp(bp, i, status_blk_mapping) +
5834 sizeof(struct host_status_block);
5835
5836 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5837 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5838 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5839 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5840 &bnx2x_fp(bp, i, tx_desc_mapping),
5841 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5842
5843 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5844 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5845 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5846 &bnx2x_fp(bp, i, rx_desc_mapping),
5847 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5848
5849 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5850 &bnx2x_fp(bp, i, rx_comp_mapping),
5851 sizeof(struct eth_fast_path_rx_cqe) *
5852 NUM_RCQ_BD);
5853
5854 /* SGE ring */
5855 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5856 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5857 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5858 &bnx2x_fp(bp, i, rx_sge_mapping),
5859 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5860 }
5861 /* end of fastpath */
5862
5863 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5864 sizeof(struct host_def_status_block));
5865
5866 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5867 sizeof(struct bnx2x_slowpath));
5868
5869#ifdef BCM_ISCSI
5870 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5871
5872 /* Initialize T1 */
5873 for (i = 0; i < 64*1024; i += 64) {
5874 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5875 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5876 }
5877
5878 /* allocate searcher T2 table
5879 we allocate 1/4 of alloc num for T2
5880 (which is not entered into the ILT) */
5881 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5882
5883 /* Initialize T2 */
5884 for (i = 0; i < 16*1024; i += 64)
5885 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5886
c14423fe 5887 /* now fixup the last line in the block to point to the next block */
5888 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5889
5890 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5891 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5892
5893 /* QM queues (128*MAX_CONN) */
5894 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5895#endif
5896
5897 /* Slow path ring */
5898 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5899
5900 return 0;
5901
5902alloc_mem_err:
5903 bnx2x_free_mem(bp);
5904 return -ENOMEM;
5905
5906#undef BNX2X_PCI_ALLOC
5907#undef BNX2X_ALLOC
5908}
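/* Editorial note: bnx2x_alloc_mem()/bnx2x_free_mem() rely on two invariants
 * that make the single alloc_mem_err unwind path safe: every BNX2X_ALLOC()/
 * BNX2X_PCI_ALLOC() jumps to alloc_mem_err on failure, and every
 * BNX2X_FREE()/BNX2X_PCI_FREE() checks for NULL before freeing and clears
 * the pointer afterwards, so bnx2x_free_mem() can always be called on a
 * partially allocated bp.
 */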
5909
5910static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5911{
5912 int i;
5913
5914 for_each_queue(bp, i) {
5915 struct bnx2x_fastpath *fp = &bp->fp[i];
5916
5917 u16 bd_cons = fp->tx_bd_cons;
5918 u16 sw_prod = fp->tx_pkt_prod;
5919 u16 sw_cons = fp->tx_pkt_cons;
5920
5921 while (sw_cons != sw_prod) {
5922 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5923 sw_cons++;
5924 }
5925 }
5926}
5927
5928static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5929{
5930 int i, j;
5931
5932 for_each_queue(bp, j) {
5933 struct bnx2x_fastpath *fp = &bp->fp[j];
5934
5935 for (i = 0; i < NUM_RX_BD; i++) {
5936 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5937 struct sk_buff *skb = rx_buf->skb;
5938
5939 if (skb == NULL)
5940 continue;
5941
5942 pci_unmap_single(bp->pdev,
5943 pci_unmap_addr(rx_buf, mapping),
437cf2f1 5944 bp->rx_buf_size,
5945 PCI_DMA_FROMDEVICE);
5946
5947 rx_buf->skb = NULL;
5948 dev_kfree_skb(skb);
5949 }
7a9b2557 5950 if (!fp->disable_tpa)
5951 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
5952 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 5953 ETH_MAX_AGGREGATION_QUEUES_E1H);
5954 }
5955}
5956
5957static void bnx2x_free_skbs(struct bnx2x *bp)
5958{
5959 bnx2x_free_tx_skbs(bp);
5960 bnx2x_free_rx_skbs(bp);
5961}
5962
5963static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5964{
34f80b04 5965 int i, offset = 1;
5966
5967 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5968 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
5969 bp->msix_table[0].vector);
5970
5971 for_each_queue(bp, i) {
c14423fe 5972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5973 "state %x\n", i, bp->msix_table[i + offset].vector,
5974 bnx2x_fp(bp, i, state));
5975
5976 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5977 BNX2X_ERR("IRQ of fp #%d being freed while "
5978 "state != closed\n", i);
a2fbb9ea 5979
34f80b04 5980 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5981 }
5982}
5983
5984static void bnx2x_free_irq(struct bnx2x *bp)
5985{
a2fbb9ea 5986 if (bp->flags & USING_MSIX_FLAG) {
5987 bnx2x_free_msix_irqs(bp);
5988 pci_disable_msix(bp->pdev);
5989 bp->flags &= ~USING_MSIX_FLAG;
5990
5991 } else
5992 free_irq(bp->pdev->irq, bp->dev);
5993}
5994
5995static int bnx2x_enable_msix(struct bnx2x *bp)
5996{
34f80b04 5997 int i, rc, offset;
5998
5999 bp->msix_table[0].entry = 0;
6000 offset = 1;
6001 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 6002
6003 for_each_queue(bp, i) {
6004 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 6005
6006 bp->msix_table[i + offset].entry = igu_vec;
6007 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6008 "(fastpath #%u)\n", i + offset, igu_vec, i);
6009 }
6010
6011 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6012 bp->num_queues + offset);
6013 if (rc) {
6014 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
6015 return -1;
6016 }
6017 bp->flags |= USING_MSIX_FLAG;
6018
6019 return 0;
6020}
6021
6022static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6023{
34f80b04 6024 int i, rc, offset = 1;
a2fbb9ea 6025
6026 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6027 bp->dev->name, bp->dev);
6028 if (rc) {
6029 BNX2X_ERR("request sp irq failed\n");
6030 return -EBUSY;
6031 }
6032
6033 for_each_queue(bp, i) {
34f80b04 6034 rc = request_irq(bp->msix_table[i + offset].vector,
6035 bnx2x_msix_fp_int, 0,
6036 bp->dev->name, &bp->fp[i]);
a2fbb9ea 6037 if (rc) {
6038 BNX2X_ERR("request fp #%d irq failed rc -%d\n",
6039 i + offset, -rc);
6040 bnx2x_free_msix_irqs(bp);
6041 return -EBUSY;
6042 }
6043
6044 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
6045 }
6046
6047 return 0;
6048}
6049
6050static int bnx2x_req_irq(struct bnx2x *bp)
6051{
34f80b04 6052 int rc;
a2fbb9ea 6053
6054 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
6055 bp->dev->name, bp->dev);
6056 if (!rc)
6057 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6058
6059 return rc;
6060}
6061
6062static void bnx2x_napi_enable(struct bnx2x *bp)
6063{
6064 int i;
6065
6066 for_each_queue(bp, i)
6067 napi_enable(&bnx2x_fp(bp, i, napi));
6068}
6069
6070static void bnx2x_napi_disable(struct bnx2x *bp)
6071{
6072 int i;
6073
6074 for_each_queue(bp, i)
6075 napi_disable(&bnx2x_fp(bp, i, napi));
6076}
6077
6078static void bnx2x_netif_start(struct bnx2x *bp)
6079{
6080 if (atomic_dec_and_test(&bp->intr_sem)) {
6081 if (netif_running(bp->dev)) {
6082 if (bp->state == BNX2X_STATE_OPEN)
6083 netif_wake_queue(bp->dev);
6084 bnx2x_napi_enable(bp);
6085 bnx2x_int_enable(bp);
6086 }
6087 }
6088}
6089
f8ef6e44 6090static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6091{
f8ef6e44 6092 bnx2x_int_disable_sync(bp, disable_hw);
6093 if (netif_running(bp->dev)) {
6094 bnx2x_napi_disable(bp);
6095 netif_tx_disable(bp->dev);
6096 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6097 }
6098}
6099
6100/*
6101 * Init service functions
6102 */
6103
3101c2bc 6104static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6105{
6106 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6107 int port = BP_PORT(bp);
6108
6109 /* CAM allocation
6110 * unicasts 0-31:port0 32-63:port1
6111 * multicast 64-127:port0 128-191:port1
6112 */
6113 config->hdr.length_6b = 2;
6114 config->hdr.offset = port ? 31 : 0;
6115 config->hdr.client_id = BP_CL_ID(bp);
6116 config->hdr.reserved1 = 0;
6117
6118 /* primary MAC */
6119 config->config_table[0].cam_entry.msb_mac_addr =
6120 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6121 config->config_table[0].cam_entry.middle_mac_addr =
6122 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6123 config->config_table[0].cam_entry.lsb_mac_addr =
6124 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6125 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6126 if (set)
6127 config->config_table[0].target_table_entry.flags = 0;
6128 else
6129 CAM_INVALIDATE(config->config_table[0]);
6130 config->config_table[0].target_table_entry.client_id = 0;
6131 config->config_table[0].target_table_entry.vlan_id = 0;
6132
6133 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6134 (set ? "setting" : "clearing"),
6135 config->config_table[0].cam_entry.msb_mac_addr,
6136 config->config_table[0].cam_entry.middle_mac_addr,
6137 config->config_table[0].cam_entry.lsb_mac_addr);
6138
6139 /* broadcast */
6140 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6141 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6142 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6143 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6144 if (set)
6145 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6146 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6147 else
6148 CAM_INVALIDATE(config->config_table[1]);
6149 config->config_table[1].target_table_entry.client_id = 0;
6150 config->config_table[1].target_table_entry.vlan_id = 0;
6151
6152 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6153 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6154 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6155}
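/* Worked example (editorial): on a little-endian host with dev_addr
 * 00:11:22:33:44:55, *(u16 *)&dev_addr[0] reads 0x1100 and swab16()
 * restores wire order, so the CAM entry above becomes msb = 0x0011,
 * middle = 0x2233, lsb = 0x4455.
 */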
6156
3101c2bc 6157static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6158{
6159 struct mac_configuration_cmd_e1h *config =
6160 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6161
3101c2bc 6162 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6163 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6164 return;
6165 }
6166
6167 /* CAM allocation for E1H
6168 * unicasts: by func number
6169 * multicast: 20+FUNC*20, 20 each
6170 */
6171 config->hdr.length_6b = 1;
6172 config->hdr.offset = BP_FUNC(bp);
6173 config->hdr.client_id = BP_CL_ID(bp);
6174 config->hdr.reserved1 = 0;
6175
6176 /* primary MAC */
6177 config->config_table[0].msb_mac_addr =
6178 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6179 config->config_table[0].middle_mac_addr =
6180 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6181 config->config_table[0].lsb_mac_addr =
6182 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6183 config->config_table[0].client_id = BP_L_ID(bp);
6184 config->config_table[0].vlan_id = 0;
6185 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6186 if (set)
6187 config->config_table[0].flags = BP_PORT(bp);
6188 else
6189 config->config_table[0].flags =
6190 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6191
6192 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6193 (set ? "setting" : "clearing"),
6194 config->config_table[0].msb_mac_addr,
6195 config->config_table[0].middle_mac_addr,
6196 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6197
6198 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6199 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6200 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6201}
6202
6203static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6204 int *state_p, int poll)
6205{
6206 /* can take a while if any port is running */
34f80b04 6207 int cnt = 500;
a2fbb9ea 6208
6209 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6210 poll ? "polling" : "waiting", state, idx);
6211
6212 might_sleep();
34f80b04 6213 while (cnt--) {
6214 if (poll) {
6215 bnx2x_rx_int(bp->fp, 10);
6216			/* if the index is non-zero, the reply
6217			 * for some commands will arrive
6218			 * on the non-default queue
6219			 */
6220 if (idx)
6221 bnx2x_rx_int(&bp->fp[idx], 10);
6222 }
a2fbb9ea 6223
3101c2bc 6224 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6225 if (*state_p == state)
6226 return 0;
6227
a2fbb9ea 6228 msleep(1);
6229 }
6230
a2fbb9ea 6231 /* timeout! */
6232 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6233 poll ? "polling" : "waiting", state, idx);
6234#ifdef BNX2X_STOP_ON_ERROR
6235 bnx2x_panic();
6236#endif
a2fbb9ea 6237
49d66772 6238 return -EBUSY;
6239}
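/* Editorial note: the ramrod handshake used throughout this file is
 * post-then-wait: the caller sets the expected state, posts the ramrod,
 * and polls until bnx2x_sp_event() updates the state from the completion.
 * Minimal sketch, mirroring bnx2x_setup_multi() below:
 */
#if 0	/* illustrative only */
	bp->fp[index].state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
			       &(bp->fp[index].state), 0);
#endif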
6240
6241static int bnx2x_setup_leading(struct bnx2x *bp)
6242{
34f80b04 6243 int rc;
a2fbb9ea 6244
c14423fe 6245 /* reset IGU state */
34f80b04 6246 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6247
6248 /* SETUP ramrod */
6249 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6250
6251 /* Wait for completion */
6252 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6253
34f80b04 6254 return rc;
6255}
6256
6257static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6258{
a2fbb9ea 6259 /* reset IGU state */
34f80b04 6260 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6261
228241eb 6262 /* SETUP ramrod */
6263 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
6264 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
6265
6266 /* Wait for completion */
6267 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 6268 &(bp->fp[index].state), 0);
6269}
6270
6271static int bnx2x_poll(struct napi_struct *napi, int budget);
6272static void bnx2x_set_rx_mode(struct net_device *dev);
6273
6274/* must be called with rtnl_lock */
6275static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 6276{
228241eb 6277 u32 load_code;
34f80b04 6278 int i, rc;
6279#ifdef BNX2X_STOP_ON_ERROR
6280 if (unlikely(bp->panic))
6281 return -EPERM;
6282#endif
6283
6284 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6285
6286 /* Send LOAD_REQUEST command to MCP
6287 Returns the type of LOAD command:
6288 if it is the first port to be initialized
6289 common blocks should be initialized, otherwise - not
a2fbb9ea 6290 */
34f80b04 6291 if (!BP_NOMCP(bp)) {
6292 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6293 if (!load_code) {
da5a662a 6294 BNX2X_ERR("MCP response failure, aborting\n");
6295 return -EBUSY;
6296 }
34f80b04 6297 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 6298 return -EBUSY; /* other port in diagnostic mode */
34f80b04 6299
a2fbb9ea 6300 } else {
6301 int port = BP_PORT(bp);
6302
6303 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6304 load_count[0], load_count[1], load_count[2]);
6305 load_count[0]++;
da5a662a 6306 load_count[1 + port]++;
6307 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6308 load_count[0], load_count[1], load_count[2]);
6309 if (load_count[0] == 1)
6310 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
da5a662a 6311 else if (load_count[1 + port] == 1)
6312 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6313 else
6314 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6315 }
6316
6317 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6318 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6319 bp->port.pmf = 1;
6320 else
6321 bp->port.pmf = 0;
6322 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
6323
6324	/* if we can't use MSI-X we only need one fastpath,
6325	 * so try to enable MSI-X with the requested number of fastpaths
6326	 * and fall back to INT#A with one fastpath
6327	 */
6328 if (use_inta) {
6329 bp->num_queues = 1;
6330
6331 } else {
6332 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
6333 /* user requested number */
6334 bp->num_queues = use_multi;
6335
6336 else if (use_multi)
6337 bp->num_queues = min_t(u32, num_online_cpus(),
6338 BP_MAX_QUEUES(bp));
6339 else
a2fbb9ea 6340 bp->num_queues = 1;
6341
6342 if (bnx2x_enable_msix(bp)) {
6343 /* failed to enable MSI-X */
6344 bp->num_queues = 1;
6345 if (use_multi)
6346 BNX2X_ERR("Multi requested but failed"
6347 " to enable MSI-X\n");
6348 }
6349 }
6350 DP(NETIF_MSG_IFUP,
6351 "set number of queues to %d\n", bp->num_queues);
c14423fe 6352
6353 if (bnx2x_alloc_mem(bp))
6354 return -ENOMEM;
6355
6356 for_each_queue(bp, i)
6357 bnx2x_fp(bp, i, disable_tpa) =
6358 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6359
6360 if (bp->flags & USING_MSIX_FLAG) {
6361 rc = bnx2x_req_msix_irqs(bp);
6362 if (rc) {
6363 pci_disable_msix(bp->pdev);
6364 goto load_error;
6365 }
6366 } else {
6367 bnx2x_ack_int(bp);
6368 rc = bnx2x_req_irq(bp);
6369 if (rc) {
6370 BNX2X_ERR("IRQ request failed, aborting\n");
6371 goto load_error;
6372 }
6373 }
6374
6375 for_each_queue(bp, i)
6376 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6377 bnx2x_poll, 128);
6378
a2fbb9ea 6379 /* Initialize HW */
6380 rc = bnx2x_init_hw(bp, load_code);
6381 if (rc) {
a2fbb9ea 6382 BNX2X_ERR("HW init failed, aborting\n");
d1014634 6383 goto load_int_disable;
6384 }
6385
a2fbb9ea 6386 /* Setup NIC internals and enable interrupts */
471de716 6387 bnx2x_nic_init(bp, load_code);
6388
6389 /* Send LOAD_DONE command to MCP */
34f80b04 6390 if (!BP_NOMCP(bp)) {
6391 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6392 if (!load_code) {
da5a662a 6393 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6394 rc = -EBUSY;
d1014634 6395 goto load_rings_free;
6396 }
6397 }
6398
6399 bnx2x_stats_init(bp);
6400
6401 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6402
6403 /* Enable Rx interrupt handling before sending the ramrod
6404 as it's completed on Rx FP queue */
65abd74d 6405 bnx2x_napi_enable(bp);
a2fbb9ea 6406
6407 /* Enable interrupt handling */
6408 atomic_set(&bp->intr_sem, 0);
6409
6410 rc = bnx2x_setup_leading(bp);
6411 if (rc) {
da5a662a 6412 BNX2X_ERR("Setup leading failed!\n");
d1014634 6413 goto load_netif_stop;
34f80b04 6414 }
a2fbb9ea 6415
6416 if (CHIP_IS_E1H(bp))
6417 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6418 BNX2X_ERR("!!! mf_cfg function disabled\n");
6419 bp->state = BNX2X_STATE_DISABLED;
6420 }
a2fbb9ea 6421
6422 if (bp->state == BNX2X_STATE_OPEN)
6423 for_each_nondefault_queue(bp, i) {
6424 rc = bnx2x_setup_multi(bp, i);
6425 if (rc)
d1014634 6426 goto load_netif_stop;
34f80b04 6427 }
a2fbb9ea 6428
34f80b04 6429 if (CHIP_IS_E1(bp))
3101c2bc 6430 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6431 else
3101c2bc 6432 bnx2x_set_mac_addr_e1h(bp, 1);
6433
6434 if (bp->port.pmf)
6435 bnx2x_initial_phy_init(bp);
6436
6437 /* Start fast path */
6438 switch (load_mode) {
6439 case LOAD_NORMAL:
6440 /* Tx queue should be only reenabled */
6441 netif_wake_queue(bp->dev);
6442 bnx2x_set_rx_mode(bp->dev);
6443 break;
6444
6445 case LOAD_OPEN:
a2fbb9ea 6446 netif_start_queue(bp->dev);
34f80b04 6447 bnx2x_set_rx_mode(bp->dev);
6448 if (bp->flags & USING_MSIX_FLAG)
6449 printk(KERN_INFO PFX "%s: using MSI-X\n",
6450 bp->dev->name);
34f80b04 6451 break;
a2fbb9ea 6452
34f80b04 6453 case LOAD_DIAG:
a2fbb9ea 6454 bnx2x_set_rx_mode(bp->dev);
6455 bp->state = BNX2X_STATE_DIAG;
6456 break;
6457
6458 default:
6459 break;
6460 }
6461
6462 if (!bp->port.pmf)
6463 bnx2x__link_status_update(bp);
6464
6465 /* start the timer */
6466 mod_timer(&bp->timer, jiffies + bp->current_interval);
6467
34f80b04 6468
6469 return 0;
6470
d1014634 6471load_netif_stop:
65abd74d 6472 bnx2x_napi_disable(bp);
d1014634 6473load_rings_free:
6474 /* Free SKBs, SGEs, TPA pool and driver internals */
6475 bnx2x_free_skbs(bp);
6476 for_each_queue(bp, i)
3196a88a 6477 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d1014634 6478load_int_disable:
f8ef6e44 6479 bnx2x_int_disable_sync(bp, 1);
6480 /* Release IRQs */
6481 bnx2x_free_irq(bp);
228241eb 6482load_error:
a2fbb9ea 6483 bnx2x_free_mem(bp);
9a035440 6484 bp->port.pmf = 0;
6485
6486 /* TBD we really need to reset the chip
6487 if we want to recover from this */
34f80b04 6488 return rc;
6489}
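/* Editorial note (no-MCP case): load_count[] holds {common, port0, port1}
 * reference counts.  The first function to load on the chip sees
 * load_count[0] == 1 and performs COMMON init; the first on its port sees
 * load_count[1 + port] == 1 and performs PORT init; any other function
 * performs FUNCTION init only.  bnx2x_nic_unload() decrements the same
 * counters to pick the matching UNLOAD_COMMON/PORT/FUNCTION reset code.
 */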
6490
6491static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6492{
6493 int rc;
6494
c14423fe 6495 /* halt the connection */
a2fbb9ea 6496 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
231fd58a 6497 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, index, 0);
a2fbb9ea 6498
34f80b04 6499 /* Wait for completion */
a2fbb9ea 6500 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 6501 &(bp->fp[index].state), 1);
c14423fe 6502 if (rc) /* timeout */
6503 return rc;
6504
6505 /* delete cfc entry */
6506 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6507
6508 /* Wait for completion */
6509 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
6510 &(bp->fp[index].state), 1);
6511 return rc;
6512}
6513
da5a662a 6514static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6515{
49d66772 6516 u16 dsb_sp_prod_idx;
c14423fe 6517 /* if the other port is handling traffic,
a2fbb9ea 6518 this can take a lot of time */
6519 int cnt = 500;
6520 int rc;
6521
6522 might_sleep();
6523
6524 /* Send HALT ramrod */
6525 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6526 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6527
6528 /* Wait for completion */
6529 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6530 &(bp->fp[0].state), 1);
6531 if (rc) /* timeout */
da5a662a 6532 return rc;
a2fbb9ea 6533
49d66772 6534 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6535
228241eb 6536 /* Send PORT_DELETE ramrod */
6537 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6538
49d66772 6539 /* Wait for completion to arrive on default status block
6540 we are going to reset the chip anyway
6541 so there is not much to do if this times out
6542 */
34f80b04 6543 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
6544 if (!cnt) {
6545 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6546 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6547 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6548#ifdef BNX2X_STOP_ON_ERROR
6549 bnx2x_panic();
6550#else
6551 rc = -EBUSY;
6552#endif
6553 break;
6554 }
6555 cnt--;
da5a662a 6556 msleep(1);
6557 }
6558 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6559 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
6560
6561 return rc;
6562}
6563
6564static void bnx2x_reset_func(struct bnx2x *bp)
6565{
6566 int port = BP_PORT(bp);
6567 int func = BP_FUNC(bp);
6568 int base, i;
6569
6570 /* Configure IGU */
6571 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6572 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6573
6574 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
6575
6576 /* Clear ILT */
6577 base = FUNC_ILT_BASE(func);
6578 for (i = base; i < base + ILT_PER_FUNC; i++)
6579 bnx2x_ilt_wr(bp, i, 0);
6580}
6581
6582static void bnx2x_reset_port(struct bnx2x *bp)
6583{
6584 int port = BP_PORT(bp);
6585 u32 val;
6586
6587 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6588
6589 /* Do not rcv packets to BRB */
6590 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6591 /* Do not direct rcv packets that are not for MCP to the BRB */
6592 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6593 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6594
6595 /* Configure AEU */
6596 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6597
6598 msleep(100);
6599 /* Check for BRB port occupancy */
6600 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6601 if (val)
6602 DP(NETIF_MSG_IFDOWN,
33471629 6603 "BRB1 is not empty %d blocks are occupied\n", val);
6604
6605 /* TODO: Close Doorbell port? */
6606}
6607
6608static void bnx2x_reset_common(struct bnx2x *bp)
6609{
6610 /* reset_common */
6611 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6612 0xd3ffff7f);
6613 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
6614}
6615
6616static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6617{
6618 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6619 BP_FUNC(bp), reset_code);
6620
6621 switch (reset_code) {
6622 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6623 bnx2x_reset_port(bp);
6624 bnx2x_reset_func(bp);
6625 bnx2x_reset_common(bp);
6626 break;
6627
6628 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6629 bnx2x_reset_port(bp);
6630 bnx2x_reset_func(bp);
6631 break;
6632
6633 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6634 bnx2x_reset_func(bp);
6635 break;
49d66772 6636
6637 default:
6638 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6639 break;
6640 }
6641}
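/* Editorial note: reset scope narrows with the unload code: UNLOAD_COMMON
 * resets port + function + common blocks, UNLOAD_PORT resets port +
 * function, and UNLOAD_FUNCTION resets only the per-function state.
 */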
6642
33471629 6643/* must be called with rtnl_lock */
34f80b04 6644static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6645{
da5a662a 6646 int port = BP_PORT(bp);
a2fbb9ea 6647 u32 reset_code = 0;
da5a662a 6648 int i, cnt, rc;
6649
6650 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6651
6652 bp->rx_mode = BNX2X_RX_MODE_NONE;
6653 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6654
f8ef6e44 6655 bnx2x_netif_stop(bp, 1);
6656 if (!netif_running(bp->dev))
6657 bnx2x_napi_disable(bp);
6658 del_timer_sync(&bp->timer);
6659 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6660 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6661 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6662
da5a662a 6663 /* Wait until tx fast path tasks complete */
6664 for_each_queue(bp, i) {
6665 struct bnx2x_fastpath *fp = &bp->fp[i];
6666
6667 cnt = 1000;
6668 smp_rmb();
6669 while (BNX2X_HAS_TX_WORK(fp)) {
6670
65abd74d 6671 bnx2x_tx_int(fp, 1000);
6672 if (!cnt) {
6673 BNX2X_ERR("timeout waiting for queue[%d]\n",
6674 i);
6675#ifdef BNX2X_STOP_ON_ERROR
6676 bnx2x_panic();
6677 return -EBUSY;
6678#else
6679 break;
6680#endif
6681 }
6682 cnt--;
da5a662a 6683 msleep(1);
6684 smp_rmb();
6685 }
228241eb 6686 }
6687 /* Give HW time to discard old tx messages */
6688 msleep(1);
a2fbb9ea 6689
6690 /* Release IRQs */
6691 bnx2x_free_irq(bp);
6692
6693 if (CHIP_IS_E1(bp)) {
6694 struct mac_configuration_cmd *config =
6695 bnx2x_sp(bp, mcast_config);
6696
6697 bnx2x_set_mac_addr_e1(bp, 0);
6698
6699 for (i = 0; i < config->hdr.length_6b; i++)
6700 CAM_INVALIDATE(config->config_table[i]);
6701
6702 config->hdr.length_6b = i;
6703 if (CHIP_REV_IS_SLOW(bp))
6704 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6705 else
6706 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
6707 config->hdr.client_id = BP_CL_ID(bp);
6708 config->hdr.reserved1 = 0;
6709
6710 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6711 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6712 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6713
6714 } else { /* E1H */
6715 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6716
6717 bnx2x_set_mac_addr_e1h(bp, 0);
6718
6719 for (i = 0; i < MC_HASH_SIZE; i++)
6720 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6721 }
6722
6723 if (unload_mode == UNLOAD_NORMAL)
6724 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6725
6726 else if (bp->flags & NO_WOL_FLAG) {
6727 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
6728 if (CHIP_IS_E1H(bp))
6729 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
6730
6731 } else if (bp->wol) {
6732 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
6733 u8 *mac_addr = bp->dev->dev_addr;
6734 u32 val;
6735 /* The mac address is written to entries 1-4 to
6736 preserve entry 0 which is used by the PMF */
6737 u8 entry = (BP_E1HVN(bp) + 1)*8;
6738
6739 val = (mac_addr[0] << 8) | mac_addr[1];
6740 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
6741
6742 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6743 (mac_addr[4] << 8) | mac_addr[5];
6744 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
6745
6746 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
6747
6748 } else
6749 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 6750
6751 /* Close multi and leading connections
6752 Completions for ramrods are collected in a synchronous way */
6753 for_each_nondefault_queue(bp, i)
6754 if (bnx2x_stop_multi(bp, i))
228241eb 6755 goto unload_error;
a2fbb9ea 6756
6757 rc = bnx2x_stop_leading(bp);
6758 if (rc) {
34f80b04 6759 BNX2X_ERR("Stop leading failed!\n");
da5a662a 6760#ifdef BNX2X_STOP_ON_ERROR
34f80b04 6761 return -EBUSY;
6762#else
6763 goto unload_error;
34f80b04 6764#endif
6765 }
6766
6767unload_error:
34f80b04 6768 if (!BP_NOMCP(bp))
228241eb 6769 reset_code = bnx2x_fw_command(bp, reset_code);
6770 else {
6771 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6772 load_count[0], load_count[1], load_count[2]);
6773 load_count[0]--;
da5a662a 6774 load_count[1 + port]--;
6775 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6776 load_count[0], load_count[1], load_count[2]);
6777 if (load_count[0] == 0)
6778 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 6779 else if (load_count[1 + port] == 0)
6780 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6781 else
6782 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6783 }
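 /* Worked example (illustration only): with both ports of one chip
  * loaded, load_count is {2, 1, 1}; unloading port 0 leaves {1, 0, 1},
  * so UNLOAD_PORT is reported, and the final unload drives
  * load_count[0] to 0, yielding UNLOAD_COMMON. */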
6784
6785 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6786 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6787 bnx2x__link_reset(bp);
6788
6789 /* Reset the chip */
6790 bnx2x_reset_chip(bp, reset_code);
6791
6792 /* Report UNLOAD_DONE to MCP */
6793 if (!BP_NOMCP(bp))
6794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6795 bp->port.pmf = 0;
6796
6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6798 bnx2x_free_skbs(bp);
6799 for_each_queue(bp, i)
6800 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
6801 bnx2x_free_mem(bp);
6802
6803 bp->state = BNX2X_STATE_CLOSED;
6804
6805 netif_carrier_off(bp->dev);
6806
6807 return 0;
6808}
6809
6810static void bnx2x_reset_task(struct work_struct *work)
6811{
6812 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6813
6814#ifdef BNX2X_STOP_ON_ERROR
6815 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6816 " so reset not done to allow debug dump,\n"
6817 KERN_ERR " you will need to reboot when done\n");
6818 return;
6819#endif
6820
6821 rtnl_lock();
6822
6823 if (!netif_running(bp->dev))
6824 goto reset_task_exit;
6825
6826 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6827 bnx2x_nic_load(bp, LOAD_NORMAL);
6828
6829reset_task_exit:
6830 rtnl_unlock();
6831}
6832
6833/* end of nic load/unload */
6834
6835/* ethtool_ops */
6836
6837/*
6838 * Init service functions
6839 */
6840
6841static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6842{
6843 u32 val;
6844
6845 /* Check if there is any driver already loaded */
6846 val = REG_RD(bp, MISC_REG_UNPREPARED);
6847 if (val == 0x1) {
6848 /* Check if it is the UNDI driver
6849 * UNDI driver initializes CID offset for normal bell to 0x7
6850 */
6851 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6852 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6853 if (val == 0x7)
6854 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6855 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6856
6857 if (val == 0x7) {
6858 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6859 /* save our func */
6860 int func = BP_FUNC(bp);
6861 u32 swap_en;
6862 u32 swap_val;
6863
6864 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6865
6866 /* try unload UNDI on port 0 */
6867 bp->func = 0;
6868 bp->fw_seq =
6869 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6870 DRV_MSG_SEQ_NUMBER_MASK);
6871 reset_code = bnx2x_fw_command(bp, reset_code);
6872
6873 /* if UNDI is loaded on the other port */
6874 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6875
6876 /* send "DONE" for previous unload */
6877 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6878
6879 /* unload UNDI on port 1 */
6880 bp->func = 1;
6881 bp->fw_seq =
6882 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6883 DRV_MSG_SEQ_NUMBER_MASK);
6884 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6885
6886 bnx2x_fw_command(bp, reset_code);
6887 }
6888
6889 REG_WR(bp, (BP_PORT(bp) ? HC_REG_CONFIG_1 :
6890 HC_REG_CONFIG_0), 0x1000);
6891
6892 /* close input traffic and wait for it */
6893 /* Do not rcv packets to BRB */
6894 REG_WR(bp,
6895 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6896 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6897 /* Do not direct rcv packets that are not for MCP to
6898 * the BRB */
6899 REG_WR(bp,
6900 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6901 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6902 /* clear AEU */
6903 REG_WR(bp,
6904 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6905 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6906 msleep(10);
6907
6908 /* save NIG port swap info */
6909 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6910 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6911 /* reset device */
6912 REG_WR(bp,
6913 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6914 0xd3ffffff);
6915 REG_WR(bp,
6916 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6917 0x1403);
6918 /* take the NIG out of reset and restore swap values */
6919 REG_WR(bp,
6920 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6921 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6922 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6923 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6924
6925 /* send unload done to the MCP */
6926 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6927
6928 /* restore our func and fw_seq */
6929 bp->func = func;
6930 bp->fw_seq =
6931 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6932 DRV_MSG_SEQ_NUMBER_MASK);
6933 }
6934 }
6935}
6936
6937static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6938{
6939 u32 val, val2, val3, val4, id;
6940 u16 pmc;
6941
6942 /* Get the chip revision id and number. */
6943 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6944 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6945 id = ((val & 0xffff) << 16);
6946 val = REG_RD(bp, MISC_REG_CHIP_REV);
6947 id |= ((val & 0xf) << 12);
6948 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6949 id |= ((val & 0xff) << 4);
6950 val = REG_RD(bp, MISC_REG_BOND_ID);
6951 id |= (val & 0xf);
6952 bp->common.chip_id = id;
6953 bp->link_params.chip_id = bp->common.chip_id;
6954 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
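 /* Worked example (illustration only, values are hypothetical): chip
  * num 0x164e with rev 0x1, metal 0x00 and bond_id 0x0 composes
  * id = 0x164e1000 per the num:16-31, rev:12-15, metal:4-11,
  * bond_id:0-3 layout noted above. */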
6955
6956 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6957 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6958 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6959 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6960 bp->common.flash_size, bp->common.flash_size);
6961
6962 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6963 bp->link_params.shmem_base = bp->common.shmem_base;
6964 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6965
6966 if (!bp->common.shmem_base ||
6967 (bp->common.shmem_base < 0xA0000) ||
6968 (bp->common.shmem_base >= 0xC0000)) {
6969 BNX2X_DEV_INFO("MCP not active\n");
6970 bp->flags |= NO_MCP_FLAG;
6971 return;
6972 }
6973
6974 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6975 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6976 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6977 BNX2X_ERR("BAD MCP validity signature\n");
6978
6979 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6980 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6981
6982 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6983 bp->common.hw_config, bp->common.board);
6984
6985 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6986 SHARED_HW_CFG_LED_MODE_MASK) >>
6987 SHARED_HW_CFG_LED_MODE_SHIFT);
6988
6989 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6990 bp->common.bc_ver = val;
6991 BNX2X_DEV_INFO("bc_ver %X\n", val);
6992 if (val < BNX2X_BC_VER) {
6993 /* for now only warn
6994 * later we might need to enforce this */
6995 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6996 " please upgrade BC\n", BNX2X_BC_VER, val);
6997 }
6998
6999 if (BP_E1HVN(bp) == 0) {
7000 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7001 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7002 } else {
7003 /* no WOL capability for E1HVN != 0 */
7004 bp->flags |= NO_WOL_FLAG;
7005 }
7006 BNX2X_DEV_INFO("%sWoL capable\n",
7007 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
7008
7009 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7010 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7011 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7012 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7013
7014 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7015 val, val2, val3, val4);
7016}
7017
7018static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7019 u32 switch_cfg)
7020{
7021 int port = BP_PORT(bp);
7022 u32 ext_phy_type;
7023
7024 switch (switch_cfg) {
7025 case SWITCH_CFG_1G:
7026 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7027
7028 ext_phy_type =
7029 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7030 switch (ext_phy_type) {
7031 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7032 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7033 ext_phy_type);
7034
7035 bp->port.supported |= (SUPPORTED_10baseT_Half |
7036 SUPPORTED_10baseT_Full |
7037 SUPPORTED_100baseT_Half |
7038 SUPPORTED_100baseT_Full |
7039 SUPPORTED_1000baseT_Full |
7040 SUPPORTED_2500baseX_Full |
7041 SUPPORTED_TP |
7042 SUPPORTED_FIBRE |
7043 SUPPORTED_Autoneg |
7044 SUPPORTED_Pause |
7045 SUPPORTED_Asym_Pause);
7046 break;
7047
7048 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7049 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7050 ext_phy_type);
7051
7052 bp->port.supported |= (SUPPORTED_10baseT_Half |
7053 SUPPORTED_10baseT_Full |
7054 SUPPORTED_100baseT_Half |
7055 SUPPORTED_100baseT_Full |
7056 SUPPORTED_1000baseT_Full |
7057 SUPPORTED_TP |
7058 SUPPORTED_FIBRE |
7059 SUPPORTED_Autoneg |
7060 SUPPORTED_Pause |
7061 SUPPORTED_Asym_Pause);
7062 break;
7063
7064 default:
7065 BNX2X_ERR("NVRAM config error. "
7066 "BAD SerDes ext_phy_config 0x%x\n",
7067 bp->link_params.ext_phy_config);
7068 return;
7069 }
7070
7071 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7072 port*0x10);
7073 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7074 break;
7075
7076 case SWITCH_CFG_10G:
7077 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7078
7079 ext_phy_type =
7080 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7081 switch (ext_phy_type) {
7082 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7083 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7084 ext_phy_type);
7085
7086 bp->port.supported |= (SUPPORTED_10baseT_Half |
7087 SUPPORTED_10baseT_Full |
7088 SUPPORTED_100baseT_Half |
7089 SUPPORTED_100baseT_Full |
7090 SUPPORTED_1000baseT_Full |
7091 SUPPORTED_2500baseX_Full |
7092 SUPPORTED_10000baseT_Full |
7093 SUPPORTED_TP |
7094 SUPPORTED_FIBRE |
7095 SUPPORTED_Autoneg |
7096 SUPPORTED_Pause |
7097 SUPPORTED_Asym_Pause);
7098 break;
7099
7100 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7101 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7102 ext_phy_type);
7103
7104 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7105 SUPPORTED_FIBRE |
7106 SUPPORTED_Pause |
7107 SUPPORTED_Asym_Pause);
7108 break;
7109
7110 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7111 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7112 ext_phy_type);
7113
7114 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7115 SUPPORTED_1000baseT_Full |
7116 SUPPORTED_FIBRE |
7117 SUPPORTED_Pause |
7118 SUPPORTED_Asym_Pause);
7119 break;
7120
7121 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7122 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7123 ext_phy_type);
7124
7125 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7126 SUPPORTED_1000baseT_Full |
7127 SUPPORTED_FIBRE |
7128 SUPPORTED_Autoneg |
7129 SUPPORTED_Pause |
7130 SUPPORTED_Asym_Pause);
7131 break;
7132
7133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7134 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7135 ext_phy_type);
7136
34f80b04
EG
7137 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7138 SUPPORTED_2500baseX_Full |
7139 SUPPORTED_1000baseT_Full |
7140 SUPPORTED_FIBRE |
7141 SUPPORTED_Autoneg |
7142 SUPPORTED_Pause |
7143 SUPPORTED_Asym_Pause);
7144 break;
7145
7146 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7147 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7148 ext_phy_type);
7149
7150 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7151 SUPPORTED_TP |
7152 SUPPORTED_Autoneg |
7153 SUPPORTED_Pause |
7154 SUPPORTED_Asym_Pause);
7155 break;
7156
7157 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7158 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7159 bp->link_params.ext_phy_config);
7160 break;
7161
a2fbb9ea
ET
7162 default:
7163 BNX2X_ERR("NVRAM config error. "
7164 "BAD XGXS ext_phy_config 0x%x\n",
7165 bp->link_params.ext_phy_config);
7166 return;
7167 }
7168
7169 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7170 port*0x18);
7171 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7172
7173 break;
7174
7175 default:
7176 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7177 bp->port.link_config);
7178 return;
7179 }
7180 bp->link_params.phy_addr = bp->port.phy_addr;
7181
7182 /* mask what we support according to speed_cap_mask */
7183 if (!(bp->link_params.speed_cap_mask &
7184 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7185 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7186
7187 if (!(bp->link_params.speed_cap_mask &
7188 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7189 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7190
7191 if (!(bp->link_params.speed_cap_mask &
7192 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7193 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7194
7195 if (!(bp->link_params.speed_cap_mask &
7196 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7197 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7198
7199 if (!(bp->link_params.speed_cap_mask &
7200 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7201 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7202 SUPPORTED_1000baseT_Full);
7203
7204 if (!(bp->link_params.speed_cap_mask &
7205 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7206 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7207
7208 if (!(bp->link_params.speed_cap_mask &
7209 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7210 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7211
7212 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7213}
7214
7215 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7216{
7217 bp->link_params.req_duplex = DUPLEX_FULL;
7218
7219 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7220 case PORT_FEATURE_LINK_SPEED_AUTO:
7221 if (bp->port.supported & SUPPORTED_Autoneg) {
7222 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7223 bp->port.advertising = bp->port.supported;
7224 } else {
7225 u32 ext_phy_type =
7226 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7227
7228 if ((ext_phy_type ==
7229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7230 (ext_phy_type ==
7231 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7232 /* force 10G, no AN */
7233 bp->link_params.req_line_speed = SPEED_10000;
7234 bp->port.advertising =
7235 (ADVERTISED_10000baseT_Full |
7236 ADVERTISED_FIBRE);
7237 break;
7238 }
7239 BNX2X_ERR("NVRAM config error. "
7240 "Invalid link_config 0x%x"
7241 " Autoneg not supported\n",
7242 bp->port.link_config);
7243 return;
7244 }
7245 break;
7246
7247 case PORT_FEATURE_LINK_SPEED_10M_FULL:
7248 if (bp->port.supported & SUPPORTED_10baseT_Full) {
7249 bp->link_params.req_line_speed = SPEED_10;
7250 bp->port.advertising = (ADVERTISED_10baseT_Full |
7251 ADVERTISED_TP);
7252 } else {
7253 BNX2X_ERR("NVRAM config error. "
7254 "Invalid link_config 0x%x"
7255 " speed_cap_mask 0x%x\n",
7256 bp->port.link_config,
7257 bp->link_params.speed_cap_mask);
7258 return;
7259 }
7260 break;
7261
7262 case PORT_FEATURE_LINK_SPEED_10M_HALF:
7263 if (bp->port.supported & SUPPORTED_10baseT_Half) {
7264 bp->link_params.req_line_speed = SPEED_10;
7265 bp->link_params.req_duplex = DUPLEX_HALF;
7266 bp->port.advertising = (ADVERTISED_10baseT_Half |
7267 ADVERTISED_TP);
7268 } else {
7269 BNX2X_ERR("NVRAM config error. "
7270 "Invalid link_config 0x%x"
7271 " speed_cap_mask 0x%x\n",
7272 bp->port.link_config,
7273 bp->link_params.speed_cap_mask);
7274 return;
7275 }
7276 break;
7277
7278 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7279 if (bp->port.supported & SUPPORTED_100baseT_Full) {
7280 bp->link_params.req_line_speed = SPEED_100;
7281 bp->port.advertising = (ADVERTISED_100baseT_Full |
7282 ADVERTISED_TP);
7283 } else {
7284 BNX2X_ERR("NVRAM config error. "
7285 "Invalid link_config 0x%x"
7286 " speed_cap_mask 0x%x\n",
7287 bp->port.link_config,
7288 bp->link_params.speed_cap_mask);
7289 return;
7290 }
7291 break;
7292
7293 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7294 if (bp->port.supported & SUPPORTED_100baseT_Half) {
7295 bp->link_params.req_line_speed = SPEED_100;
7296 bp->link_params.req_duplex = DUPLEX_HALF;
7297 bp->port.advertising = (ADVERTISED_100baseT_Half |
7298 ADVERTISED_TP);
7299 } else {
7300 BNX2X_ERR("NVRAM config error. "
7301 "Invalid link_config 0x%x"
7302 " speed_cap_mask 0x%x\n",
7303 bp->port.link_config,
7304 bp->link_params.speed_cap_mask);
7305 return;
7306 }
7307 break;
7308
7309 case PORT_FEATURE_LINK_SPEED_1G:
7310 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
7311 bp->link_params.req_line_speed = SPEED_1000;
7312 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7313 ADVERTISED_TP);
7314 } else {
7315 BNX2X_ERR("NVRAM config error. "
7316 "Invalid link_config 0x%x"
7317 " speed_cap_mask 0x%x\n",
7318 bp->port.link_config,
7319 bp->link_params.speed_cap_mask);
7320 return;
7321 }
7322 break;
7323
7324 case PORT_FEATURE_LINK_SPEED_2_5G:
7325 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
7326 bp->link_params.req_line_speed = SPEED_2500;
7327 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7328 ADVERTISED_TP);
7329 } else {
7330 BNX2X_ERR("NVRAM config error. "
7331 "Invalid link_config 0x%x"
7332 " speed_cap_mask 0x%x\n",
7333 bp->port.link_config,
7334 bp->link_params.speed_cap_mask);
7335 return;
7336 }
7337 break;
7338
7339 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7340 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7341 case PORT_FEATURE_LINK_SPEED_10G_KR:
7342 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
7343 bp->link_params.req_line_speed = SPEED_10000;
7344 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7345 ADVERTISED_FIBRE);
7346 } else {
7347 BNX2X_ERR("NVRAM config error. "
7348 "Invalid link_config 0x%x"
7349 " speed_cap_mask 0x%x\n",
7350 bp->port.link_config,
7351 bp->link_params.speed_cap_mask);
7352 return;
7353 }
7354 break;
7355
7356 default:
7357 BNX2X_ERR("NVRAM config error. "
7358 "BAD link speed link_config 0x%x\n",
7359 bp->port.link_config);
7360 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7361 bp->port.advertising = bp->port.supported;
7362 break;
7363 }
7364
7365 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7366 PORT_FEATURE_FLOW_CONTROL_MASK);
7367 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7368 PORT_FEATURE_FLOW_CONTROL_MASK);
7369 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7370 !(bp->port.supported & SUPPORTED_Autoneg))
7371 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
7372
7373 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
7374 " advertising 0x%x\n",
7373 bp->link_params.req_line_speed,
7374 bp->link_params.req_duplex,
7375 bp->link_params.req_flow_ctrl, bp->port.advertising);
7376}
7377
7378 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
7379{
7380 int port = BP_PORT(bp);
7381 u32 val, val2;
7382
7383 bp->link_params.bp = bp;
7384 bp->link_params.port = port;
7385
7386 bp->link_params.serdes_config =
7387 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
7388 bp->link_params.lane_config =
7389 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
7390 bp->link_params.ext_phy_config =
7391 SHMEM_RD(bp,
7392 dev_info.port_hw_config[port].external_phy_config);
7393 bp->link_params.speed_cap_mask =
7394 SHMEM_RD(bp,
7395 dev_info.port_hw_config[port].speed_capability_mask);
7396
7397 bp->port.link_config =
7398 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7399
7400 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7401 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7402 " link_config 0x%08x\n",
7403 bp->link_params.serdes_config,
7404 bp->link_params.lane_config,
7405 bp->link_params.ext_phy_config,
7406 bp->link_params.speed_cap_mask, bp->port.link_config);
7407
7408 bp->link_params.switch_cfg = (bp->port.link_config &
7409 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7410 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
7411
7412 bnx2x_link_settings_requested(bp);
7413
7414 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7415 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7416 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7417 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7418 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7419 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7420 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7421 bp->dev->dev_addr[5] = (u8)(val & 0xff);
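 /* Illustration (not from the source): shmem stores the MAC across two
  * words, mac_upper carrying bytes 0-1 in its low 16 bits and mac_lower
  * bytes 2-5, so 00:10:18:ab:cd:ef appears as mac_upper = 0x0010 and
  * mac_lower = 0x18abcdef. */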
7422 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7423 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7424}
7425
7426static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7427{
7428 int func = BP_FUNC(bp);
7429 u32 val, val2;
7430 int rc = 0;
7431
7432 bnx2x_get_common_hwinfo(bp);
7433
7434 bp->e1hov = 0;
7435 bp->e1hmf = 0;
7436 if (CHIP_IS_E1H(bp)) {
7437 bp->mf_config =
7438 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
7439
7440 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7441 FUNC_MF_CFG_E1HOV_TAG_MASK);
7442 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
7443
7444 bp->e1hov = val;
7445 bp->e1hmf = 1;
7446 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7447 "(0x%04x)\n",
7448 func, bp->e1hov, bp->e1hov);
7449 } else {
7450 BNX2X_DEV_INFO("Single function mode\n");
7451 if (BP_E1HVN(bp)) {
7452 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7453 " aborting\n", func);
7454 rc = -EPERM;
7455 }
7456 }
7457 }
7458
7459 if (!BP_NOMCP(bp)) {
7460 bnx2x_get_port_hwinfo(bp);
7461
7462 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7463 DRV_MSG_SEQ_NUMBER_MASK);
7464 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7465 }
7466
7467 if (IS_E1HMF(bp)) {
7468 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7469 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7470 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7471 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7472 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7473 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7474 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7475 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7476 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7477 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7478 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7479 ETH_ALEN);
7480 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7481 ETH_ALEN);
a2fbb9ea 7482 }
7483
7484 return rc;
7485 }
7486
7487 if (BP_NOMCP(bp)) {
7488 /* only supposed to happen on emulation/FPGA */
7489 BNX2X_ERR("warning random MAC workaround active\n");
7490 random_ether_addr(bp->dev->dev_addr);
7491 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7492 }
7493
7494 return rc;
7495}
7496
7497static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7498{
7499 int func = BP_FUNC(bp);
7500 int rc;
7501
7502 /* Disable interrupt handling until HW is initialized */
7503 atomic_set(&bp->intr_sem, 1);
7504
7505 mutex_init(&bp->port.phy_mutex);
7506
7507 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
7508 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7509
7510 rc = bnx2x_get_hwinfo(bp);
7511
7512 /* need to reset chip if undi was active */
7513 if (!BP_NOMCP(bp))
7514 bnx2x_undi_unload(bp);
7515
7516 if (CHIP_REV_IS_FPGA(bp))
7517 printk(KERN_ERR PFX "FPGA detected\n");
7518
7519 if (BP_NOMCP(bp) && (func == 0))
7520 printk(KERN_ERR PFX
7521 "MCP disabled, must load devices in order!\n");
7522
7523 /* Set TPA flags */
7524 if (disable_tpa) {
7525 bp->flags &= ~TPA_ENABLE_FLAG;
7526 bp->dev->features &= ~NETIF_F_LRO;
7527 } else {
7528 bp->flags |= TPA_ENABLE_FLAG;
7529 bp->dev->features |= NETIF_F_LRO;
7530 }
7531
7532
7533 bp->tx_ring_size = MAX_TX_AVAIL;
7534 bp->rx_ring_size = MAX_RX_AVAIL;
7535
7536 bp->rx_csum = 1;
7537 bp->rx_offset = 0;
7538
7539 bp->tx_ticks = 50;
7540 bp->rx_ticks = 25;
7541
7542 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7543 bp->current_interval = (poll ? poll : bp->timer_interval);
7544
7545 init_timer(&bp->timer);
7546 bp->timer.expires = jiffies + bp->current_interval;
7547 bp->timer.data = (unsigned long) bp;
7548 bp->timer.function = bnx2x_timer;
7549
7550 return rc;
7551}
7552
7553/*
7554 * ethtool service functions
7555 */
7556
7557/* All ethtool functions called with rtnl_lock */
7558
7559static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7560{
7561 struct bnx2x *bp = netdev_priv(dev);
7562
7563 cmd->supported = bp->port.supported;
7564 cmd->advertising = bp->port.advertising;
7565
7566 if (netif_carrier_ok(dev)) {
7567 cmd->speed = bp->link_vars.line_speed;
7568 cmd->duplex = bp->link_vars.duplex;
7569 } else {
7570 cmd->speed = bp->link_params.req_line_speed;
7571 cmd->duplex = bp->link_params.req_duplex;
7572 }
7573 if (IS_E1HMF(bp)) {
7574 u16 vn_max_rate;
7575
7576 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7577 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7578 if (vn_max_rate < cmd->speed)
7579 cmd->speed = vn_max_rate;
7580 }
7581
7582 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7583 u32 ext_phy_type =
7584 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7585
7586 switch (ext_phy_type) {
7587 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7588 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7589 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7590 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7591 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7592 cmd->port = PORT_FIBRE;
7593 break;
7594
7595 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7596 cmd->port = PORT_TP;
7597 break;
7598
7599 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7600 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7601 bp->link_params.ext_phy_config);
7602 break;
7603
7604 default:
7605 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
7606 bp->link_params.ext_phy_config);
7607 break;
7608 }
7609 } else
7610 cmd->port = PORT_TP;
7611
7612 cmd->phy_address = bp->port.phy_addr;
7613 cmd->transceiver = XCVR_INTERNAL;
7614
7615 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7616 cmd->autoneg = AUTONEG_ENABLE;
7617 else
7618 cmd->autoneg = AUTONEG_DISABLE;
7619
7620 cmd->maxtxpkt = 0;
7621 cmd->maxrxpkt = 0;
7622
7623 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7624 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7625 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7626 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7627 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7628 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7629 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7630
7631 return 0;
7632}
7633
7634static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7635{
7636 struct bnx2x *bp = netdev_priv(dev);
7637 u32 advertising;
7638
7639 if (IS_E1HMF(bp))
7640 return 0;
7641
7642 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
7643 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
7644 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
7645 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
7646 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
7647 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
7648 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
7649
7650 if (cmd->autoneg == AUTONEG_ENABLE) {
7651 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7652 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7653 return -EINVAL;
7654 }
7655
7656 /* advertise the requested speed and duplex if supported */
7657 cmd->advertising &= bp->port.supported;
7658
7659 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7660 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
7661 bp->port.advertising |= (ADVERTISED_Autoneg |
7662 cmd->advertising);
7663
7664 } else { /* forced speed */
7665 /* advertise the requested speed and duplex if supported */
7666 switch (cmd->speed) {
7667 case SPEED_10:
7668 if (cmd->duplex == DUPLEX_FULL) {
7669 if (!(bp->port.supported &
7670 SUPPORTED_10baseT_Full)) {
7671 DP(NETIF_MSG_LINK,
7672 "10M full not supported\n");
7673 return -EINVAL;
7674 }
7675
7676 advertising = (ADVERTISED_10baseT_Full |
7677 ADVERTISED_TP);
7678 } else {
7679 if (!(bp->port.supported &
7680 SUPPORTED_10baseT_Half)) {
7681 DP(NETIF_MSG_LINK,
7682 "10M half not supported\n");
7683 return -EINVAL;
7684 }
7685
7686 advertising = (ADVERTISED_10baseT_Half |
7687 ADVERTISED_TP);
7688 }
7689 break;
7690
7691 case SPEED_100:
7692 if (cmd->duplex == DUPLEX_FULL) {
7693 if (!(bp->port.supported &
7694 SUPPORTED_100baseT_Full)) {
7695 DP(NETIF_MSG_LINK,
7696 "100M full not supported\n");
7697 return -EINVAL;
7698 }
7699
7700 advertising = (ADVERTISED_100baseT_Full |
7701 ADVERTISED_TP);
7702 } else {
7703 if (!(bp->port.supported &
7704 SUPPORTED_100baseT_Half)) {
7705 DP(NETIF_MSG_LINK,
7706 "100M half not supported\n");
7707 return -EINVAL;
7708 }
7709
7710 advertising = (ADVERTISED_100baseT_Half |
7711 ADVERTISED_TP);
7712 }
7713 break;
7714
7715 case SPEED_1000:
7716 if (cmd->duplex != DUPLEX_FULL) {
7717 DP(NETIF_MSG_LINK, "1G half not supported\n");
7718 return -EINVAL;
7719 }
7720
7721 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
7722 DP(NETIF_MSG_LINK, "1G full not supported\n");
7723 return -EINVAL;
7724 }
7725
7726 advertising = (ADVERTISED_1000baseT_Full |
7727 ADVERTISED_TP);
7728 break;
7729
7730 case SPEED_2500:
7731 if (cmd->duplex != DUPLEX_FULL) {
7732 DP(NETIF_MSG_LINK,
7733 "2.5G half not supported\n");
7734 return -EINVAL;
7735 }
7736
7737 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
7738 DP(NETIF_MSG_LINK,
7739 "2.5G full not supported\n");
7740 return -EINVAL;
7741 }
7742
7743 advertising = (ADVERTISED_2500baseX_Full |
7744 ADVERTISED_TP);
7745 break;
7746
7747 case SPEED_10000:
7748 if (cmd->duplex != DUPLEX_FULL) {
7749 DP(NETIF_MSG_LINK, "10G half not supported\n");
7750 return -EINVAL;
7751 }
7752
7753 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
7754 DP(NETIF_MSG_LINK, "10G full not supported\n");
7755 return -EINVAL;
7756 }
7757
7758 advertising = (ADVERTISED_10000baseT_Full |
7759 ADVERTISED_FIBRE);
7760 break;
7761
7762 default:
7763 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7764 return -EINVAL;
7765 }
7766
7767 bp->link_params.req_line_speed = cmd->speed;
7768 bp->link_params.req_duplex = cmd->duplex;
7769 bp->port.advertising = advertising;
7770 }
7771
7772 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
7773 DP_LEVEL " req_duplex %d advertising 0x%x\n",
7774 bp->link_params.req_line_speed, bp->link_params.req_duplex,
7775 bp->port.advertising);
7776
7777 if (netif_running(dev)) {
7778 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7779 bnx2x_link_set(bp);
7780 }
7781
7782 return 0;
7783}
7784
7785#define PHY_FW_VER_LEN 10
7786
7787static void bnx2x_get_drvinfo(struct net_device *dev,
7788 struct ethtool_drvinfo *info)
7789{
7790 struct bnx2x *bp = netdev_priv(dev);
7791 u8 phy_fw_ver[PHY_FW_VER_LEN];
7792
7793 strcpy(info->driver, DRV_MODULE_NAME);
7794 strcpy(info->version, DRV_MODULE_VERSION);
7795
7796 phy_fw_ver[0] = '\0';
7797 if (bp->port.pmf) {
7798 bnx2x_acquire_phy_lock(bp);
7799 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7800 (bp->state != BNX2X_STATE_CLOSED),
7801 phy_fw_ver, PHY_FW_VER_LEN);
7802 bnx2x_release_phy_lock(bp);
7803 }
7804
7805 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
7806 (bp->common.bc_ver & 0xff0000) >> 16,
7807 (bp->common.bc_ver & 0xff00) >> 8,
7808 (bp->common.bc_ver & 0xff),
7809 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
7810 strcpy(info->bus_info, pci_name(bp->pdev));
7811 info->n_stats = BNX2X_NUM_STATS;
7812 info->testinfo_len = BNX2X_NUM_TESTS;
7813 info->eedump_len = bp->common.flash_size;
7814 info->regdump_len = 0;
7815}
7816
7817static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7818{
7819 struct bnx2x *bp = netdev_priv(dev);
7820
7821 if (bp->flags & NO_WOL_FLAG) {
7822 wol->supported = 0;
7823 wol->wolopts = 0;
7824 } else {
7825 wol->supported = WAKE_MAGIC;
7826 if (bp->wol)
7827 wol->wolopts = WAKE_MAGIC;
7828 else
7829 wol->wolopts = 0;
7830 }
7831 memset(&wol->sopass, 0, sizeof(wol->sopass));
7832}
7833
7834static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7835{
7836 struct bnx2x *bp = netdev_priv(dev);
7837
7838 if (wol->wolopts & ~WAKE_MAGIC)
7839 return -EINVAL;
7840
7841 if (wol->wolopts & WAKE_MAGIC) {
7842 if (bp->flags & NO_WOL_FLAG)
7843 return -EINVAL;
7844
7845 bp->wol = 1;
7846 } else
7847 bp->wol = 0;
7848
7849 return 0;
7850}
7851
7852static u32 bnx2x_get_msglevel(struct net_device *dev)
7853{
7854 struct bnx2x *bp = netdev_priv(dev);
7855
7856 return bp->msglevel;
7857}
7858
7859static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7860{
7861 struct bnx2x *bp = netdev_priv(dev);
7862
7863 if (capable(CAP_NET_ADMIN))
7864 bp->msglevel = level;
7865}
7866
7867static int bnx2x_nway_reset(struct net_device *dev)
7868{
7869 struct bnx2x *bp = netdev_priv(dev);
7870
7871 if (!bp->port.pmf)
7872 return 0;
7873
7874 if (netif_running(dev)) {
7875 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7876 bnx2x_link_set(bp);
7877 }
7878
7879 return 0;
7880}
7881
7882static int bnx2x_get_eeprom_len(struct net_device *dev)
7883{
7884 struct bnx2x *bp = netdev_priv(dev);
7885
7886 return bp->common.flash_size;
7887}
7888
7889static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7890{
7891 int port = BP_PORT(bp);
7892 int count, i;
7893 u32 val = 0;
7894
7895 /* adjust timeout for emulation/FPGA */
7896 count = NVRAM_TIMEOUT_COUNT;
7897 if (CHIP_REV_IS_SLOW(bp))
7898 count *= 100;
7899
7900 /* request access to nvram interface */
7901 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7902 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7903
7904 for (i = 0; i < count*10; i++) {
7905 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7906 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7907 break;
7908
7909 udelay(5);
7910 }
7911
7912 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
7913 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7914 return -EBUSY;
7915 }
7916
7917 return 0;
7918}
7919
7920static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7921{
7922 int port = BP_PORT(bp);
7923 int count, i;
7924 u32 val = 0;
7925
7926 /* adjust timeout for emulation/FPGA */
7927 count = NVRAM_TIMEOUT_COUNT;
7928 if (CHIP_REV_IS_SLOW(bp))
7929 count *= 100;
7930
7931 /* relinquish nvram interface */
7932 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7933 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7934
7935 for (i = 0; i < count*10; i++) {
7936 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7937 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7938 break;
7939
7940 udelay(5);
7941 }
7942
7943 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
7944 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7945 return -EBUSY;
7946 }
7947
7948 return 0;
7949}
7950
7951static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7952{
7953 u32 val;
7954
7955 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7956
7957 /* enable both bits, even on read */
7958 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7959 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7960 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7961}
7962
7963static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7964{
7965 u32 val;
7966
7967 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7968
7969 /* disable both bits, even after read */
7970 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7971 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7972 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7973}
7974
7975static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7976 u32 cmd_flags)
7977{
7978 int count, i, rc;
7979 u32 val;
7980
7981 /* build the command word */
7982 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7983
7984 /* need to clear DONE bit separately */
7985 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7986
7987 /* address of the NVRAM to read from */
7988 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7989 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7990
7991 /* issue a read command */
7992 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7993
7994 /* adjust timeout for emulation/FPGA */
7995 count = NVRAM_TIMEOUT_COUNT;
7996 if (CHIP_REV_IS_SLOW(bp))
7997 count *= 100;
7998
7999 /* wait for completion */
8000 *ret_val = 0;
8001 rc = -EBUSY;
8002 for (i = 0; i < count; i++) {
8003 udelay(5);
8004 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8005
8006 if (val & MCPR_NVM_COMMAND_DONE) {
8007 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8008 /* we read nvram data in cpu order
8009 * but ethtool sees it as an array of bytes
8010 * converting to big-endian will do the work */
8011 val = cpu_to_be32(val);
8012 *ret_val = val;
8013 rc = 0;
8014 break;
8015 }
8016 }
8017
8018 return rc;
8019}
8020
8021static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8022 int buf_size)
8023{
8024 int rc;
8025 u32 cmd_flags;
8026 u32 val;
8027
8028 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8029 DP(BNX2X_MSG_NVM,
8030 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8031 offset, buf_size);
8032 return -EINVAL;
8033 }
8034
8035 if (offset + buf_size > bp->common.flash_size) {
8036 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8037 " buf_size (0x%x) > flash_size (0x%x)\n",
8038 offset, buf_size, bp->common.flash_size);
8039 return -EINVAL;
8040 }
8041
8042 /* request access to nvram interface */
8043 rc = bnx2x_acquire_nvram_lock(bp);
8044 if (rc)
8045 return rc;
8046
8047 /* enable access to nvram interface */
8048 bnx2x_enable_nvram_access(bp);
8049
8050 /* read the first word(s) */
8051 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8052 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8053 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8054 memcpy(ret_buf, &val, 4);
8055
8056 /* advance to the next dword */
8057 offset += sizeof(u32);
8058 ret_buf += sizeof(u32);
8059 buf_size -= sizeof(u32);
8060 cmd_flags = 0;
8061 }
8062
8063 if (rc == 0) {
8064 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8065 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8066 memcpy(ret_buf, &val, 4);
8067 }
8068
8069 /* disable access to nvram interface */
8070 bnx2x_disable_nvram_access(bp);
8071 bnx2x_release_nvram_lock(bp);
8072
8073 return rc;
8074}
8075
8076static int bnx2x_get_eeprom(struct net_device *dev,
8077 struct ethtool_eeprom *eeprom, u8 *eebuf)
8078{
8079 struct bnx2x *bp = netdev_priv(dev);
8080 int rc;
8081
8082 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8083 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8084 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8085 eeprom->len, eeprom->len);
8086
8087 /* parameters already validated in ethtool_get_eeprom */
8088
8089 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8090
8091 return rc;
8092}
8093
8094static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8095 u32 cmd_flags)
8096{
8097 int count, i, rc;
8098
8099 /* build the command word */
8100 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8101
8102 /* need to clear DONE bit separately */
8103 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8104
8105 /* write the data */
8106 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8107
8108 /* address of the NVRAM to write to */
8109 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8110 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8111
8112 /* issue the write command */
8113 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8114
8115 /* adjust timeout for emulation/FPGA */
8116 count = NVRAM_TIMEOUT_COUNT;
8117 if (CHIP_REV_IS_SLOW(bp))
8118 count *= 100;
8119
8120 /* wait for completion */
8121 rc = -EBUSY;
8122 for (i = 0; i < count; i++) {
8123 udelay(5);
8124 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8125 if (val & MCPR_NVM_COMMAND_DONE) {
8126 rc = 0;
8127 break;
8128 }
8129 }
8130
8131 return rc;
8132}
8133
8134#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
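/* Illustration (not from the source): BYTE_OFFSET(0x6) = 16, so a
 * single-byte write to offset 0x6 is shifted into bits 23:16 of the
 * dword that bnx2x_nvram_write1() reads back from aligned offset 0x4. */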
8135
8136static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8137 int buf_size)
8138{
8139 int rc;
8140 u32 cmd_flags;
8141 u32 align_offset;
8142 u32 val;
8143
8144 if (offset + buf_size > bp->common.flash_size) {
8145 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8146 " buf_size (0x%x) > flash_size (0x%x)\n",
8147 offset, buf_size, bp->common.flash_size);
8148 return -EINVAL;
8149 }
8150
8151 /* request access to nvram interface */
8152 rc = bnx2x_acquire_nvram_lock(bp);
8153 if (rc)
8154 return rc;
8155
8156 /* enable access to nvram interface */
8157 bnx2x_enable_nvram_access(bp);
8158
8159 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8160 align_offset = (offset & ~0x03);
8161 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8162
8163 if (rc == 0) {
8164 val &= ~(0xff << BYTE_OFFSET(offset));
8165 val |= (*data_buf << BYTE_OFFSET(offset));
8166
8167 /* nvram data is returned as an array of bytes
8168 * convert it back to cpu order */
8169 val = be32_to_cpu(val);
8170
8171 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8172 cmd_flags);
8173 }
8174
8175 /* disable access to nvram interface */
8176 bnx2x_disable_nvram_access(bp);
8177 bnx2x_release_nvram_lock(bp);
8178
8179 return rc;
8180}
8181
8182static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8183 int buf_size)
8184{
8185 int rc;
8186 u32 cmd_flags;
8187 u32 val;
8188 u32 written_so_far;
8189
8190 if (buf_size == 1) /* ethtool */
8191 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8192
8193 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8194 DP(BNX2X_MSG_NVM,
8195 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8196 offset, buf_size);
8197 return -EINVAL;
8198 }
8199
8200 if (offset + buf_size > bp->common.flash_size) {
8201 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8202 " buf_size (0x%x) > flash_size (0x%x)\n",
8203 offset, buf_size, bp->common.flash_size);
8204 return -EINVAL;
8205 }
8206
8207 /* request access to nvram interface */
8208 rc = bnx2x_acquire_nvram_lock(bp);
8209 if (rc)
8210 return rc;
8211
8212 /* enable access to nvram interface */
8213 bnx2x_enable_nvram_access(bp);
8214
8215 written_so_far = 0;
8216 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8217 while ((written_so_far < buf_size) && (rc == 0)) {
8218 if (written_so_far == (buf_size - sizeof(u32)))
8219 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8220 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8221 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8222 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8223 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
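 /* Illustration (assuming NVRAM_PAGE_SIZE is 256): a burst must not
  * cross a flash page, so the dword written at offset 0xfc is flagged
  * LAST and the next one at 0x100 opens a new burst with FIRST. */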
8224
8225 memcpy(&val, data_buf, 4);
8226
8227 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8228
8229 /* advance to the next dword */
8230 offset += sizeof(u32);
8231 data_buf += sizeof(u32);
8232 written_so_far += sizeof(u32);
8233 cmd_flags = 0;
8234 }
8235
8236 /* disable access to nvram interface */
8237 bnx2x_disable_nvram_access(bp);
8238 bnx2x_release_nvram_lock(bp);
8239
8240 return rc;
8241}
8242
8243static int bnx2x_set_eeprom(struct net_device *dev,
8244 struct ethtool_eeprom *eeprom, u8 *eebuf)
8245{
8246 struct bnx2x *bp = netdev_priv(dev);
8247 int rc;
8248
8249 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8250 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8251 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8252 eeprom->len, eeprom->len);
8253
8254 /* parameters already validated in ethtool_set_eeprom */
8255
8256 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8257 if (eeprom->magic == 0x00504859)
8258 if (bp->port.pmf) {
8259
8260 bnx2x_acquire_phy_lock(bp);
8261 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8262 bp->link_params.ext_phy_config,
8263 (bp->state != BNX2X_STATE_CLOSED),
8264 eebuf, eeprom->len);
8265 if ((bp->state == BNX2X_STATE_OPEN) ||
8266 (bp->state == BNX2X_STATE_DISABLED)) {
8267 rc |= bnx2x_link_reset(&bp->link_params,
8268 &bp->link_vars);
8269 rc |= bnx2x_phy_init(&bp->link_params,
8270 &bp->link_vars);
8271 }
8272 bnx2x_release_phy_lock(bp);
8273
8274 } else /* Only the PMF can access the PHY */
8275 return -EINVAL;
8276 else
8277 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8278
8279 return rc;
8280}
8281
8282static int bnx2x_get_coalesce(struct net_device *dev,
8283 struct ethtool_coalesce *coal)
8284{
8285 struct bnx2x *bp = netdev_priv(dev);
8286
8287 memset(coal, 0, sizeof(struct ethtool_coalesce));
8288
8289 coal->rx_coalesce_usecs = bp->rx_ticks;
8290 coal->tx_coalesce_usecs = bp->tx_ticks;
8291
8292 return 0;
8293}
8294
8295static int bnx2x_set_coalesce(struct net_device *dev,
8296 struct ethtool_coalesce *coal)
8297{
8298 struct bnx2x *bp = netdev_priv(dev);
8299
8300 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8301 if (bp->rx_ticks > 3000)
8302 bp->rx_ticks = 3000;
8303
8304 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8305 if (bp->tx_ticks > 0x3000)
8306 bp->tx_ticks = 0x3000;
8307
8308 if (netif_running(dev))
8309 bnx2x_update_coalesce(bp);
8310
8311 return 0;
8312}
8313
8314static void bnx2x_get_ringparam(struct net_device *dev,
8315 struct ethtool_ringparam *ering)
8316{
8317 struct bnx2x *bp = netdev_priv(dev);
8318
8319 ering->rx_max_pending = MAX_RX_AVAIL;
8320 ering->rx_mini_max_pending = 0;
8321 ering->rx_jumbo_max_pending = 0;
8322
8323 ering->rx_pending = bp->rx_ring_size;
8324 ering->rx_mini_pending = 0;
8325 ering->rx_jumbo_pending = 0;
8326
8327 ering->tx_max_pending = MAX_TX_AVAIL;
8328 ering->tx_pending = bp->tx_ring_size;
8329}
8330
8331static int bnx2x_set_ringparam(struct net_device *dev,
8332 struct ethtool_ringparam *ering)
8333{
8334 struct bnx2x *bp = netdev_priv(dev);
8335 int rc = 0;
8336
8337 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8338 (ering->tx_pending > MAX_TX_AVAIL) ||
8339 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8340 return -EINVAL;
8341
8342 bp->rx_ring_size = ering->rx_pending;
8343 bp->tx_ring_size = ering->tx_pending;
8344
8345 if (netif_running(dev)) {
8346 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8347 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8348 }
8349
8350 return rc;
8351}
8352
8353static void bnx2x_get_pauseparam(struct net_device *dev,
8354 struct ethtool_pauseparam *epause)
8355{
8356 struct bnx2x *bp = netdev_priv(dev);
8357
8358 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
8359 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8360
8361 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
8362 FLOW_CTRL_RX);
8363 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
8364 FLOW_CTRL_TX);
8365
8366 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8367 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8368 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8369}
8370
8371static int bnx2x_set_pauseparam(struct net_device *dev,
8372 struct ethtool_pauseparam *epause)
8373{
8374 struct bnx2x *bp = netdev_priv(dev);
8375
8376 if (IS_E1HMF(bp))
8377 return 0;
8378
8379 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8380 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8381 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8382
8383 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8384
8385 if (epause->rx_pause)
8386 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
8387
8388 if (epause->tx_pause)
8389 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
8390
8391 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
8392 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
8393
8394 if (epause->autoneg) {
8395 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8396 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8397 return -EINVAL;
8398 }
8399
8400 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8401 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
8402 }
8403
8404 DP(NETIF_MSG_LINK,
8405 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8406
8407 if (netif_running(dev)) {
8408 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8409 bnx2x_link_set(bp);
8410 }
8411
8412 return 0;
8413}
8414
8415static int bnx2x_set_flags(struct net_device *dev, u32 data)
8416{
8417 struct bnx2x *bp = netdev_priv(dev);
8418 int changed = 0;
8419 int rc = 0;
8420
8421 /* TPA requires Rx CSUM offloading */
8422 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8423 if (!(dev->features & NETIF_F_LRO)) {
8424 dev->features |= NETIF_F_LRO;
8425 bp->flags |= TPA_ENABLE_FLAG;
8426 changed = 1;
8427 }
8428
8429 } else if (dev->features & NETIF_F_LRO) {
8430 dev->features &= ~NETIF_F_LRO;
8431 bp->flags &= ~TPA_ENABLE_FLAG;
8432 changed = 1;
8433 }
8434
8435 if (changed && netif_running(dev)) {
8436 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8437 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8438 }
8439
8440 return rc;
8441}
8442
8443static u32 bnx2x_get_rx_csum(struct net_device *dev)
8444{
8445 struct bnx2x *bp = netdev_priv(dev);
8446
8447 return bp->rx_csum;
8448}
8449
8450static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8451{
8452 struct bnx2x *bp = netdev_priv(dev);
8453 int rc = 0;
8454
8455 bp->rx_csum = data;
8456
8457 /* Disable TPA, when Rx CSUM is disabled. Otherwise all
8458 TPA'ed packets will be discarded due to wrong TCP CSUM */
8459 if (!data) {
8460 u32 flags = ethtool_op_get_flags(dev);
8461
8462 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
8463 }
8464
8465 return rc;
8466}
8467
8468static int bnx2x_set_tso(struct net_device *dev, u32 data)
8469{
8470 if (data) {
8471 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8472 dev->features |= NETIF_F_TSO6;
8473 } else {
8474 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
8475 dev->features &= ~NETIF_F_TSO6;
8476 }
8477
8478 return 0;
8479}
8480
8481 static const struct {
8482 char string[ETH_GSTRING_LEN];
8483} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
8484 { "register_test (offline)" },
8485 { "memory_test (offline)" },
8486 { "loopback_test (offline)" },
8487 { "nvram_test (online)" },
8488 { "interrupt_test (online)" },
8489 { "link_test (online)" },
8490 { "idle check (online)" },
8491 { "MC errors (online)" }
8492};
8493
8494static int bnx2x_self_test_count(struct net_device *dev)
8495{
8496 return BNX2X_NUM_TESTS;
8497}
8498
8499static int bnx2x_test_registers(struct bnx2x *bp)
8500{
8501 int idx, i, rc = -ENODEV;
8502 u32 wr_val = 0;
8503 int port = BP_PORT(bp);
8504 static const struct {
8505 u32 offset0;
8506 u32 offset1;
8507 u32 mask;
8508 } reg_tbl[] = {
8509/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
8510 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
8511 { HC_REG_AGG_INT_0, 4, 0x000003ff },
8512 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
8513 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
8514 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
8515 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
8516 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8517 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
8518 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
8519/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
8520 { QM_REG_CONNNUM_0, 4, 0x000fffff },
8521 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
8522 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
8523 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
8524 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
8525 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
8526 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
8527 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
8528 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
8529/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
8530 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
8531 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
8532 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
8533 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
8534 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
8535 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
8536 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
8537 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
8538 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
8539/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
8540 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
8541 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
8542 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
8543 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
8544 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
8545 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
8546 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
8547
8548 { 0xffffffff, 0, 0x00000000 }
8549 };
8550
8551 if (!netif_running(bp->dev))
8552 return rc;
8553
8554 /* Repeat the test twice:
8555 First by writing 0x00000000, second by writing 0xffffffff */
8556 for (idx = 0; idx < 2; idx++) {
8557
8558 switch (idx) {
8559 case 0:
8560 wr_val = 0;
8561 break;
8562 case 1:
8563 wr_val = 0xffffffff;
8564 break;
8565 }
8566
8567 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
8568 u32 offset, mask, save_val, val;
f3c87cdd
YG
8569
8570 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
8571 mask = reg_tbl[i].mask;
8572
8573 save_val = REG_RD(bp, offset);
8574
8575 REG_WR(bp, offset, wr_val);
8576 val = REG_RD(bp, offset);
8577
8578 /* Restore the original register's value */
8579 REG_WR(bp, offset, save_val);
8580
8581 /* verify that value is as expected value */
8582 if ((val & mask) != (wr_val & mask))
8583 goto test_reg_exit;
8584 }
8585 }
8586
8587 rc = 0;
8588
8589test_reg_exit:
8590 return rc;
8591}
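
/*
 * Worked illustration of the write/read-back pattern above (hypothetical
 * values, not driver code): for the entry { HC_REG_AGG_INT_0, 4, 0x000003ff }
 * on port 1, the tested offset is HC_REG_AGG_INT_0 + 1*4.  Writing
 * 0xffffffff must read back as 0x3ff in the masked bits (and writing 0 must
 * read back as 0); any other masked value indicates a stuck bit and fails
 * the test.
 */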

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:
	bp->dev->last_rx = jiffies;

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);
	mmiowb(); /* keep prod updates ordered */

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3
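
/*
 * Background on the constant above (a general CRC32 property, not specific
 * to this driver): when a region is stored with its little-endian CRC32
 * appended, recomputing the CRC over the region *including* the stored CRC
 * yields the fixed residual 0xdebb20e3 for intact data.  That is why
 * bnx2x_test_nvram() below can validate each nvram_tbl region with a single
 * ether_crc_le() call and one comparison, without parsing out the stored CRC.
 */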

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length_6b = 0;
	config->hdr.offset = 0;
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	buf[7] = bnx2x_mc_assert(bp);
	if (buf[7] != 0)
		etest->flags |= ETH_TEST_FL_FAILED;

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(valid_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_FUNC, "rx_error_bytes" },
	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_FUNC, "rx_bcast_packets" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_FUNC, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(jabber_packets_received),
				4, STATS_FLAGS_FUNC, "rx_oversize_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
	{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xon_frames" },
	{ STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_xoff_frames" },
	{ STATS_OFFSET32(tx_stat_outxonsent_hi),
				8, STATS_FLAGS_PORT, "tx_xon_frames" },
	{ STATS_OFFSET32(tx_stat_outxoffsent_hi),
				8, STATS_FLAGS_PORT, "tx_xoff_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(no_buff_discard),
				4, STATS_FLAGS_FUNC, "rx_discards" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "brb_truncate" },
/* 40 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_FUNC, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_FUNC, "rx_skb_alloc_discard" },
/* 42 */{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_FUNC, "rx_csum_offload_errors" }
};

#define IS_NOT_E1HMF_STAT(bp, i) \
		(IS_E1HMF(bp) && (bnx2x_stats_arr[i].flags & STATS_FLAGS_PORT))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_NOT_E1HMF_STAT(bp, i))
				continue;
			strcpy(buf + j*ETH_GSTRING_LEN,
			       bnx2x_stats_arr[i].string);
			j++;
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats = 0;

	for (i = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;
		num_stats++;
	}
	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats = (u32 *)&bp->eth_stats;
	int i, j;

	for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
		if (IS_NOT_E1HMF_STAT(bp, i))
			continue;

		if (bnx2x_stats_arr[i].size == 0) {
			/* skip this counter */
			buf[j] = 0;
			j++;
			continue;
		}
		if (bnx2x_stats_arr[i].size == 4) {
			/* 4-byte counter */
			buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
			j++;
			continue;
		}
		/* 8-byte counter */
		buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
				  *(hw_stats + bnx2x_stats_arr[i].offset + 1));
		j++;
	}
}
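
/*
 * Illustration (hypothetical counter words, not driver code): HILO_U64
 * rebuilds one 64-bit counter from the two consecutive 32-bit words the
 * firmware keeps, high word first.  With *offset == 0x1 and
 * *(offset + 1) == 0x80000000, the value reported to ethtool is
 * ((u64)0x1 << 32) | 0x80000000 == 0x180000000.
 */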

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}
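
/*
 * Note on the PMCSR handling above (PCI power-management background, not
 * bnx2x-specific): the low two bits of PCI_PM_CTRL encode the power state,
 * so "pmcsr |= 3" requests D3hot, while the D0 branch clears
 * PCI_PM_CTRL_STATE_MASK and then sleeps (the driver uses 20 ms) because a
 * device leaving D3hot needs settle time before its memory space is
 * touched again.
 */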

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;
	u16 rx_cons_sb;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (BNX2X_HAS_TX_WORK(fp))
		bnx2x_tx_int(fp, budget);

	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	if (BNX2X_HAS_RX_WORK(fp))
		work_done = bnx2x_rx_int(fp, budget);

	rmb(); /* BNX2X_HAS_WORK() reads the status block */
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		netif_rx_complete(bp->dev, napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

/* We split the first BD into a header BD and a data BD
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix the first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping;
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
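
/*
 * Usage sketch for bnx2x_csum_fix() (hypothetical offset, not driver code):
 * if the stack computed the partial checksum starting fix == 2 bytes before
 * the transport header, csum_partial(t_header - 2, 2, 0) sums exactly those
 * two extra bytes and csum_sub() removes them, so the folded result covers
 * the transport header onward; the swab16() matches the byte order the
 * parsing BD expects.
 */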

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
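
/*
 * Example classification (derived from the logic above): a CHECKSUM_PARTIAL
 * IPv4/TCP skb with SKB_GSO_TCPV4 set yields
 * xmit_type == (XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4), which makes
 * bnx2x_start_xmit() build a parsing BD and take the LSO path.
 */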

/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented
			   must always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
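
/*
 * Illustration of the sliding-window check above (the window arithmetic is
 * from the code; the concrete numbers are hypothetical): with a window of
 * wnd_size BDs and gso_size == 1460, if any wnd_size consecutive BDs
 * (linear part plus frags) together carry fewer than 1460 bytes, a single
 * MSS-sized frame could require the firmware to fetch more than
 * MAX_FETCH_BD descriptors, so the skb is linearized instead.
 */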

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = (smp_processor_id() % bp->num_queues);
	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		bp->eth_stats.driver_xoff++;
		netif_stop_queue(dev);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/*
	   Please read carefully. First we use one BD which we mark as start,
	   then for TSO or xsum we have a parsing info BD,
	   and only then we have the rest of the TSO BDs.
	   (don't forget to mark the last one as last,
	   and to unmap only AFTER you write to the BD ...)
	   And above all, all pbd sizes are in words - NOT DWORDS!
	 */

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_bd = &fp->tx_desc_ring[bd_prod];

	tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_bd->general_data = (UNICAST_ADDRESS <<
			       ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);

	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
		tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
		vlan_off += 4;
	} else
		tx_bd->vlan = cpu_to_le16(pkt_prod);

	if (xmit_type) {
		/* turn on parsing and get a BD */
		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		pbd = (void *)&fp->tx_desc_ring[bd_prod];

		memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
	}

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data = (hlen |
				    ((skb->protocol == ntohs(ETH_P_8021Q)) <<
				     ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2 - vlan_off;

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
			pbd->cs_offset = fix / 2;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d offset %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
			   SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}

	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);

	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
	tx_bd->nbd = cpu_to_le16(nbd);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
	   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
	   le16_to_cpu(tx_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
						 bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_stop_queue(dev);
		bp->eth_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_wake_queue(dev);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length_6b;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						i--; /* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length_6b = i;
			config->hdr.offset = offset;
			config->hdr.client_id = BP_CL_ID(bp);
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
				   "%02x:%02x:%02x:%02x:%02x:%02x\n",
				   mclist->dmi_addr[0], mclist->dmi_addr[1],
				   mclist->dmi_addr[2], mclist->dmi_addr[3],
				   mclist->dmi_addr[4], mclist->dmi_addr[5]);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
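
/*
 * Worked example for the E1H multicast hash above (hypothetical CRC value):
 * the filter is MC_HASH_SIZE 32-bit registers, i.e. 256 bits.  If
 * crc32c_le() over a MAC address returns 0x4d000000, then bit = 0x4d,
 * regidx = 0x4d >> 5 = 2 and bit & 0x1f = 13, so the address maps to
 * mc_filter[2] |= (1 << 13).
 */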

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;
	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
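
/*
 * Example decode (hypothetical register value): if the width field of
 * PCICFG_LINK_CONTROL extracts to 8 and the speed field to 1, the probe
 * banner printed by bnx2x_init_one() below reads "PCI-E x8 2.5GHz"; a
 * speed field of 2 reports "5GHz (Gen2)".
 */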
10189
a2fbb9ea
ET
10190static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10191 const struct pci_device_id *ent)
10192{
10193 static int version_printed;
10194 struct net_device *dev = NULL;
10195 struct bnx2x *bp;
25047950 10196 int rc;
25047950 10197 DECLARE_MAC_BUF(mac);
a2fbb9ea
ET
10198
10199 if (version_printed++ == 0)
10200 printk(KERN_INFO "%s", version);
10201
10202 /* dev zeroed in init_etherdev */
10203 dev = alloc_etherdev(sizeof(*bp));
34f80b04
EG
10204 if (!dev) {
10205 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10206 return -ENOMEM;
34f80b04 10207 }
a2fbb9ea 10208
a2fbb9ea
ET
10209 bp = netdev_priv(dev);
10210 bp->msglevel = debug;
10211
34f80b04 10212 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10213 if (rc < 0) {
10214 free_netdev(dev);
10215 return rc;
10216 }
10217
a2fbb9ea
ET
10218 rc = register_netdev(dev);
10219 if (rc) {
c14423fe 10220 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 10221 goto init_one_exit;
a2fbb9ea
ET
10222 }
10223
10224 pci_set_drvdata(pdev, dev);
10225
34f80b04
EG
10226 rc = bnx2x_init_bp(bp);
10227 if (rc) {
10228 unregister_netdev(dev);
10229 goto init_one_exit;
10230 }
10231
12b56ea8
EG
10232 netif_carrier_off(dev);
10233
34f80b04 10234 bp->common.name = board_info[ent->driver_data].name;
25047950 10235 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10236 " IRQ %d, ", dev->name, bp->common.name,
10237 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10238 bnx2x_get_pcie_width(bp),
10239 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10240 dev->base_addr, bp->pdev->irq);
10241 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 10242 return 0;
34f80b04
EG
10243
10244init_one_exit:
10245 if (bp->regview)
10246 iounmap(bp->regview);
10247
10248 if (bp->doorbells)
10249 iounmap(bp->doorbells);
10250
10251 free_netdev(dev);
10252
10253 if (atomic_read(&pdev->enable_cnt) == 1)
10254 pci_release_regions(pdev);
10255
10256 pci_disable_device(pdev);
10257 pci_set_drvdata(pdev, NULL);
10258
10259 return rc;
a2fbb9ea
ET
10260}
10261
10262static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10263{
10264 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10265 struct bnx2x *bp;
10266
10267 if (!dev) {
228241eb
ET
10268 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10269 return;
10270 }
228241eb 10271 bp = netdev_priv(dev);
a2fbb9ea 10272
a2fbb9ea
ET
10273 unregister_netdev(dev);
10274
10275 if (bp->regview)
10276 iounmap(bp->regview);
10277
10278 if (bp->doorbells)
10279 iounmap(bp->doorbells);
10280
10281 free_netdev(dev);
34f80b04
EG
10282
10283 if (atomic_read(&pdev->enable_cnt) == 1)
10284 pci_release_regions(pdev);
10285
a2fbb9ea
ET
10286 pci_disable_device(pdev);
10287 pci_set_drvdata(pdev, NULL);
10288}
10289
10290static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10291{
10292 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10293 struct bnx2x *bp;
10294
34f80b04
EG
10295 if (!dev) {
10296 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10297 return -ENODEV;
10298 }
10299 bp = netdev_priv(dev);
a2fbb9ea 10300
34f80b04 10301 rtnl_lock();
a2fbb9ea 10302
34f80b04 10303 pci_save_state(pdev);
228241eb 10304
34f80b04
EG
10305 if (!netif_running(dev)) {
10306 rtnl_unlock();
10307 return 0;
10308 }
a2fbb9ea
ET
10309
10310 netif_device_detach(dev);
a2fbb9ea 10311
da5a662a 10312 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10313
a2fbb9ea 10314 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10315
34f80b04
EG
10316 rtnl_unlock();
10317
a2fbb9ea
ET
10318 return 0;
10319}
10320
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

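/*
 * Stripped-down variant of bnx2x_nic_unload() for EEH: the chip may be
 * inaccessible at this point, so only host-side state is torn down - no
 * ramrods or MCP mailbox commands are issued.
 */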
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

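	/* E1 only: mark every entry in the slowpath multicast config
	   table as invalid */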
	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length_6b; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

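/*
 * Re-establish contact with the MCP after a slot reset: re-read the
 * shared memory base, validate its signature and re-sync the firmware
 * sequence number; if shared memory looks unusable the driver falls
 * back to NO_MCP_FLAG operation.
 */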
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

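/* Hooks for the PCI error recovery (AER/EEH) core */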
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

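/* Module entry/exit: just register/unregister the PCI driver; all real
   work happens in the probe and error-handler callbacks above */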
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);