bnx2x: New statistics code
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/version.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.42.4"
#define DRV_MODULE_RELDATE	"2008/4/9"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int use_inta;
static int poll;
static int debug;
static int nomcp;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(debug, int, 0);
module_param(nomcp, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
MODULE_PARM_DESC(nomcp, "ignore management CPU");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

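/* Illustrative sketch (added for clarity, not part of the original file):
 * the two helpers above tunnel GRC register accesses through a window in
 * PCI config space - the target address is written to PCICFG_GRC_ADDRESS,
 * the data moves through PCICFG_GRC_DATA, and the window is then parked
 * back on the vendor-ID offset.  A hypothetical early-init caller would
 * use them like this:
 */
#if 0
	u32 old = bnx2x_reg_rd_ind(bp, MISC_REG_DRIVER_CONTROL_1);
	bnx2x_reg_wr_ind(bp, MISC_REG_DRIVER_CONTROL_1, old | 0x1);
#endif
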
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}

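/* Illustrative sketch (added for clarity, not part of the original file):
 * a typical DMAE copy of a block from host memory into chip memory, e.g.
 * flushing a 16-dword scratch buffer.  `my_dma' and GRC_DST_ADDR are
 * hypothetical names; the completion handshake - the driver zeroes
 * wb_comp and polls until the DMAE engine writes DMAE_COMP_VAL back -
 * happens inside bnx2x_write_dmae() itself.
 */
#if 0
	bnx2x_write_dmae(bp, my_dma /* dma_addr_t of host buffer */,
			 GRC_DST_ADDR /* dword-aligned GRC address */, 16);
#endif
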
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("          rx_comp_prod(%x) rx_comp_cons(%x)"
			  " *rx_cons_sb(%x)\n",
			  fp->rx_comp_prod, fp->rx_comp_cons,
			  le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("          fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x)\n",
			  fp->fp_c_idx, fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xfe0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

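/* Worked example (added for clarity, not in the original file): the ack
 * is a single 32-bit write.  sb_id_and_flags packs the status block id,
 * storm id, "update index" flag and interrupt mode, each shifted into
 * its own bit range by the IGU_ACK_REGISTER_*_SHIFT constants, next to
 * the last-seen status block index.  So acking SB 5 from USTORM with
 * update=1 and op=IGU_INT_ENABLE ORs those four shifted fields into one
 * dword before the REG_WR() to the per-function IGU mailbox.
 */
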
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((fp->rx_comp_cons != rx_cons_sb) ||
	    (fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
	    (fp->tx_pkt_prod != fp->tx_pkt_cons))
		return 1;

	return 0;
}

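/* Worked example (added for clarity, not in the original file): the last
 * entry of each RCQ page is a "next page" pointer rather than a real
 * completion, so a status-block consumer whose low bits equal
 * MAX_RCQ_DESC_CNT is bumped past it.  E.g. assuming 128 entries per
 * page (MAX_RCQ_DESC_CNT == 127), a consumer of 127 is treated as 128:
 * index 127 only links to the next page, and the first completion worth
 * comparing against sits at index 128.
 */
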
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr);

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	BUG_TRAP(skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

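/* BD-chain layout sketch (added for clarity, not in the original file):
 * a transmitted packet occupies a run of buffer descriptors starting at
 * tx_buf->first_bd; for a checksummed TSO packet with two frags the run
 * looks roughly like
 *
 *   [start bd][parse bd][TSO split-header bd][frag bd][frag bd]
 *
 * Only the start bd and the frag bds carry DMA mappings, which is why
 * the walk above advances past the parse bd and the split-header bd
 * without calling pci_unmap_*() on them.
 */
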
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	BUG_TRAP(used >= 0);
	BUG_TRAP(used <= fp->bp->tx_ring_size);
	BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

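/* Worked example (added for clarity, not in the original file): with a
 * hypothetical tx_ring_size of 1024, prod = 300, cons = 280 and
 * NUM_TX_RINGS = 8, SUB_S16() gives 20 in-flight descriptors; the 8
 * "next-page" entries are charged as permanently used, so used = 28 and
 * the function reports 1024 - 28 = 996 free descriptors.  SUB_S16()
 * does the subtraction in signed 16-bit space so the result stays
 * correct after prod has wrapped around while cons has not.
 */
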
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_use_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)),
	       sw_comp_prod);


	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = FP_IDX(fp);

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = BP_PORT(bp);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}

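/* Illustrative sketch (added for clarity, not part of the original file):
 * the hardware lock is a per-port bit vector in
 * MISC_REG_DRIVER_CONTROL_1 - a write to the +4 "set" address attempts
 * to grab a bit, a read back confirms ownership, and writing the bit to
 * the base address releases it.  A typical caller brackets access to a
 * shared resource like this:
 */
#if 0
	if (bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
		/* ... touch the shared GPIO register ... */
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);
	}
#endif
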
/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ BP_PORT(bp);
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc;

	/* Initialize link parameters structure variables */
	bp->link_params.mtu = bp->dev->mtu;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up)
		bnx2x_link_report(bp);

	bnx2x_calc_fc_adv(bp);

	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	bnx2x_calc_fc_adv(bp);
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.

   Returns:
     sum of vn_min_rates
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will
   be set to 1.
 */
static u32 bnx2x_calc_vn_wsum(struct bnx2x *bp)
{
	int i, port = BP_PORT(bp);
	u32 wsum = 0;
	int all_zero = 1;

	for (i = 0; i < E1HVN_MAX; i++) {
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[2*i + port].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		if (!(vn_cfg & FUNC_MF_CFG_FUNC_HIDE)) {
			/* If min rate is zero - set it to 1 */
			if (!vn_min_rate)
				vn_min_rate = DEF_MIN_RATE;
			else
				all_zero = 0;

			wsum += vn_min_rate;
		}
	}

	/* ... only if all min rates are zeros - disable FAIRNESS */
	if (all_zero)
		return 0;

	return wsum;
}

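/* Worked example (added for clarity, not in the original file): with
 * four vnics whose MIN_BW fields read back as 30, 0, 20 and 0, the
 * scaled rates are 3000, 0, 2000 and 0.  The two zeroes become
 * DEF_MIN_RATE each (so no vnic is starved outright), all_zero stays 0,
 * and wsum = 3000 + 2000 + 2*DEF_MIN_RATE.  Only if every non-hidden
 * vnic reads back 0 does the function return 0 and fairness get
 * switched off.
 */
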
static void bnx2x_init_port_minmax(struct bnx2x *bp,
				   int en_fness,
				   u16 port_rate,
				   struct cmng_struct_per_port *m_cmng_port)
{
	u32 r_param = port_rate / 8;
	int port = BP_PORT(bp);
	int i;

	memset(m_cmng_port, 0, sizeof(struct cmng_struct_per_port));

	/* Enable minmax only if we are in e1hmf mode */
	if (IS_E1HMF(bp)) {
		u32 fair_periodic_timeout_usec;
		u32 t_fair;

		/* Enable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 1;
		m_cmng_port->flags.fairness_enable = en_fness ? 1 : 0;
		m_cmng_port->flags.rate_shaping_enable = 1;

		if (!en_fness)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");

		/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
		m_cmng_port->rs_vars.rs_periodic_timeout =
						RS_PERIODIC_TIMEOUT_USEC / 4;

		/* this is the threshold below which no timer arming will occur
		   1.25 coefficient is for the threshold to be a little bigger
		   than the real time, to compensate for timer inaccuracy */
		m_cmng_port->rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

		/* resolution of fairness timer */
		fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
		/* for 10G it is 1000usec. for 1G it is 10000usec. */
		t_fair = T_FAIR_COEF / port_rate;

		/* this is the threshold below which we won't arm
		   the timer anymore */
		m_cmng_port->fair_vars.fair_threshold = QM_ARB_BYTES;

		/* we multiply by 1e3/8 to get bytes/msec.
		   We don't want the credits to pass a credit
		   of the T_FAIR*FAIR_MEM (algorithm resolution) */
		m_cmng_port->fair_vars.upper_bound =
						r_param * t_fair * FAIR_MEM;
		/* since each tick is 4 usec */
		m_cmng_port->fair_vars.fairness_timeout =
						fair_periodic_timeout_usec / 4;

	} else {
		/* Disable rate shaping and fairness */
		m_cmng_port->flags.cmng_vn_enable = 0;
		m_cmng_port->flags.fairness_enable = 0;
		m_cmng_port->flags.rate_shaping_enable = 0;

		DP(NETIF_MSG_IFUP,
		   "Single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
		       ((u32 *)(m_cmng_port))[i]);
}

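/* Worked example (added for clarity, not in the original file): for a
 * 10000 Mbps port, r_param = 10000/8 = 1250 bytes per usec.  The
 * rate-shaping period is converted to 4-usec SDM ticks by the /4, and
 * rs_threshold = period * r_param * 5/4, i.e. the byte credit one
 * period would accumulate, padded by the 1.25 fudge factor from the
 * comment above.  Likewise t_fair = T_FAIR_COEF / 10000 gives the
 * fairness period - 1000 usec at 10G and 10000 usec at 1G, per the
 * in-code comment.
 */
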
1686static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func,
1687 u32 wsum, u16 port_rate,
1688 struct cmng_struct_per_port *m_cmng_port)
1689{
1690 struct rate_shaping_vars_per_vn m_rs_vn;
1691 struct fairness_vars_per_vn m_fair_vn;
1692 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1693 u16 vn_min_rate, vn_max_rate;
1694 int i;
1695
1696 /* If function is hidden - set min and max to zeroes */
1697 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1698 vn_min_rate = 0;
1699 vn_max_rate = 0;
1700
1701 } else {
1702 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1703 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1704 /* If FAIRNESS is enabled (not all min rates are zeroes) and
1705 if current min rate is zero - set it to 1.
1706 This is a requirment of the algorithm. */
1707 if ((vn_min_rate == 0) && wsum)
1708 vn_min_rate = DEF_MIN_RATE;
1709 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1710 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1711 }
1712
1713 DP(NETIF_MSG_IFUP, "func %d: vn_min_rate=%d vn_max_rate=%d "
1714 "wsum=%d\n", func, vn_min_rate, vn_max_rate, wsum);
1715
1716 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1717 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1718
1719 /* global vn counter - maximal Mbps for this vn */
1720 m_rs_vn.vn_counter.rate = vn_max_rate;
1721
1722 /* quota - number of bytes transmitted in this period */
1723 m_rs_vn.vn_counter.quota =
1724 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1725
1726#ifdef BNX2X_PER_PROT_QOS
1727 /* per protocol counter */
1728 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++) {
1729 /* maximal Mbps for this protocol */
1730 m_rs_vn.protocol_counters[protocol].rate =
1731 protocol_max_rate[protocol];
1732 /* the quota in each timer period -
1733 number of bytes transmitted in this period */
1734 m_rs_vn.protocol_counters[protocol].quota =
1735 (u32)(rs_periodic_timeout_usec *
1736 ((double)m_rs_vn.
1737 protocol_counters[protocol].rate/8));
1738 }
1739#endif
1740
1741 if (wsum) {
1742 /* credit for each period of the fairness algorithm:
1743 number of bytes in T_FAIR (the vn share the port rate).
1744 wsum should not be larger than 10000, thus
1745 T_FAIR_COEF / (8 * wsum) will always be grater than zero */
1746 m_fair_vn.vn_credit_delta =
1747 max((u64)(vn_min_rate * (T_FAIR_COEF / (8 * wsum))),
1748 (u64)(m_cmng_port->fair_vars.fair_threshold * 2));
1749 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
1750 m_fair_vn.vn_credit_delta);
1751 }
1752
1753#ifdef BNX2X_PER_PROT_QOS
1754 do {
1755 u32 protocolWeightSum = 0;
1756
1757 for (protocol = 0; protocol < NUM_OF_PROTOCOLS; protocol++)
1758 protocolWeightSum +=
1759 drvInit.protocol_min_rate[protocol];
1760 /* per protocol counter -
1761 NOT NEEDED IF NO PER-PROTOCOL CONGESTION MANAGEMENT */
1762 if (protocolWeightSum > 0) {
1763 for (protocol = 0;
1764 protocol < NUM_OF_PROTOCOLS; protocol++)
1765 /* credit for each period of the
1766 fairness algorithm - number of bytes in
1767 T_FAIR (the protocol share the vn rate) */
1768 m_fair_vn.protocol_credit_delta[protocol] =
1769 (u32)((vn_min_rate / 8) * t_fair *
1770 protocol_min_rate / protocolWeightSum);
1771 }
1772 } while (0);
1773#endif
1774
1775 /* Store it to internal memory */
1776 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1777 REG_WR(bp, BAR_XSTRORM_INTMEM +
1778 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1779 ((u32 *)(&m_rs_vn))[i]);
1780
1781 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1782 REG_WR(bp, BAR_XSTRORM_INTMEM +
1783 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1784 ((u32 *)(&m_fair_vn))[i]);
1785}
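/* Editor's sketch (illustrative only, not driver code; the parameter names
 * are hypothetical): the credit computation used above.  Note that
 * T_FAIR_COEF / (8 * wsum) stays positive because wsum is bounded by 10000,
 * per the comment in bnx2x_init_vn_minmax().
 */
static u64 example_vn_credit_delta(u16 vn_min_rate, u32 wsum,
				   u32 t_fair_coef, u32 fair_threshold)
{
	u64 credit = (u64)vn_min_rate * (t_fair_coef / (8 * wsum));
	u64 min_credit = (u64)fair_threshold * 2;

	/* mirrors the max() in bnx2x_init_vn_minmax() */
	return (credit > min_credit) ? credit : min_credit;
}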
1786
1787/* This function is called upon link interrupt */
1788static void bnx2x_link_attn(struct bnx2x *bp)
1789{
1790 int vn;
1791
1792 /* Make sure that we are synced with the current statistics */
1793 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1794
1795 bnx2x_phy_hw_lock(bp);
1796 bnx2x_link_update(&bp->link_params, &bp->link_vars);
1797 bnx2x_phy_hw_unlock(bp);
1798
1799 if (bp->link_vars.link_up) {
1800
1801 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1802 struct host_port_stats *pstats;
1803
1804 pstats = bnx2x_sp(bp, port_stats);
1805 /* reset old bmac stats */
1806 memset(&(pstats->mac_stx[0]), 0,
1807 sizeof(struct mac_stx));
1808 }
1809 if ((bp->state == BNX2X_STATE_OPEN) ||
1810 (bp->state == BNX2X_STATE_DISABLED))
1811 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1812 }
1813
1814 /* indicate link status */
1815 bnx2x_link_report(bp);
1816
1817 if (IS_E1HMF(bp)) {
1818 int func;
1819
1820 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1821 if (vn == BP_E1HVN(bp))
1822 continue;
1823
1824 func = ((vn << 1) | BP_PORT(bp));
1825
1826 /* Set the attention towards other drivers
1827 on the same port */
1828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1829 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1830 }
1831 }
1832
1833 if (CHIP_IS_E1H(bp) && (bp->link_vars.line_speed > 0)) {
1834 struct cmng_struct_per_port m_cmng_port;
1835 u32 wsum;
1836 int port = BP_PORT(bp);
1837
1838 /* Init RATE SHAPING and FAIRNESS contexts */
1839 wsum = bnx2x_calc_vn_wsum(bp);
1840 bnx2x_init_port_minmax(bp, (int)wsum,
1841 bp->link_vars.line_speed,
1842 &m_cmng_port);
1843 if (IS_E1HMF(bp))
1844 for (vn = VN_0; vn < E1HVN_MAX; vn++)
1845 bnx2x_init_vn_minmax(bp, 2*vn + port,
1846 wsum, bp->link_vars.line_speed,
1847 &m_cmng_port);
1848 }
1849}
1850
1851static void bnx2x__link_status_update(struct bnx2x *bp)
1852{
1853 if (bp->state != BNX2X_STATE_OPEN)
1854 return;
1855
1856 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1857
1858 if (bp->link_vars.link_up)
1859 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1860 else
1861 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1862
1863 /* indicate link status */
1864 bnx2x_link_report(bp);
1865}
1866
1867static void bnx2x_pmf_update(struct bnx2x *bp)
1868{
1869 int port = BP_PORT(bp);
1870 u32 val;
1871
1872 bp->port.pmf = 1;
1873 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1874
1875 /* enable nig attention */
1876 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1877 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1878 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1879
1880 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1881}
1882
1883/* end of Link */
1884
1885/* slow path */
1886
1887/*
1888 * General service functions
1889 */
1890
1891/* the slow path queue is odd since completions arrive on the fastpath ring */
1892static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1893 u32 data_hi, u32 data_lo, int common)
1894{
1895 int func = BP_FUNC(bp);
1896
1897 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1898 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
1899 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
1900 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1901 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1902
1903#ifdef BNX2X_STOP_ON_ERROR
1904 if (unlikely(bp->panic))
1905 return -EIO;
1906#endif
1907
1908 spin_lock_bh(&bp->spq_lock);
1909
1910 if (!bp->spq_left) {
1911 BNX2X_ERR("BUG! SPQ ring full!\n");
1912 spin_unlock_bh(&bp->spq_lock);
1913 bnx2x_panic();
1914 return -EBUSY;
1915 }
1916
1917 /* CID needs port number to be encoded in it */
1918 bp->spq_prod_bd->hdr.conn_and_cmd_data =
1919 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
1920 HW_CID(bp, cid)));
1921 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1922 if (common)
1923 bp->spq_prod_bd->hdr.type |=
1924 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1925
1926 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1927 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1928
1929 bp->spq_left--;
1930
1931 if (bp->spq_prod_bd == bp->spq_last_bd) {
1932 bp->spq_prod_bd = bp->spq;
1933 bp->spq_prod_idx = 0;
1934 DP(NETIF_MSG_TIMER, "end of spq\n");
1935
1936 } else {
1937 bp->spq_prod_bd++;
1938 bp->spq_prod_idx++;
1939 }
1940
1941 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1942 bp->spq_prod_idx);
1943
1944 spin_unlock_bh(&bp->spq_lock);
1945 return 0;
1946}
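/* Editor's sketch (hypothetical caller, not driver code): posting a ramrod
 * through bnx2x_sp_post().  The stats query ramrod later in this file uses
 * this exact shape; the cid and data words are caller-specific.
 */
static int example_post_stat_query(struct bnx2x *bp, u32 data_hi, u32 data_lo)
{
	/* command, cid, data (hi:lo), common = 0 (ETH connection type) */
	return bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
			     data_hi, data_lo, 0);
}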
1947
1948/* acquire split MCP access lock register */
1949static int bnx2x_lock_alr(struct bnx2x *bp)
1950{
1951 u32 i, j, val;
1952 int rc = 0;
1953
1954 might_sleep();
1955 i = 100;
1956 for (j = 0; j < i*10; j++) {
1957 val = (1UL << 31);
1958 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1959 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1960 if (val & (1L << 31))
1961 break;
1962
1963 msleep(5);
1964 }
1965 if (!(val & (1L << 31))) {
1966 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1967 rc = -EBUSY;
1968 }
1969
1970 return rc;
1971}
1972
1973/* Release split MCP access lock register */
1974static void bnx2x_unlock_alr(struct bnx2x *bp)
1975{
1976 u32 val = 0;
1977
1978 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1979}
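/* Editor's sketch (hypothetical caller, not driver code): the ALR is used
 * as a plain acquire/work/release bracket around state that the MCP or the
 * other port may also touch, as bnx2x_attn_int_deasserted() does below.
 */
static void example_alr_bracket(struct bnx2x *bp)
{
	if (bnx2x_lock_alr(bp))		/* -EBUSY after ~5s of polling */
		return;
	/* ... access the shared AEU after-invert registers here ... */
	bnx2x_unlock_alr(bp);
}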
1980
1981static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1982{
1983 struct host_def_status_block *def_sb = bp->def_status_blk;
1984 u16 rc = 0;
1985
1986 barrier(); /* status block is written to by the chip */
1987
1988 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1989 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1990 rc |= 1;
1991 }
1992 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1993 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1994 rc |= 2;
1995 }
1996 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1997 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1998 rc |= 4;
1999 }
2000 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2001 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2002 rc |= 8;
2003 }
2004 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2005 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2006 rc |= 16;
2007 }
2008 return rc;
2009}
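/* Editor's note (sketch): the mask returned above is consumed bit-by-bit
 * by bnx2x_sp_task() below -- bit 0 flags a changed attention index, bit 1
 * the CStorm index, and bits 2-4 the UStorm/XStorm/TStorm indices.
 */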
2010
2011/*
2012 * slow path service functions
2013 */
2014
2015static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2016{
2017 int port = BP_PORT(bp);
2018 int func = BP_FUNC(bp);
2019 u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_FUNC_BASE * func) * 8;
2020 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2021 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2022 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2023 NIG_REG_MASK_INTERRUPT_PORT0;
2024
2025 if (~bp->aeu_mask & (asserted & 0xff))
2026 BNX2X_ERR("IGU ERROR\n");
2027 if (bp->attn_state & asserted)
2028 BNX2X_ERR("IGU ERROR\n");
2029
2030 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2031 bp->aeu_mask, asserted);
2032 bp->aeu_mask &= ~(asserted & 0xff);
2033 DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);
2034
2035 REG_WR(bp, aeu_addr, bp->aeu_mask);
2036
2037 bp->attn_state |= asserted;
2038
2039 if (asserted & ATTN_HARD_WIRED_MASK) {
2040 if (asserted & ATTN_NIG_FOR_FUNC) {
2041
2042 /* save nig interrupt mask */
2043 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2044 REG_WR(bp, nig_int_mask_addr, 0);
2045
2046 bnx2x_link_attn(bp);
2047
2048 /* handle unicore attn? */
2049 }
2050 if (asserted & ATTN_SW_TIMER_4_FUNC)
2051 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2052
2053 if (asserted & GPIO_2_FUNC)
2054 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2055
2056 if (asserted & GPIO_3_FUNC)
2057 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2058
2059 if (asserted & GPIO_4_FUNC)
2060 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2061
2062 if (port == 0) {
2063 if (asserted & ATTN_GENERAL_ATTN_1) {
2064 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2065 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2066 }
2067 if (asserted & ATTN_GENERAL_ATTN_2) {
2068 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2069 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2070 }
2071 if (asserted & ATTN_GENERAL_ATTN_3) {
2072 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2073 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2074 }
2075 } else {
2076 if (asserted & ATTN_GENERAL_ATTN_4) {
2077 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2078 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2079 }
2080 if (asserted & ATTN_GENERAL_ATTN_5) {
2081 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2083 }
2084 if (asserted & ATTN_GENERAL_ATTN_6) {
2085 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2086 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2087 }
2088 }
2089
2090 } /* if hardwired */
2091
2092 DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
2093 asserted, BAR_IGU_INTMEM + igu_addr);
2094 REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);
2095
2096 /* now set back the mask */
2097 if (asserted & ATTN_NIG_FOR_FUNC)
2098 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
2099}
2100
2101static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2102{
2103 int port = BP_PORT(bp);
2104 int reg_offset;
2105 u32 val;
2106
2107 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2108 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2109
2110 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2111
2112 val = REG_RD(bp, reg_offset);
2113 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2114 REG_WR(bp, reg_offset, val);
2115
2116 BNX2X_ERR("SPIO5 hw attention\n");
2117
2118 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
2119 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2120 /* Fan failure attention */
2121
2122 /* The PHY reset is controlled by GPIO 1 */
2123 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2124 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2125 /* Low power mode is controlled by GPIO 2 */
2126 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2127 MISC_REGISTERS_GPIO_OUTPUT_LOW);
2128 /* mark the failure */
2129 bp->link_params.ext_phy_config &=
2130 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2131 bp->link_params.ext_phy_config |=
2132 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2133 SHMEM_WR(bp,
2134 dev_info.port_hw_config[port].
2135 external_phy_config,
2136 bp->link_params.ext_phy_config);
2137 /* log the failure */
2138 printk(KERN_ERR PFX "Fan Failure on Network"
2139 " Controller %s has caused the driver to"
2140 " shutdown the card to prevent permanent"
2141 " damage. Please contact Dell Support for"
2142 " assistance\n", bp->dev->name);
2143 break;
2144
2145 default:
2146 break;
2147 }
2148 }
2149
2150 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2151
2152 val = REG_RD(bp, reg_offset);
2153 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2154 REG_WR(bp, reg_offset, val);
2155
2156 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2157 (attn & HW_INTERRUT_ASSERT_SET_0));
2158 bnx2x_panic();
2159 }
2160}
2161
2162static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2163{
2164 u32 val;
2165
2166 if (attn & BNX2X_DOORQ_ASSERT) {
2167
2168 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2169 BNX2X_ERR("DB hw attention 0x%x\n", val);
2170 /* DORQ discard attention */
2171 if (val & 0x2)
2172 BNX2X_ERR("FATAL error from DORQ\n");
2173 }
2174
2175 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2176
2177 int port = BP_PORT(bp);
2178 int reg_offset;
2179
2180 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2181 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2182
2183 val = REG_RD(bp, reg_offset);
2184 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2185 REG_WR(bp, reg_offset, val);
2186
2187 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2188 (attn & HW_INTERRUT_ASSERT_SET_1));
2189 bnx2x_panic();
2190 }
2191}
2192
2193static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2194{
2195 u32 val;
2196
2197 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2198
2199 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2200 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2201 /* CFC error attention */
2202 if (val & 0x2)
2203 BNX2X_ERR("FATAL error from CFC\n");
2204 }
2205
2206 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2207
2208 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2209 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2210 /* RQ_USDMDP_FIFO_OVERFLOW */
2211 if (val & 0x18000)
2212 BNX2X_ERR("FATAL error from PXP\n");
2213 }
2214
2215 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2216
2217 int port = BP_PORT(bp);
2218 int reg_offset;
2219
2220 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2221 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2222
2223 val = REG_RD(bp, reg_offset);
2224 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2225 REG_WR(bp, reg_offset, val);
2226
2227 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2228 (attn & HW_INTERRUT_ASSERT_SET_2));
2229 bnx2x_panic();
2230 }
2231}
2232
2233static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2234{
2235 u32 val;
2236
2237 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2238
2239 if (attn & BNX2X_PMF_LINK_ASSERT) {
2240 int func = BP_FUNC(bp);
2241
2242 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2243 bnx2x__link_status_update(bp);
2244 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2245 DRV_STATUS_PMF)
2246 bnx2x_pmf_update(bp);
2247
2248 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2249
2250 BNX2X_ERR("MC assert!\n");
2251 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2252 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2253 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2254 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2255 bnx2x_panic();
2256
2257 } else if (attn & BNX2X_MCP_ASSERT) {
2258
2259 BNX2X_ERR("MCP assert!\n");
2260 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2261 bnx2x_fw_dump(bp);
2262
2263 } else
2264 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2265 }
2266
2267 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2268 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2269 if (attn & BNX2X_GRC_TIMEOUT) {
2270 val = CHIP_IS_E1H(bp) ?
2271 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2272 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2273 }
2274 if (attn & BNX2X_GRC_RSV) {
2275 val = CHIP_IS_E1H(bp) ?
2276 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2277 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2278 }
2279 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2280 }
2281}
2282
2283static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2284{
2285 struct attn_route attn;
2286 struct attn_route group_mask;
2287 int port = BP_PORT(bp);
2288 int index;
2289 u32 reg_addr;
2290 u32 val;
2291
2292 /* need to take HW lock because MCP or other port might also
2293 try to handle this event */
2294 bnx2x_lock_alr(bp);
2295
2296 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2297 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2298 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2299 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2300 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2301 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2302
2303 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2304 if (deasserted & (1 << index)) {
2305 group_mask = bp->attn_group[index];
2306
2307 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2308 index, group_mask.sig[0], group_mask.sig[1],
2309 group_mask.sig[2], group_mask.sig[3]);
2310
2311 bnx2x_attn_int_deasserted3(bp,
2312 attn.sig[3] & group_mask.sig[3]);
2313 bnx2x_attn_int_deasserted1(bp,
2314 attn.sig[1] & group_mask.sig[1]);
2315 bnx2x_attn_int_deasserted2(bp,
2316 attn.sig[2] & group_mask.sig[2]);
2317 bnx2x_attn_int_deasserted0(bp,
2318 attn.sig[0] & group_mask.sig[0]);
2319
2320 if ((attn.sig[0] & group_mask.sig[0] &
2321 HW_PRTY_ASSERT_SET_0) ||
2322 (attn.sig[1] & group_mask.sig[1] &
2323 HW_PRTY_ASSERT_SET_1) ||
2324 (attn.sig[2] & group_mask.sig[2] &
2325 HW_PRTY_ASSERT_SET_2))
2326 BNX2X_ERR("FATAL HW block parity attention\n");
2327 }
2328 }
2329
2330 bnx2x_unlock_alr(bp);
2331
2332 reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_FUNC_BASE * BP_FUNC(bp)) * 8;
2333
2334 val = ~deasserted;
2335/* DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
2336 val, BAR_IGU_INTMEM + reg_addr); */
2337 REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);
2338
2339 if (bp->aeu_mask & (deasserted & 0xff))
2340 BNX2X_ERR("IGU BUG!\n");
2341 if (~bp->attn_state & deasserted)
2342 BNX2X_ERR("IGU BUG!\n");
2343
2344 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2345 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2346
2347 DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
2348 bp->aeu_mask |= (deasserted & 0xff);
2349
2350 DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
2351 REG_WR(bp, reg_addr, bp->aeu_mask);
2352
2353 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2354 bp->attn_state &= ~deasserted;
2355 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2356}
2357
2358static void bnx2x_attn_int(struct bnx2x *bp)
2359{
2360 /* read local copy of bits */
2361 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
2362 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
2363 u32 attn_state = bp->attn_state;
2364
2365 /* look for changed bits */
2366 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2367 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2368
2369 DP(NETIF_MSG_HW,
2370 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2371 attn_bits, attn_ack, asserted, deasserted);
2372
2373 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2374 BNX2X_ERR("BAD attention state\n");
2375
2376 /* handle bits that were raised */
2377 if (asserted)
2378 bnx2x_attn_int_asserted(bp, asserted);
2379
2380 if (deasserted)
2381 bnx2x_attn_int_deasserted(bp, deasserted);
2382}
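/* Editor's note (worked example on hypothetical values): with
 * attn_bits = 0x5, attn_ack = 0x6 and attn_state = 0x6,
 * asserted = 0x5 & ~0x6 & ~0x6 = 0x1 (bit 0 newly raised) and
 * deasserted = ~0x5 & 0x6 & 0x6 = 0x2 (bit 1 just cleared).
 */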
2383
2384static void bnx2x_sp_task(struct work_struct *work)
2385{
2386 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
2387 u16 status;
2388
2389
2390 /* Return here if interrupt is disabled */
2391 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2392 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2393 return;
2394 }
2395
2396 status = bnx2x_update_dsb_idx(bp);
2397/* if (status == 0) */
2398/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2399
2400 DP(BNX2X_MSG_SP, "got a slowpath interrupt (updated %x)\n", status);
2401
2402 /* HW attentions */
2403 if (status & 0x1)
2404 bnx2x_attn_int(bp);
2405
2406 /* CStorm events: query_stats, port delete ramrod */
2407 if (status & 0x2)
2408 bp->stats_pending = 0;
2409
2410 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
2411 IGU_INT_NOP, 1);
2412 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2413 IGU_INT_NOP, 1);
2414 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2415 IGU_INT_NOP, 1);
2416 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2417 IGU_INT_NOP, 1);
2418 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2419 IGU_INT_ENABLE, 1);
2420
2421}
2422
2423static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2424{
2425 struct net_device *dev = dev_instance;
2426 struct bnx2x *bp = netdev_priv(dev);
2427
2428 /* Return here if interrupt is disabled */
2429 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2430 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
2431 return IRQ_HANDLED;
2432 }
2433
2434 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
2435
2436#ifdef BNX2X_STOP_ON_ERROR
2437 if (unlikely(bp->panic))
2438 return IRQ_HANDLED;
2439#endif
2440
2441 schedule_work(&bp->sp_task);
2442
2443 return IRQ_HANDLED;
2444}
2445
2446/* end of slow path */
2447
2448/* Statistics */
2449
2450/****************************************************************************
2451* Macros
2452****************************************************************************/
2453
2454/* sum[hi:lo] += add[hi:lo] */
2455#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2456 do { \
2457 s_lo += a_lo; \
2458 s_hi += a_hi + (s_lo < a_lo) ? 1 : 0; \
2459 } while (0)
2460
2461/* difference = minuend - subtrahend */
2462#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2463 do { \
2464 if (m_lo < s_lo) { \
2465 /* underflow */ \
2466 d_hi = m_hi - s_hi; \
2467 if (d_hi > 0) { \
2468 /* we can 'loan' 1 */ \
2469 d_hi--; \
2470 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2471 } else { \
2472 /* m_hi <= s_hi */ \
2473 d_hi = 0; \
2474 d_lo = 0; \
2475 } \
2476 } else { \
2477 /* m_lo >= s_lo */ \
2478 if (m_hi < s_hi) { \
2479 d_hi = 0; \
2480 d_lo = 0; \
2481 } else { \
2482 /* m_hi >= s_hi */ \
2483 d_hi = m_hi - s_hi; \
2484 d_lo = m_lo - s_lo; \
2485 } \
2486 } \
2487 } while (0)
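/* Editor's note (worked example on hypothetical values): ADD_64 with
 * s = 0x1:0xffffffff and a = 0x0:0x2 wraps s_lo to 0x1; since s_lo < a_lo
 * the carry fires and s becomes 0x2:0x00000001.  DIFF_64 with m = 0x1:0x0
 * and s = 0x0:0x1 takes the underflow branch, 'loans' 1 from d_hi and
 * yields d = 0x0:0xffffffff.
 */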
2488
2489#define UPDATE_STAT64(s, t) \
2490 do { \
2491 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2492 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2493 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2494 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2495 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2496 pstats->mac_stx[1].t##_lo, diff.lo); \
2497 } while (0)
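/* Editor's note (sketch): UPDATE_STAT64(rx_stat_grerb,
 * rx_stat_ifhcinbadoctets) therefore computes the 64-bit delta between the
 * freshly DMAE'd MAC counter and the snapshot in mac_stx[0], refreshes the
 * snapshot, and accumulates the delta into the running total in mac_stx[1].
 */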
2498
2499#define UPDATE_STAT64_NIG(s, t) \
2500 do { \
2501 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2502 diff.lo, new->s##_lo, old->s##_lo); \
2503 ADD_64(estats->t##_hi, diff.hi, \
2504 estats->t##_lo, diff.lo); \
2505 } while (0)
2506
2507/* sum[hi:lo] += add */
2508#define ADD_EXTEND_64(s_hi, s_lo, a) \
2509 do { \
2510 s_lo += a; \
2511 s_hi += (s_lo < a) ? 1 : 0; \
2512 } while (0)
2513
2514#define UPDATE_EXTEND_STAT(s) \
2515 do { \
2516 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2517 pstats->mac_stx[1].s##_lo, \
2518 new->s); \
2519 } while (0)
2520
2521#define UPDATE_EXTEND_TSTAT(s, t) \
2522 do { \
2523 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2524 old_tclient->s = le32_to_cpu(tclient->s); \
2525 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2526 } while (0)
2527
2528#define UPDATE_EXTEND_XSTAT(s, t) \
2529 do { \
2530 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2531 old_xclient->s = le32_to_cpu(xclient->s); \
2532 ADD_EXTEND_64(fstats->t##_hi, fstats->t##_lo, diff); \
2533 } while (0)
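/* Editor's note (sketch): the two macros above widen 32-bit firmware
 * counters to 64 bits.  The u32 subtraction new - old is wrap-safe, so a
 * counter that rolled over since the last query still yields the right
 * delta, which ADD_EXTEND_64 then folds into the 64-bit accumulator.
 */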
2534
2535/*
2536 * General service functions
2537 */
2538
2539static inline long bnx2x_hilo(u32 *hiref)
2540{
2541 u32 lo = *(hiref + 1);
2542#if (BITS_PER_LONG == 64)
2543 u32 hi = *hiref;
2544
2545 return HILO_U64(hi, lo);
2546#else
2547 return lo;
2548#endif
2549}
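/* Editor's note (sketch): on a 64-bit kernel bnx2x_hilo() folds the pair,
 * e.g. hi = 0x1, lo = 0x2 gives 0x100000002; on 32-bit only the low word
 * survives, matching the unsigned long fields of struct net_device_stats.
 */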
2550
2551/*
2552 * Init service functions
2553 */
2554
2555static void bnx2x_storm_stats_init(struct bnx2x *bp)
2556{
2557 int func = BP_FUNC(bp);
2558
2559 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func), 1);
2560 REG_WR(bp, BAR_XSTRORM_INTMEM +
2561 XSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2562
2563 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func), 1);
2564 REG_WR(bp, BAR_TSTRORM_INTMEM +
2565 TSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2566
2567 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func), 0);
2568 REG_WR(bp, BAR_CSTRORM_INTMEM +
2569 CSTORM_STATS_FLAGS_OFFSET(func) + 4, 0);
2570
2571 REG_WR(bp, BAR_XSTRORM_INTMEM +
2572 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2573 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2574 REG_WR(bp, BAR_XSTRORM_INTMEM +
2575 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2576 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2577
2578 REG_WR(bp, BAR_TSTRORM_INTMEM +
2579 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
2580 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2581 REG_WR(bp, BAR_TSTRORM_INTMEM +
2582 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
2583 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2584}
2585
2586static void bnx2x_storm_stats_post(struct bnx2x *bp)
2587{
2588 if (!bp->stats_pending) {
2589 struct eth_query_ramrod_data ramrod_data = {0};
2590 int rc;
2591
2592 ramrod_data.drv_counter = bp->stats_counter++;
2593 ramrod_data.collect_port_1b = bp->port.pmf ? 1 : 0;
2594 ramrod_data.ctr_id_vector = (1 << BP_CL_ID(bp));
2595
2596 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2597 ((u32 *)&ramrod_data)[1],
2598 ((u32 *)&ramrod_data)[0], 0);
2599 if (rc == 0) {
2600 /* stats ramrod has its own slot on the spq */
2601 bp->spq_left++;
2602 bp->stats_pending = 1;
2603 }
2604 }
2605}
2606
2607static void bnx2x_stats_init(struct bnx2x *bp)
2608{
2609 int port = BP_PORT(bp);
2610
2611 bp->executer_idx = 0;
2612 bp->stats_counter = 0;
2613
2614 /* port stats */
2615 if (!BP_NOMCP(bp))
2616 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
2617 else
2618 bp->port.port_stx = 0;
2619 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
2620
2621 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
2622 bp->port.old_nig_stats.brb_discard =
2623 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
2624 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
2625 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
2626 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
2627 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
2628
2629 /* function stats */
2630 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
2631 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
2632 memset(&bp->old_xclient, 0, sizeof(struct xstorm_per_client_stats));
2633 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
2634
2635 bp->stats_state = STATS_STATE_DISABLED;
2636 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
2637 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2638}
2639
2640static void bnx2x_hw_stats_post(struct bnx2x *bp)
2641{
2642 struct dmae_command *dmae = &bp->stats_dmae;
2643 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2644
2645 *stats_comp = DMAE_COMP_VAL;
2646
2647 /* loader */
2648 if (bp->executer_idx) {
2649 int loader_idx = PMF_DMAE_C(bp);
2650
2651 memset(dmae, 0, sizeof(struct dmae_command));
2652
2653 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2654 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2655 DMAE_CMD_DST_RESET |
2656#ifdef __BIG_ENDIAN
2657 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2658#else
2659 DMAE_CMD_ENDIANITY_DW_SWAP |
2660#endif
2661 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
2662 DMAE_CMD_PORT_0) |
2663 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
2664 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
2665 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
2666 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
2667 sizeof(struct dmae_command) *
2668 (loader_idx + 1)) >> 2;
2669 dmae->dst_addr_hi = 0;
2670 dmae->len = sizeof(struct dmae_command) >> 2;
2671 if (CHIP_IS_E1(bp))
2672 dmae->len--;
2673 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
2674 dmae->comp_addr_hi = 0;
2675 dmae->comp_val = 1;
2676
2677 *stats_comp = 0;
2678 bnx2x_post_dmae(bp, dmae, loader_idx);
2679
2680 } else if (bp->func_stx) {
2681 *stats_comp = 0;
2682 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
2683 }
2684}
2685
2686static int bnx2x_stats_comp(struct bnx2x *bp)
2687{
2688 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2689 int cnt = 10;
2690
2691 might_sleep();
2692 while (*stats_comp != DMAE_COMP_VAL) {
2693 msleep(1);
2694 if (!cnt) {
2695 BNX2X_ERR("timeout waiting for stats finished\n");
2696 break;
2697 }
2698 cnt--;
2699 }
2700 return 1;
2701}
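/* Editor's note (sketch): the completion handshake used with the flag
 * polled above -- the issuer zeroes it, points the final DMAE command's
 * completion address at it with comp_val = DMAE_COMP_VAL, and then waits:
 *
 *	*stats_comp = 0;
 *	bnx2x_hw_stats_post(bp);
 *	bnx2x_stats_comp(bp);	// polls up to ~10ms for DMAE_COMP_VAL
 */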
2702
2703/*
2704 * Statistics service functions
2705 */
2706
2707static void bnx2x_stats_pmf_update(struct bnx2x *bp)
2708{
2709 struct dmae_command *dmae;
2710 u32 opcode;
2711 int loader_idx = PMF_DMAE_C(bp);
2712 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2713
2714 /* sanity */
2715 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
2716 BNX2X_ERR("BUG!\n");
2717 return;
2718 }
2719
2720 bp->executer_idx = 0;
2721
2722 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2723 DMAE_CMD_C_ENABLE |
2724 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2725#ifdef __BIG_ENDIAN
2726 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2727#else
2728 DMAE_CMD_ENDIANITY_DW_SWAP |
2729#endif
2730 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2731 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
2732
2733 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2734 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
2735 dmae->src_addr_lo = bp->port.port_stx >> 2;
2736 dmae->src_addr_hi = 0;
2737 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
2738 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
2739 dmae->len = DMAE_LEN32_RD_MAX;
2740 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2741 dmae->comp_addr_hi = 0;
2742 dmae->comp_val = 1;
2743
2744 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2745 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
2746 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
2747 dmae->src_addr_hi = 0;
2748 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)
2749 + DMAE_LEN32_RD_MAX * 4);
2750 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)
2751 + DMAE_LEN32_RD_MAX * 4);
2752 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
2753 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
2754 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
2755 dmae->comp_val = DMAE_COMP_VAL;
2756
2757 *stats_comp = 0;
2758 bnx2x_hw_stats_post(bp);
2759 bnx2x_stats_comp(bp);
2760}
2761
2762static void bnx2x_port_stats_init(struct bnx2x *bp)
2763{
2764 struct dmae_command *dmae;
2765 int port = BP_PORT(bp);
2766 int vn = BP_E1HVN(bp);
2767 u32 opcode;
2768 int loader_idx = PMF_DMAE_C(bp);
2769 u32 mac_addr;
2770 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2771
2772 /* sanity */
2773 if (!bp->link_vars.link_up || !bp->port.pmf) {
2774 BNX2X_ERR("BUG!\n");
2775 return;
2776 }
2777
2778 bp->executer_idx = 0;
2779
2780 /* MCP */
2781 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2782 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2783 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2784#ifdef __BIG_ENDIAN
2785 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2786#else
2787 DMAE_CMD_ENDIANITY_DW_SWAP |
2788#endif
2789 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2790 (vn << DMAE_CMD_E1HVN_SHIFT));
2791
2792 if (bp->port.port_stx) {
2793
2794 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2795 dmae->opcode = opcode;
2796 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
2797 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
2798 dmae->dst_addr_lo = bp->port.port_stx >> 2;
2799 dmae->dst_addr_hi = 0;
2800 dmae->len = sizeof(struct host_port_stats) >> 2;
2801 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2802 dmae->comp_addr_hi = 0;
2803 dmae->comp_val = 1;
2804 }
2805
2806 if (bp->func_stx) {
2807
2808 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2809 dmae->opcode = opcode;
2810 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
2811 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
2812 dmae->dst_addr_lo = bp->func_stx >> 2;
2813 dmae->dst_addr_hi = 0;
2814 dmae->len = sizeof(struct host_func_stats) >> 2;
2815 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2816 dmae->comp_addr_hi = 0;
2817 dmae->comp_val = 1;
2818 }
2819
2820 /* MAC */
2821 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2822 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2823 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2824#ifdef __BIG_ENDIAN
2825 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2826#else
2827 DMAE_CMD_ENDIANITY_DW_SWAP |
2828#endif
2829 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2830 (vn << DMAE_CMD_E1HVN_SHIFT));
2831
2832 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2833
2834 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
2835 NIG_REG_INGRESS_BMAC0_MEM);
2836
2837 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
2838 BIGMAC_REGISTER_TX_STAT_GTBYT */
2839 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2840 dmae->opcode = opcode;
2841 dmae->src_addr_lo = (mac_addr +
2842 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2843 dmae->src_addr_hi = 0;
2844 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2845 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2846 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
2847 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2848 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2849 dmae->comp_addr_hi = 0;
2850 dmae->comp_val = 1;
2851
2852 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
2853 BIGMAC_REGISTER_RX_STAT_GRIPJ */
2854 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2855 dmae->opcode = opcode;
2856 dmae->src_addr_lo = (mac_addr +
2857 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2858 dmae->src_addr_hi = 0;
2859 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2860 offsetof(struct bmac_stats, rx_stat_gr64_lo));
2861 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2862 offsetof(struct bmac_stats, rx_stat_gr64_lo));
2863 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
2864 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2865 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2866 dmae->comp_addr_hi = 0;
2867 dmae->comp_val = 1;
2868
2869 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
2870
2871 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
2872
2873 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
2874 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2875 dmae->opcode = opcode;
2876 dmae->src_addr_lo = (mac_addr +
2877 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
2878 dmae->src_addr_hi = 0;
2879 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2880 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2881 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
2882 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2883 dmae->comp_addr_hi = 0;
2884 dmae->comp_val = 1;
2885
2886 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
2887 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2888 dmae->opcode = opcode;
2889 dmae->src_addr_lo = (mac_addr +
2890 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
2891 dmae->src_addr_hi = 0;
2892 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2893 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
2894 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2895 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
2896 dmae->len = 1;
2897 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2898 dmae->comp_addr_hi = 0;
2899 dmae->comp_val = 1;
2900
2901 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
2902 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2903 dmae->opcode = opcode;
2904 dmae->src_addr_lo = (mac_addr +
2905 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
2906 dmae->src_addr_hi = 0;
2907 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2908 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
2909 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2910 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
2911 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
2912 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2913 dmae->comp_addr_hi = 0;
2914 dmae->comp_val = 1;
2915 }
2916
2917 /* NIG */
2918 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2919 dmae->opcode = opcode;
2920 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
2921 NIG_REG_STAT0_BRB_DISCARD) >> 2;
2922 dmae->src_addr_hi = 0;
2923 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
2924 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
2925 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
2926 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2927 dmae->comp_addr_hi = 0;
2928 dmae->comp_val = 1;
2929
2930 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2931 dmae->opcode = opcode;
2932 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
2933 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
2934 dmae->src_addr_hi = 0;
2935 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
2936 offsetof(struct nig_stats, egress_mac_pkt0_lo));
2937 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
2938 offsetof(struct nig_stats, egress_mac_pkt0_lo));
2939 dmae->len = (2*sizeof(u32)) >> 2;
2940 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2941 dmae->comp_addr_hi = 0;
2942 dmae->comp_val = 1;
2943
2944 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2945 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2946 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2947 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2948#ifdef __BIG_ENDIAN
2949 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2950#else
2951 DMAE_CMD_ENDIANITY_DW_SWAP |
2952#endif
2953 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2954 (vn << DMAE_CMD_E1HVN_SHIFT));
2955 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
2956 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
2957 dmae->src_addr_hi = 0;
2958 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
2959 offsetof(struct nig_stats, egress_mac_pkt1_lo));
2960 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
2961 offsetof(struct nig_stats, egress_mac_pkt1_lo));
2962 dmae->len = (2*sizeof(u32)) >> 2;
2963 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
2964 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
2965 dmae->comp_val = DMAE_COMP_VAL;
2966
2967 *stats_comp = 0;
2968}
2969
2970static void bnx2x_func_stats_init(struct bnx2x *bp)
2971{
2972 struct dmae_command *dmae = &bp->stats_dmae;
2973 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
2974
2975 /* sanity */
2976 if (!bp->func_stx) {
2977 BNX2X_ERR("BUG!\n");
2978 return;
2979 }
2980
2981 bp->executer_idx = 0;
2982 memset(dmae, 0, sizeof(struct dmae_command));
2983
2984 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2985 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2986 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2987#ifdef __BIG_ENDIAN
2988 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2989#else
2990 DMAE_CMD_ENDIANITY_DW_SWAP |
2991#endif
2992 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
2993 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
2994 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
2995 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
2996 dmae->dst_addr_lo = bp->func_stx >> 2;
2997 dmae->dst_addr_hi = 0;
2998 dmae->len = sizeof(struct host_func_stats) >> 2;
2999 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3000 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3001 dmae->comp_val = DMAE_COMP_VAL;
3002
3003 *stats_comp = 0;
3004}
3005
3006static void bnx2x_stats_start(struct bnx2x *bp)
3007{
3008 if (bp->port.pmf)
3009 bnx2x_port_stats_init(bp);
3010
3011 else if (bp->func_stx)
3012 bnx2x_func_stats_init(bp);
3013
3014 bnx2x_hw_stats_post(bp);
3015 bnx2x_storm_stats_post(bp);
3016}
3017
3018static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3019{
3020 bnx2x_stats_comp(bp);
3021 bnx2x_stats_pmf_update(bp);
3022 bnx2x_stats_start(bp);
3023}
3024
3025static void bnx2x_stats_restart(struct bnx2x *bp)
3026{
3027 bnx2x_stats_comp(bp);
3028 bnx2x_stats_start(bp);
3029}
3030
3031static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3032{
3033 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3034 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3035 struct regpair diff;
3036
3037 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3038 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3039 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3040 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3041 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3042 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3043 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3044 UPDATE_STAT64(rx_stat_grxcf, rx_stat_bmac_xcf);
3045 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3046 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffpauseframesreceived);
3047 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3048 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3049 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3050 UPDATE_STAT64(tx_stat_gt127,
3051 tx_stat_etherstatspkts65octetsto127octets);
3052 UPDATE_STAT64(tx_stat_gt255,
3053 tx_stat_etherstatspkts128octetsto255octets);
3054 UPDATE_STAT64(tx_stat_gt511,
3055 tx_stat_etherstatspkts256octetsto511octets);
3056 UPDATE_STAT64(tx_stat_gt1023,
3057 tx_stat_etherstatspkts512octetsto1023octets);
3058 UPDATE_STAT64(tx_stat_gt1518,
3059 tx_stat_etherstatspkts1024octetsto1522octets);
3060 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3061 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3062 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3063 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3064 UPDATE_STAT64(tx_stat_gterr,
3065 tx_stat_dot3statsinternalmactransmiterrors);
3066 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3067}
3068
3069static void bnx2x_emac_stats_update(struct bnx2x *bp)
3070{
3071 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3072 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3073
3074 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3075 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3076 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3077 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3078 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3079 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3080 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3081 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3082 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3083 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3084 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3085 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3086 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3087 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3088 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3089 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3090 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3091 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3092 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3093 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3094 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3095 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3096 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3097 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3098 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3099 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3100 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3101 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3102 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3103 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3104 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3105}
3106
3107static int bnx2x_hw_stats_update(struct bnx2x *bp)
3108{
3109 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3110 struct nig_stats *old = &(bp->port.old_nig_stats);
3111 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3112 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3113 struct regpair diff;
3114
3115 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3116 bnx2x_bmac_stats_update(bp);
3117
3118 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3119 bnx2x_emac_stats_update(bp);
3120
3121 else { /* unreached */
3122 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3123 return -1;
3124 }
3125
3126 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3127 new->brb_discard - old->brb_discard);
3128
3129 UPDATE_STAT64_NIG(egress_mac_pkt0,
3130 etherstatspkts1024octetsto1522octets);
3131 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3132
3133 memcpy(old, new, sizeof(struct nig_stats));
3134
3135 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3136 sizeof(struct mac_stx));
3137 estats->brb_drop_hi = pstats->brb_drop_hi;
3138 estats->brb_drop_lo = pstats->brb_drop_lo;
3139
3140 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3141
3142 return 0;
3143}
3144
3145static int bnx2x_storm_stats_update(struct bnx2x *bp)
3146{
3147 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3148 int cl_id = BP_CL_ID(bp);
3149 struct tstorm_per_port_stats *tport =
3150 &stats->tstorm_common.port_statistics;
a2fbb9ea 3151 struct tstorm_per_client_stats *tclient =
3152 &stats->tstorm_common.client_statistics[cl_id];
3153 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3154 struct xstorm_per_client_stats *xclient =
3155 &stats->xstorm_common.client_statistics[cl_id];
3156 struct xstorm_per_client_stats *old_xclient = &bp->old_xclient;
3157 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3158 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3159 u32 diff;
3160
3161 /* are storm stats valid? */
3162 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3163 bp->stats_counter) {
3164 DP(BNX2X_MSG_STATS, "stats not updated by tstorm"
3165 " tstorm counter (%d) != stats_counter (%d)\n",
3166 tclient->stats_counter, bp->stats_counter);
3167 return -1;
3168 }
3169 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3170 bp->stats_counter) {
3171 DP(BNX2X_MSG_STATS, "stats not updated by xstorm"
3172 " xstorm counter (%d) != stats_counter (%d)\n",
3173 xclient->stats_counter, bp->stats_counter);
3174 return -2;
3175 }
3176
3177 fstats->total_bytes_received_hi =
3178 fstats->valid_bytes_received_hi =
3179 le32_to_cpu(tclient->total_rcv_bytes.hi);
3180 fstats->total_bytes_received_lo =
3181 fstats->valid_bytes_received_lo =
3182 le32_to_cpu(tclient->total_rcv_bytes.lo);
3183
3184 estats->error_bytes_received_hi =
3185 le32_to_cpu(tclient->rcv_error_bytes.hi);
3186 estats->error_bytes_received_lo =
3187 le32_to_cpu(tclient->rcv_error_bytes.lo);
3188 ADD_64(estats->error_bytes_received_hi,
3189 estats->rx_stat_ifhcinbadoctets_hi,
3190 estats->error_bytes_received_lo,
3191 estats->rx_stat_ifhcinbadoctets_lo);
3192
3193 ADD_64(fstats->total_bytes_received_hi,
3194 estats->error_bytes_received_hi,
3195 fstats->total_bytes_received_lo,
3196 estats->error_bytes_received_lo);
3197
3198 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts, total_unicast_packets_received);
3199 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3200 total_multicast_packets_received);
3201 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3202 total_broadcast_packets_received);
3203
3204 fstats->total_bytes_transmitted_hi =
3205 le32_to_cpu(xclient->total_sent_bytes.hi);
3206 fstats->total_bytes_transmitted_lo =
3207 le32_to_cpu(xclient->total_sent_bytes.lo);
3208
3209 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3210 total_unicast_packets_transmitted);
3211 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3212 total_multicast_packets_transmitted);
3213 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3214 total_broadcast_packets_transmitted);
3215
3216 memcpy(estats, &(fstats->total_bytes_received_hi),
3217 sizeof(struct host_func_stats) - 2*sizeof(u32));
3218
3219 estats->mac_filter_discard = le32_to_cpu(tport->mac_filter_discard);
3220 estats->xxoverflow_discard = le32_to_cpu(tport->xxoverflow_discard);
3221 estats->brb_truncate_discard =
3222 le32_to_cpu(tport->brb_truncate_discard);
3223 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3224
3225 old_tclient->rcv_unicast_bytes.hi =
3226 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
3227 old_tclient->rcv_unicast_bytes.lo =
3228 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
3229 old_tclient->rcv_broadcast_bytes.hi =
3230 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3231 old_tclient->rcv_broadcast_bytes.lo =
3232 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3233 old_tclient->rcv_multicast_bytes.hi =
3234 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
3235 old_tclient->rcv_multicast_bytes.lo =
3236 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
3237 old_tclient->total_rcv_pkts = le32_to_cpu(tclient->total_rcv_pkts);
3238
3239 old_tclient->checksum_discard = le32_to_cpu(tclient->checksum_discard);
3240 old_tclient->packets_too_big_discard =
3241 le32_to_cpu(tclient->packets_too_big_discard);
3242 estats->no_buff_discard =
3243 old_tclient->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
3244 old_tclient->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
3245
3246 old_xclient->total_sent_pkts = le32_to_cpu(xclient->total_sent_pkts);
3247 old_xclient->unicast_bytes_sent.hi =
3248 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3249 old_xclient->unicast_bytes_sent.lo =
3250 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3251 old_xclient->multicast_bytes_sent.hi =
3252 le32_to_cpu(xclient->multicast_bytes_sent.hi);
3253 old_xclient->multicast_bytes_sent.lo =
3254 le32_to_cpu(xclient->multicast_bytes_sent.lo);
3255 old_xclient->broadcast_bytes_sent.hi =
3256 le32_to_cpu(xclient->broadcast_bytes_sent.hi);
3257 old_xclient->broadcast_bytes_sent.lo =
3258 le32_to_cpu(xclient->broadcast_bytes_sent.lo);
3259
3260 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3261
3262 return 0;
3263}
3264
3265static void bnx2x_net_stats_update(struct bnx2x *bp)
3266{
3267 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3268 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3269 struct net_device_stats *nstats = &bp->dev->stats;
3270
3271 nstats->rx_packets =
3272 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3273 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3274 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3275
3276 nstats->tx_packets =
3277 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3278 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3279 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3280
3281 nstats->rx_bytes = bnx2x_hilo(&estats->valid_bytes_received_hi);
3282
3283 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3284
3285 nstats->rx_dropped = old_tclient->checksum_discard +
3286 estats->mac_discard;
3287 nstats->tx_dropped = 0;
3288
3289 nstats->multicast =
3290 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
3291
3292 nstats->collisions =
3293 estats->tx_stat_dot3statssinglecollisionframes_lo +
3294 estats->tx_stat_dot3statsmultiplecollisionframes_lo +
3295 estats->tx_stat_dot3statslatecollisions_lo +
3296 estats->tx_stat_dot3statsexcessivecollisions_lo;
3297
3298 estats->jabber_packets_received =
3299 old_tclient->packets_too_big_discard +
3300 estats->rx_stat_dot3statsframestoolong_lo;
3301
3302 nstats->rx_length_errors =
3303 estats->rx_stat_etherstatsundersizepkts_lo +
3304 estats->jabber_packets_received;
3305 nstats->rx_over_errors = estats->brb_drop_lo +
3306 estats->brb_truncate_discard;
3307 nstats->rx_crc_errors = estats->rx_stat_dot3statsfcserrors_lo;
3308 nstats->rx_frame_errors = estats->rx_stat_dot3statsalignmenterrors_lo;
3309 nstats->rx_fifo_errors = old_tclient->no_buff_discard;
3310 nstats->rx_missed_errors = estats->xxoverflow_discard;
3311
3312 nstats->rx_errors = nstats->rx_length_errors +
3313 nstats->rx_over_errors +
3314 nstats->rx_crc_errors +
3315 nstats->rx_frame_errors +
3316 nstats->rx_fifo_errors +
3317 nstats->rx_missed_errors;
3318
3319 nstats->tx_aborted_errors =
3320 estats->tx_stat_dot3statslatecollisions_lo +
3321 estats->tx_stat_dot3statsexcessivecollisions_lo;
3322 nstats->tx_carrier_errors = estats->rx_stat_falsecarriererrors_lo;
3323 nstats->tx_fifo_errors = 0;
3324 nstats->tx_heartbeat_errors = 0;
3325 nstats->tx_window_errors = 0;
3326
3327 nstats->tx_errors = nstats->tx_aborted_errors +
3328 nstats->tx_carrier_errors;
3329}
3330
3331static void bnx2x_stats_update(struct bnx2x *bp)
3332{
3333 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3334 int update = 0;
3335
3336 if (*stats_comp != DMAE_COMP_VAL)
3337 return;
3338
3339 if (bp->port.pmf)
3340 update = (bnx2x_hw_stats_update(bp) == 0);
3341
3342 update |= (bnx2x_storm_stats_update(bp) == 0);
3343
3344 if (update)
3345 bnx2x_net_stats_update(bp);
3346
3347 else {
3348 if (bp->stats_pending) {
3349 bp->stats_pending++;
3350 if (bp->stats_pending == 3) {
3351 BNX2X_ERR("stats not updated for 3 times\n");
3352 bnx2x_panic();
3353 return;
3354 }
3355 }
3356 }
3357
3358 if (bp->msglevel & NETIF_MSG_TIMER) {
3359 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
3360 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3361 struct net_device_stats *nstats = &bp->dev->stats;
3362 int i;
3363
3364 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3365 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3366 " tx pkt (%lx)\n",
3367 bnx2x_tx_avail(bp->fp),
3368 *bp->fp->tx_cons_sb, nstats->tx_packets);
3369 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3370 " rx pkt (%lx)\n",
3371 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
3372 *bp->fp->rx_cons_sb, nstats->rx_packets);
3373 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
3374 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
3375 estats->driver_xoff, estats->brb_drop_lo);
3376 printk(KERN_DEBUG "tstats: checksum_discard %u "
3377 "packets_too_big_discard %u no_buff_discard %u "
3378 "mac_discard %u mac_filter_discard %u "
3379 "xxovrflow_discard %u brb_truncate_discard %u "
3380 "ttl0_discard %u\n",
3381 old_tclient->checksum_discard,
3382 old_tclient->packets_too_big_discard,
3383 old_tclient->no_buff_discard, estats->mac_discard,
3384 estats->mac_filter_discard, estats->xxoverflow_discard,
3385 estats->brb_truncate_discard,
3386 old_tclient->ttl0_discard);
3387
3388 for_each_queue(bp, i) {
3389 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3390 bnx2x_fp(bp, i, tx_pkt),
3391 bnx2x_fp(bp, i, rx_pkt),
3392 bnx2x_fp(bp, i, rx_calls));
3393 }
3394 }
3395
bb2a0f7a
YG
3396 bnx2x_hw_stats_post(bp);
3397 bnx2x_storm_stats_post(bp);
3398}
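/* Flow of the update above, for the reader: nothing runs until the DMAE
 * completion word equals DMAE_COMP_VAL; only the PMF refreshes the
 * hardware (MAC/NIG) statistics, every function merges its storm
 * statistics, and a stats request left pending for three timer ticks
 * escalates to bnx2x_panic() as a firmware-health check. */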
a2fbb9ea 3399
bb2a0f7a
YG
3400static void bnx2x_port_stats_stop(struct bnx2x *bp)
3401{
3402 struct dmae_command *dmae;
3403 u32 opcode;
3404 int loader_idx = PMF_DMAE_C(bp);
3405 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3406
bb2a0f7a 3407 bp->executer_idx = 0;
a2fbb9ea 3408
bb2a0f7a
YG
3409 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3410 DMAE_CMD_C_ENABLE |
3411 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3412#ifdef __BIG_ENDIAN
bb2a0f7a 3413 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3414#else
bb2a0f7a 3415 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3416#endif
bb2a0f7a
YG
3417 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3418 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3419
3420 if (bp->port.port_stx) {
3421
3422 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3423 if (bp->func_stx)
3424 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3425 else
3426 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3427 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3428 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3429 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3430 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3431 dmae->len = sizeof(struct host_port_stats) >> 2;
3432 if (bp->func_stx) {
3433 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3434 dmae->comp_addr_hi = 0;
3435 dmae->comp_val = 1;
3436 } else {
3437 dmae->comp_addr_lo =
3438 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3439 dmae->comp_addr_hi =
3440 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3441 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3442
bb2a0f7a
YG
3443 *stats_comp = 0;
3444 }
a2fbb9ea
ET
3445 }
3446
bb2a0f7a
YG
3447 if (bp->func_stx) {
3448
3449 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3450 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3451 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3452 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3453 dmae->dst_addr_lo = bp->func_stx >> 2;
3454 dmae->dst_addr_hi = 0;
3455 dmae->len = sizeof(struct host_func_stats) >> 2;
3456 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3457 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3458 dmae->comp_val = DMAE_COMP_VAL;
3459
3460 *stats_comp = 0;
a2fbb9ea 3461 }
bb2a0f7a
YG
3462}
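/* DMAE chaining note: when both port_stx and func_stx are set, the
 * first command completes by poking the loader register of the next
 * one (dmae_reg_go_c[loader_idx]) instead of writing to host memory;
 * only the last command in the chain writes DMAE_COMP_VAL into
 * stats_comp for bnx2x_stats_comp() to poll on. */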
3463
3464static void bnx2x_stats_stop(struct bnx2x *bp)
3465{
3466 int update = 0;
3467
3468 bnx2x_stats_comp(bp);
3469
3470 if (bp->port.pmf)
3471 update = (bnx2x_hw_stats_update(bp) == 0);
3472
3473 update |= (bnx2x_storm_stats_update(bp) == 0);
3474
3475 if (update) {
3476 bnx2x_net_stats_update(bp);
a2fbb9ea 3477
bb2a0f7a
YG
3478 if (bp->port.pmf)
3479 bnx2x_port_stats_stop(bp);
3480
3481 bnx2x_hw_stats_post(bp);
3482 bnx2x_stats_comp(bp);
a2fbb9ea
ET
3483 }
3484}
3485
bb2a0f7a
YG
3486static void bnx2x_stats_do_nothing(struct bnx2x *bp)
3487{
3488}
3489
3490static const struct {
3491 void (*action)(struct bnx2x *bp);
3492 enum bnx2x_stats_state next_state;
3493} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
3494/* state event */
3495{
3496/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
3497/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
3498/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
3499/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
3500},
3501{
3502/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
3503/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
3504/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
3505/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
3506}
3507};
3508
3509static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
3510{
3511 enum bnx2x_stats_state state = bp->stats_state;
3512
3513 bnx2x_stats_stm[state][event].action(bp);
3514 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
3515
3516 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
3517 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
3518 state, event, bp->stats_state);
3519}
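/* Dispatch example: the table is indexed [state][event], so a
 * STATS_EVENT_UPDATE arriving while ENABLED runs bnx2x_stats_update()
 * and stays in ENABLED, while the same event in DISABLED lands in
 * bnx2x_stats_do_nothing(); every cell is populated, so
 * bnx2x_stats_handle() is safe for any (state, event) pair. */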
3520
a2fbb9ea
ET
3521static void bnx2x_timer(unsigned long data)
3522{
3523 struct bnx2x *bp = (struct bnx2x *) data;
3524
3525 if (!netif_running(bp->dev))
3526 return;
3527
3528 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3529 goto timer_restart;
a2fbb9ea
ET
3530
3531 if (poll) {
3532 struct bnx2x_fastpath *fp = &bp->fp[0];
3533 int rc;
3534
3535 bnx2x_tx_int(fp, 1000);
3536 rc = bnx2x_rx_int(fp, 1000);
3537 }
3538
34f80b04
EG
3539 if (!BP_NOMCP(bp)) {
3540 int func = BP_FUNC(bp);
a2fbb9ea
ET
3541 u32 drv_pulse;
3542 u32 mcp_pulse;
3543
3544 ++bp->fw_drv_pulse_wr_seq;
3545 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3546 /* TBD - add SYSTEM_TIME */
3547 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 3548 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 3549
34f80b04 3550 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
a2fbb9ea
ET
3551 MCP_PULSE_SEQ_MASK);
3552 /* The delta between driver pulse and mcp response
3553 * should be 1 (before mcp response) or 0 (after mcp response)
3554 */
3555 if ((drv_pulse != mcp_pulse) &&
3556 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3557 /* someone lost a heartbeat... */
3558 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3559 drv_pulse, mcp_pulse);
3560 }
3561 }
3562
bb2a0f7a
YG
3563 if ((bp->state == BNX2X_STATE_OPEN) ||
3564 (bp->state == BNX2X_STATE_DISABLED))
3565 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3566
f1410647 3567timer_restart:
a2fbb9ea
ET
3568 mod_timer(&bp->timer, jiffies + bp->current_interval);
3569}
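/* The pulse check above tolerates exactly one outstanding beat: with
 * drv_pulse == 5 the MCP may legitimately report 5 (already
 * acknowledged) or 4 (acknowledgement still pending); anything else
 * means one side stopped beating, and only an error is logged, with
 * no recovery action taken here. */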
3570
3571/* end of Statistics */
3572
3573/* nic init */
3574
3575/*
3576 * nic init service functions
3577 */
3578
34f80b04 3579static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 3580{
34f80b04
EG
3581 int port = BP_PORT(bp);
3582
3583 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3584 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3585 sizeof(struct ustorm_def_status_block)/4);
3586 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3587 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
3588 sizeof(struct cstorm_def_status_block)/4);
3589}
3590
3591static void bnx2x_init_sb(struct bnx2x *bp, int sb_id,
3592 struct host_status_block *sb, dma_addr_t mapping)
3593{
3594 int port = BP_PORT(bp);
bb2a0f7a 3595 int func = BP_FUNC(bp);
a2fbb9ea 3596 int index;
34f80b04 3597 u64 section;
a2fbb9ea
ET
3598
3599 /* USTORM */
3600 section = ((u64)mapping) + offsetof(struct host_status_block,
3601 u_status_block);
34f80b04 3602 sb->u_status_block.status_block_id = sb_id;
a2fbb9ea
ET
3603
3604 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3605 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 3606 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3607 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 3608 U64_HI(section));
bb2a0f7a
YG
3609 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
3610 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
a2fbb9ea
ET
3611
3612 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
3613 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 3614 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
a2fbb9ea
ET
3615
3616 /* CSTORM */
3617 section = ((u64)mapping) + offsetof(struct host_status_block,
3618 c_status_block);
34f80b04 3619 sb->c_status_block.status_block_id = sb_id;
a2fbb9ea
ET
3620
3621 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3622 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 3623 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3624 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea
ET
3625 U64_HI(section));
3626
3627 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
3628 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04
EG
3629 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
3630
3631 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
3632}
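/* Both halves of the status block are wired identically: each storm
 * gets the DMA address of its section plus the owning function id, and
 * every HC index starts with its HC_DISABLE word set to 1 until
 * bnx2x_update_coalesce() re-enables the rx/tx CQ indices below. */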
3633
3634static void bnx2x_zero_def_sb(struct bnx2x *bp)
3635{
3636 int func = BP_FUNC(bp);
a2fbb9ea 3637
34f80b04
EG
3638 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
3639 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3640 sizeof(struct ustorm_def_status_block)/4);
3641 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
3642 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3643 sizeof(struct cstorm_def_status_block)/4);
3644 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
3645 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3646 sizeof(struct xstorm_def_status_block)/4);
3647 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
3648 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
3649 sizeof(struct tstorm_def_status_block)/4);
a2fbb9ea
ET
3650}
3651
3652static void bnx2x_init_def_sb(struct bnx2x *bp,
3653 struct host_def_status_block *def_sb,
34f80b04 3654 dma_addr_t mapping, int sb_id)
a2fbb9ea 3655{
34f80b04
EG
3656 int port = BP_PORT(bp);
3657 int func = BP_FUNC(bp);
a2fbb9ea
ET
3658 int index, val, reg_offset;
3659 u64 section;
3660
3661 /* ATTN */
3662 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3663 atten_status_block);
34f80b04 3664 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 3665
49d66772
ET
3666 bp->def_att_idx = 0;
3667 bp->attn_state = 0;
3668
a2fbb9ea
ET
3669 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3670 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
3671
34f80b04 3672 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
a2fbb9ea
ET
3673 bp->attn_group[index].sig[0] = REG_RD(bp,
3674 reg_offset + 0x10*index);
3675 bp->attn_group[index].sig[1] = REG_RD(bp,
3676 reg_offset + 0x4 + 0x10*index);
3677 bp->attn_group[index].sig[2] = REG_RD(bp,
3678 reg_offset + 0x8 + 0x10*index);
3679 bp->attn_group[index].sig[3] = REG_RD(bp,
3680 reg_offset + 0xc + 0x10*index);
3681 }
3682
3683 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3684 MISC_REG_AEU_MASK_ATTN_FUNC_0));
3685
3686 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
3687 HC_REG_ATTN_MSG0_ADDR_L);
3688
3689 REG_WR(bp, reg_offset, U64_LO(section));
3690 REG_WR(bp, reg_offset + 4, U64_HI(section));
3691
3692 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
3693
3694 val = REG_RD(bp, reg_offset);
34f80b04 3695 val |= sb_id;
a2fbb9ea
ET
3696 REG_WR(bp, reg_offset, val);
3697
3698 /* USTORM */
3699 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3700 u_def_status_block);
34f80b04 3701 def_sb->u_def_status_block.status_block_id = sb_id;
a2fbb9ea 3702
49d66772
ET
3703 bp->def_u_idx = 0;
3704
a2fbb9ea 3705 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3706 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3707 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3708 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3709 U64_HI(section));
34f80b04
EG
3710 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
3711 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3712 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
3713 BNX2X_BTR);
3714
3715 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
3716 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 3717 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
3718
3719 /* CSTORM */
3720 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3721 c_def_status_block);
34f80b04 3722 def_sb->c_def_status_block.status_block_id = sb_id;
a2fbb9ea 3723
49d66772
ET
3724 bp->def_c_idx = 0;
3725
a2fbb9ea 3726 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3727 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3728 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 3729 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3730 U64_HI(section));
34f80b04
EG
3731 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
3732 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3733 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
3734 BNX2X_BTR);
3735
3736 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
3737 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 3738 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
3739
3740 /* TSTORM */
3741 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3742 t_def_status_block);
34f80b04 3743 def_sb->t_def_status_block.status_block_id = sb_id;
a2fbb9ea 3744
49d66772
ET
3745 bp->def_t_idx = 0;
3746
a2fbb9ea 3747 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3748 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3749 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 3750 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3751 U64_HI(section));
34f80b04
EG
3752 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
3753 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3754 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
3755 BNX2X_BTR);
3756
3757 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
3758 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 3759 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
a2fbb9ea
ET
3760
3761 /* XSTORM */
3762 section = ((u64)mapping) + offsetof(struct host_def_status_block,
3763 x_def_status_block);
34f80b04 3764 def_sb->x_def_status_block.status_block_id = sb_id;
a2fbb9ea 3765
49d66772
ET
3766 bp->def_x_idx = 0;
3767
a2fbb9ea 3768 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 3769 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 3770 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 3771 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 3772 U64_HI(section));
34f80b04
EG
3773 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
3774 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
3775 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(func),
a2fbb9ea
ET
3776 BNX2X_BTR);
3777
3778 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
3779 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 3780 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 3781
bb2a0f7a
YG
3782 bp->stats_pending = 0;
3783
34f80b04 3784 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
3785}
3786
3787static void bnx2x_update_coalesce(struct bnx2x *bp)
3788{
34f80b04 3789 int port = BP_PORT(bp);
a2fbb9ea
ET
3790 int i;
3791
3792 for_each_queue(bp, i) {
34f80b04 3793 int sb_id = bp->fp[i].sb_id;
a2fbb9ea
ET
3794
3795 /* HC_INDEX_U_ETH_RX_CQ_CONS */
3796 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 3797 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 3798 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 3799 bp->rx_ticks/12);
a2fbb9ea 3800 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 3801 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 3802 HC_INDEX_U_ETH_RX_CQ_CONS),
34f80b04 3803 bp->rx_ticks ? 0 : 1);
a2fbb9ea
ET
3804
3805 /* HC_INDEX_C_ETH_TX_CQ_CONS */
3806 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 3807 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
a2fbb9ea 3808 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 3809 bp->tx_ticks/12);
a2fbb9ea 3810 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 3811 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
a2fbb9ea 3812 HC_INDEX_C_ETH_TX_CQ_CONS),
34f80b04 3813 bp->tx_ticks ? 0 : 1);
a2fbb9ea
ET
3814 }
3815}
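/* A note on the arithmetic above (an assumption, not stated in this
 * file): rx_ticks/tx_ticks are kept in usec and the HC timeout
 * registers appear to count in 12 usec units, hence the /12; a zero
 * tick value instead sets the HC_DISABLE word, turning interrupt
 * coalescing off for that index. */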
3816
3817static void bnx2x_init_rx_rings(struct bnx2x *bp)
3818{
3819 u16 ring_prod;
3820 int i, j;
a2fbb9ea
ET
3821
3822 bp->rx_buf_use_size = bp->dev->mtu;
3823
3824 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
3825 bp->rx_buf_size = bp->rx_buf_use_size + 64;
3826
3827 for_each_queue(bp, j) {
3828 struct bnx2x_fastpath *fp = &bp->fp[j];
3829
3830 fp->rx_bd_cons = 0;
3831 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
3832
3833 for (i = 1; i <= NUM_RX_RINGS; i++) {
3834 struct eth_rx_bd *rx_bd;
3835
3836 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
3837 rx_bd->addr_hi =
3838 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 3839 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
3840 rx_bd->addr_lo =
3841 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 3842 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
3843 }
3844
34f80b04 3845 /* CQ ring */
a2fbb9ea
ET
3846 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3847 struct eth_rx_cqe_next_page *nextpg;
3848
3849 nextpg = (struct eth_rx_cqe_next_page *)
3850 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3851 nextpg->addr_hi =
3852 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 3853 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
3854 nextpg->addr_lo =
3855 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 3856 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
3857 }
3858
3859 /* rx completion queue */
3860 fp->rx_comp_cons = ring_prod = 0;
3861
3862 for (i = 0; i < bp->rx_ring_size; i++) {
3863 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
3864 BNX2X_ERR("was only able to allocate "
3865 "%d rx skbs\n", i);
3866 break;
3867 }
3868 ring_prod = NEXT_RX_IDX(ring_prod);
3869 BUG_TRAP(ring_prod > i);
3870 }
3871
3872 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
3873 fp->rx_pkt = fp->rx_calls = 0;
3874
c14423fe 3875 /* Warning! This will generate an interrupt (to the TSTORM) */
a2fbb9ea
ET
3876 /* must only be done when chip is initialized */
3877 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04
EG
3878 TSTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)),
3879 ring_prod);
a2fbb9ea
ET
3880 if (j != 0)
3881 continue;
3882
3883 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3884 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)),
a2fbb9ea
ET
3885 U64_LO(fp->rx_comp_mapping));
3886 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 3887 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(BP_PORT(bp)) + 4,
a2fbb9ea
ET
3888 U64_HI(fp->rx_comp_mapping));
3889 }
3890}
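/* The loops above turn the BD and CQ rings into circular chains of
 * BCM_PAGE_SIZE pages: the tail descriptor of page i carries the DMA
 * address of page (i % NUM_RX_RINGS), so the last page points back to
 * the first and the hardware wraps without driver involvement; the TX
 * rings in bnx2x_init_tx_ring() below are chained the same way. */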
3891
3892static void bnx2x_init_tx_ring(struct bnx2x *bp)
3893{
3894 int i, j;
3895
3896 for_each_queue(bp, j) {
3897 struct bnx2x_fastpath *fp = &bp->fp[j];
3898
3899 for (i = 1; i <= NUM_TX_RINGS; i++) {
3900 struct eth_tx_bd *tx_bd =
3901 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
3902
3903 tx_bd->addr_hi =
3904 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 3905 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
3906 tx_bd->addr_lo =
3907 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 3908 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
3909 }
3910
3911 fp->tx_pkt_prod = 0;
3912 fp->tx_pkt_cons = 0;
3913 fp->tx_bd_prod = 0;
3914 fp->tx_bd_cons = 0;
3915 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
3916 fp->tx_pkt = 0;
3917 }
3918}
3919
3920static void bnx2x_init_sp_ring(struct bnx2x *bp)
3921{
34f80b04 3922 int func = BP_FUNC(bp);
a2fbb9ea
ET
3923
3924 spin_lock_init(&bp->spq_lock);
3925
3926 bp->spq_left = MAX_SPQ_PENDING;
3927 bp->spq_prod_idx = 0;
a2fbb9ea
ET
3928 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3929 bp->spq_prod_bd = bp->spq;
3930 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
3931
34f80b04 3932 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 3933 U64_LO(bp->spq_mapping));
34f80b04
EG
3934 REG_WR(bp,
3935 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
3936 U64_HI(bp->spq_mapping));
3937
34f80b04 3938 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
3939 bp->spq_prod_idx);
3940}
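/* The slowpath (ramrod) queue initialized above lives in a single page
 * of MAX_SP_DESC_CNT BDs guarded by spq_lock; spq_left caps outstanding
 * ramrods at MAX_SPQ_PENDING, and the XSTORM is handed the page base
 * and the starting producer so it begins consuming from index 0. */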
3941
3942static void bnx2x_init_context(struct bnx2x *bp)
3943{
3944 int i;
3945
3946 for_each_queue(bp, i) {
3947 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
3948 struct bnx2x_fastpath *fp = &bp->fp[i];
34f80b04 3949 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea
ET
3950
3951 context->xstorm_st_context.tx_bd_page_base_hi =
3952 U64_HI(fp->tx_desc_mapping);
3953 context->xstorm_st_context.tx_bd_page_base_lo =
3954 U64_LO(fp->tx_desc_mapping);
3955 context->xstorm_st_context.db_data_addr_hi =
3956 U64_HI(fp->tx_prods_mapping);
3957 context->xstorm_st_context.db_data_addr_lo =
3958 U64_LO(fp->tx_prods_mapping);
34f80b04
EG
3959 context->xstorm_st_context.statistics_data = (BP_CL_ID(bp) |
3960 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3961
3962 context->ustorm_st_context.common.sb_index_numbers =
3963 BNX2X_RX_SB_INDEX_NUM;
3964 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
3965 context->ustorm_st_context.common.status_block_id = sb_id;
3966 context->ustorm_st_context.common.flags =
3967 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT;
3968 context->ustorm_st_context.common.mc_alignment_size = 64;
3969 context->ustorm_st_context.common.bd_buff_size =
3970 bp->rx_buf_use_size;
3971 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 3972 U64_HI(fp->rx_desc_mapping);
34f80b04 3973 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 3974 U64_LO(fp->rx_desc_mapping);
a2fbb9ea
ET
3975 context->cstorm_st_context.sb_index_number =
3976 HC_INDEX_C_ETH_TX_CQ_CONS;
34f80b04 3977 context->cstorm_st_context.status_block_id = sb_id;
a2fbb9ea
ET
3978
3979 context->xstorm_ag_context.cdu_reserved =
3980 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3981 CDU_REGION_NUMBER_XCM_AG,
3982 ETH_CONNECTION_TYPE);
3983 context->ustorm_ag_context.cdu_usage =
3984 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3985 CDU_REGION_NUMBER_UCM_AG,
3986 ETH_CONNECTION_TYPE);
3987 }
3988}
3989
3990static void bnx2x_init_ind_table(struct bnx2x *bp)
3991{
34f80b04 3992 int port = BP_PORT(bp);
a2fbb9ea
ET
3993 int i;
3994
3995 if (!is_multi(bp))
3996 return;
3997
34f80b04 3998 DP(NETIF_MSG_IFUP, "Initializing indirection table\n");
a2fbb9ea 3999 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04
EG
4000 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4001 TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
a2fbb9ea
ET
4002 i % bp->num_queues);
4003
4004 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4005}
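/* Worked example of the fill above: with num_queues == 4 the table
 * becomes the repeating byte pattern 0 1 2 3 0 1 2 3 ..., i.e. RSS
 * hash buckets are dealt round-robin across the active queues. */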
4006
49d66772
ET
4007static void bnx2x_set_client_config(struct bnx2x *bp)
4008{
49d66772 4009 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
4010 int port = BP_PORT(bp);
4011 int i;
49d66772 4012
34f80b04 4013 tstorm_client.mtu = bp->dev->mtu + ETH_OVREHEAD;
49d66772
ET
4014 tstorm_client.statistics_counter_id = 0;
4015 tstorm_client.config_flags =
4016 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
4017#ifdef BCM_VLAN
34f80b04 4018 if (bp->rx_mode && bp->vlgrp) {
49d66772
ET
4019 tstorm_client.config_flags |=
4020 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
4021 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4022 }
4023#endif
49d66772
ET
4024
4025 for_each_queue(bp, i) {
4026 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4027 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
4028 ((u32 *)&tstorm_client)[0]);
4029 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4030 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
4031 ((u32 *)&tstorm_client)[1]);
4032 }
4033
34f80b04
EG
4034 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4035 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
4036}
4037
a2fbb9ea
ET
4038static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4039{
a2fbb9ea 4040 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
4041 int mode = bp->rx_mode;
4042 int mask = (1 << BP_L_ID(bp));
4043 int func = BP_FUNC(bp);
a2fbb9ea
ET
4044 int i;
4045
4046 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
4047
4048 switch (mode) {
4049 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
4050 tstorm_mac_filter.ucast_drop_all = mask;
4051 tstorm_mac_filter.mcast_drop_all = mask;
4052 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea
ET
4053 break;
4054 case BNX2X_RX_MODE_NORMAL:
34f80b04 4055 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4056 break;
4057 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
4058 tstorm_mac_filter.mcast_accept_all = mask;
4059 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4060 break;
4061 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
4062 tstorm_mac_filter.ucast_accept_all = mask;
4063 tstorm_mac_filter.mcast_accept_all = mask;
4064 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea
ET
4065 break;
4066 default:
34f80b04
EG
4067 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4068 break;
a2fbb9ea
ET
4069 }
4070
4071 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4072 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4073 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
4074 ((u32 *)&tstorm_mac_filter)[i]);
4075
34f80b04 4076/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
4077 ((u32 *)&tstorm_mac_filter)[i]); */
4078 }
a2fbb9ea 4079
49d66772
ET
4080 if (mode != BNX2X_RX_MODE_NONE)
4081 bnx2x_set_client_config(bp);
a2fbb9ea
ET
4082}
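/* The filter fields above are per-client bitmasks and
 * mask = (1 << BP_L_ID(bp)) flips only this function's leading client:
 * e.g. PROMISC sets the ucast/mcast/bcast accept_all bits for that
 * client, while NORMAL accepts only broadcast here (unicast matching
 * presumably stays with the MAC address CAM). */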
4083
4084static void bnx2x_init_internal(struct bnx2x *bp)
4085{
a2fbb9ea
ET
4086 struct tstorm_eth_function_common_config tstorm_config = {0};
4087 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4088 int port = BP_PORT(bp);
4089 int func = BP_FUNC(bp);
4090 int i;
a2fbb9ea
ET
4091
4092 if (is_multi(bp)) {
4093 tstorm_config.config_flags = MULTI_FLAGS;
4094 tstorm_config.rss_result_mask = MULTI_MASK;
4095 }
4096
34f80b04
EG
4097 tstorm_config.leading_client_id = BP_L_ID(bp);
4098
a2fbb9ea 4099 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4100 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4101 (*(u32 *)&tstorm_config));
4102
34f80b04 4103/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
a2fbb9ea
ET
4104 (*(u32 *)&tstorm_config)); */
4105
c14423fe 4106 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4107 bnx2x_set_storm_rx_mode(bp);
4108
34f80b04 4109 stats_flags.collect_eth = 1;
a2fbb9ea
ET
4110
4111 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
4112 ((u32 *)&stats_flags)[0]);
4113 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
4114 ((u32 *)&stats_flags)[1]);
4115
4116 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
4117 ((u32 *)&stats_flags)[0]);
4118 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
4119 ((u32 *)&stats_flags)[1]);
4120
4121 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
4122 ((u32 *)&stats_flags)[0]);
4123 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
4124 ((u32 *)&stats_flags)[1]);
4125
34f80b04 4126/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
a2fbb9ea 4127 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
34f80b04
EG
4128
4129 if (CHIP_IS_E1H(bp)) {
4130 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4131 IS_E1HMF(bp));
4132 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4133 IS_E1HMF(bp));
4134 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4135 IS_E1HMF(bp));
4136 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4137 IS_E1HMF(bp));
4138
4139 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4140 XSTORM_E1HOV_OFFSET(func), bp->e1hov);
4141 }
4142
4143 /* Zero this manually as its initialization is
4144 currently missing in the initTool */
4145 for (i = 0; i < USTORM_AGG_DATA_SIZE >> 2; i++)
4146 REG_WR(bp, BAR_USTRORM_INTMEM +
4147 USTORM_AGG_DATA_OFFSET + 4*i, 0);
a2fbb9ea
ET
4148}
4149
4150static void bnx2x_nic_init(struct bnx2x *bp)
4151{
4152 int i;
4153
4154 for_each_queue(bp, i) {
4155 struct bnx2x_fastpath *fp = &bp->fp[i];
4156
34f80b04 4157 fp->bp = bp;
a2fbb9ea 4158 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4159 fp->index = i;
34f80b04
EG
4160 fp->cl_id = BP_L_ID(bp) + i;
4161 fp->sb_id = fp->cl_id;
4162 DP(NETIF_MSG_IFUP,
4163 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4164 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
4165 bnx2x_init_sb(bp, FP_SB_ID(fp), fp->status_blk,
4166 fp->status_blk_mapping);
a2fbb9ea
ET
4167 }
4168
4169 bnx2x_init_def_sb(bp, bp->def_status_blk,
34f80b04 4170 bp->def_status_blk_mapping, DEF_SB_ID);
a2fbb9ea
ET
4171 bnx2x_update_coalesce(bp);
4172 bnx2x_init_rx_rings(bp);
4173 bnx2x_init_tx_ring(bp);
4174 bnx2x_init_sp_ring(bp);
4175 bnx2x_init_context(bp);
4176 bnx2x_init_internal(bp);
bb2a0f7a 4177 bnx2x_storm_stats_init(bp);
a2fbb9ea 4178 bnx2x_init_ind_table(bp);
615f8fd9 4179 bnx2x_int_enable(bp);
a2fbb9ea
ET
4180}
4181
4182/* end of nic init */
4183
4184/*
4185 * gzip service functions
4186 */
4187
4188static int bnx2x_gunzip_init(struct bnx2x *bp)
4189{
4190 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
4191 &bp->gunzip_mapping);
4192 if (bp->gunzip_buf == NULL)
4193 goto gunzip_nomem1;
4194
4195 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4196 if (bp->strm == NULL)
4197 goto gunzip_nomem2;
4198
4199 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4200 GFP_KERNEL);
4201 if (bp->strm->workspace == NULL)
4202 goto gunzip_nomem3;
4203
4204 return 0;
4205
4206gunzip_nomem3:
4207 kfree(bp->strm);
4208 bp->strm = NULL;
4209
4210gunzip_nomem2:
4211 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4212 bp->gunzip_mapping);
4213 bp->gunzip_buf = NULL;
4214
4215gunzip_nomem1:
4216 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 4217 " un-compression\n", bp->dev->name);
a2fbb9ea
ET
4218 return -ENOMEM;
4219}
4220
4221static void bnx2x_gunzip_end(struct bnx2x *bp)
4222{
4223 kfree(bp->strm->workspace);
4224
4225 kfree(bp->strm);
4226 bp->strm = NULL;
4227
4228 if (bp->gunzip_buf) {
4229 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
4230 bp->gunzip_mapping);
4231 bp->gunzip_buf = NULL;
4232 }
4233}
4234
4235static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
4236{
4237 int n, rc;
4238
4239 /* check gzip header */
4240 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
4241 return -EINVAL;
4242
4243 n = 10;
4244
34f80b04 4245#define FNAME 0x8
a2fbb9ea
ET
4246
4247 if (zbuf[3] & FNAME)
4248 while ((zbuf[n++] != 0) && (n < len));
4249
4250 bp->strm->next_in = zbuf + n;
4251 bp->strm->avail_in = len - n;
4252 bp->strm->next_out = bp->gunzip_buf;
4253 bp->strm->avail_out = FW_BUF_SIZE;
4254
4255 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4256 if (rc != Z_OK)
4257 return rc;
4258
4259 rc = zlib_inflate(bp->strm, Z_FINISH);
4260 if ((rc != Z_OK) && (rc != Z_STREAM_END))
4261 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
4262 bp->dev->name, bp->strm->msg);
4263
4264 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4265 if (bp->gunzip_outlen & 0x3)
4266 printk(KERN_ERR PFX "%s: Firmware decompression error:"
4267 " gunzip_outlen (%d) not aligned\n",
4268 bp->dev->name, bp->gunzip_outlen);
4269 bp->gunzip_outlen >>= 2;
4270
4271 zlib_inflateEnd(bp->strm);
4272
4273 if (rc == Z_STREAM_END)
4274 return 0;
4275
4276 return rc;
4277}
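/* Header handling sketch (per RFC 1952): a gzip member begins with a
 * fixed 10-byte header (magic 0x1f 0x8b, method, flags, mtime, xfl,
 * os), and when the FNAME flag (0x8) is set it is followed by a
 * NUL-terminated original file name, which the n loop above skips;
 * the remaining raw deflate stream is then handed to zlib with
 * -MAX_WBITS, which selects headerless inflate. */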
4278
4279/* nic load/unload */
4280
4281/*
34f80b04 4282 * General service functions
a2fbb9ea
ET
4283 */
4284
4285/* send a NIG loopback debug packet */
4286static void bnx2x_lb_pckt(struct bnx2x *bp)
4287{
a2fbb9ea 4288 u32 wb_write[3];
a2fbb9ea
ET
4289
4290 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4291 wb_write[0] = 0x55555555;
4292 wb_write[1] = 0x55555555;
34f80b04 4293 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4294 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4295
4296 /* NON-IP protocol */
a2fbb9ea
ET
4297 wb_write[0] = 0x09000000;
4298 wb_write[1] = 0x55555555;
34f80b04 4299 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4300 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4301}
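/* The helper above pushes two 8-byte data beats through
 * NIG_REG_DEBUG_PACKET_LB, each with a control dword (0x20 = SOP,
 * 0x10 = EOP), yielding one 16-byte frame; that is why the memory test
 * below expects the BRB octet counter to advance by 0x10 per packet. */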
4302
4303/* some of the internal memories
4304 * are not directly readable from the driver
4305 * to test them we send debug packets
4306 */
4307static int bnx2x_int_mem_test(struct bnx2x *bp)
4308{
4309 int factor;
4310 int count, i;
4311 u32 val = 0;
4312
ad8d3948 4313 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4314 factor = 120;
ad8d3948
EG
4315 else if (CHIP_REV_IS_EMUL(bp))
4316 factor = 200;
4317 else
a2fbb9ea 4318 factor = 1;
a2fbb9ea
ET
4319
4320 DP(NETIF_MSG_HW, "start part1\n");
4321
4322 /* Disable inputs of parser neighbor blocks */
4323 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4324 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4325 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4326 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4327
4328 /* Write 0 to parser credits for CFC search request */
4329 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4330
4331 /* send Ethernet packet */
4332 bnx2x_lb_pckt(bp);
4333
4334 /* TODO: should the NIG statistics be reset here? */
4335 /* Wait until NIG register shows 1 packet of size 0x10 */
4336 count = 1000 * factor;
4337 while (count) {
34f80b04 4338
a2fbb9ea
ET
4339 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4340 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4341 if (val == 0x10)
4342 break;
4343
4344 msleep(10);
4345 count--;
4346 }
4347 if (val != 0x10) {
4348 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4349 return -1;
4350 }
4351
4352 /* Wait until PRS register shows 1 packet */
4353 count = 1000 * factor;
4354 while (count) {
4355 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4356 if (val == 1)
4357 break;
4358
4359 msleep(10);
4360 count--;
4361 }
4362 if (val != 0x1) {
4363 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4364 return -2;
4365 }
4366
4367 /* Reset and init BRB, PRS */
34f80b04 4368 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4369 msleep(50);
34f80b04 4370 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea
ET
4371 msleep(50);
4372 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4373 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4374
4375 DP(NETIF_MSG_HW, "part2\n");
4376
4377 /* Disable inputs of parser neighbor blocks */
4378 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4379 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4380 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
4381 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
4382
4383 /* Write 0 to parser credits for CFC search request */
4384 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4385
4386 /* send 10 Ethernet packets */
4387 for (i = 0; i < 10; i++)
4388 bnx2x_lb_pckt(bp);
4389
4390 /* Wait until NIG register shows 10 + 1
4391 packets of total size 11*0x10 = 0xb0 */
4392 count = 1000 * factor;
4393 while (count) {
34f80b04 4394
a2fbb9ea
ET
4395 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4396 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4397 if (val == 0xb0)
4398 break;
4399
4400 msleep(10);
4401 count--;
4402 }
4403 if (val != 0xb0) {
4404 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4405 return -3;
4406 }
4407
4408 /* Wait until PRS register shows 2 packets */
4409 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4410 if (val != 2)
4411 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4412
4413 /* Write 1 to parser credits for CFC search request */
4414 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4415
4416 /* Wait until PRS register shows 3 packets */
4417 msleep(10 * factor);
4418 /* the third packet should now have passed the parser */
4419 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4420 if (val != 3)
4421 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4422
4423 /* clear NIG EOP FIFO */
4424 for (i = 0; i < 11; i++)
4425 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4426 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4427 if (val != 1) {
4428 BNX2X_ERR("clear of NIG failed\n");
4429 return -4;
4430 }
4431
4432 /* Reset and init BRB, PRS, NIG */
4433 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4434 msleep(50);
4435 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4436 msleep(50);
4437 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4438 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4439#ifndef BCM_ISCSI
4440 /* set NIC mode */
4441 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4442#endif
4443
4444 /* Enable inputs of parser neighbor blocks */
4445 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4446 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4447 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
4448 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
4449
4450 DP(NETIF_MSG_HW, "done\n");
4451
4452 return 0; /* OK */
4453}
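/* Self-test summary: with the parser's CFC search credit held at 0,
 * packets stall inside the PRS, so the expected readings are NIG 0x10
 * with PRS count 1 (part 1), NIG 0xb0 with PRS count 2 after ten more
 * packets, and PRS count 3 once a single credit is restored; each
 * failure point returns its own negative code (-1 to -4). */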
4454
4455static void enable_blocks_attention(struct bnx2x *bp)
4456{
4457 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4458 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
4459 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4460 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4461 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4462 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4463 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4464 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4465 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4466/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4467/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4468 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4469 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4470 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4471/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4472/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4473 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4474 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4475 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4476 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4477/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4478/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
4479 if (CHIP_REV_IS_FPGA(bp))
4480 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
4481 else
4482 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4483 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4484 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4485 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4486/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4487/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4488 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4489 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
4490/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4491 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
4492}
4493
34f80b04
EG
4494
4495static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 4496{
a2fbb9ea 4497 u32 val, i;
a2fbb9ea 4498
34f80b04 4499 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 4500
34f80b04
EG
4501 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4502 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4503
34f80b04
EG
4504 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
4505 if (CHIP_IS_E1H(bp))
4506 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 4507
34f80b04
EG
4508 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
4509 msleep(30);
4510 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 4511
34f80b04
EG
4512 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
4513 if (CHIP_IS_E1(bp)) {
4514 /* enable HW interrupt from PXP on USDM overflow
4515 bit 16 on INT_MASK_0 */
4516 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4517 }
a2fbb9ea 4518
34f80b04
EG
4519 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
4520 bnx2x_init_pxp(bp);
a2fbb9ea
ET
4521
4522#ifdef __BIG_ENDIAN
34f80b04
EG
4523 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4524 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4525 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4526 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4527 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
4528 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
4529
4530/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4531 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4532 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4533 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4534 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
4535#endif
4536
4537#ifndef BCM_ISCSI
4538 /* set NIC mode */
4539 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4540#endif
4541
34f80b04 4542 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 4543#ifdef BCM_ISCSI
34f80b04
EG
4544 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
4545 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
4546 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
4547#endif
4548
34f80b04
EG
4549 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4550 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 4551
34f80b04
EG
4552 /* let the HW do its magic ... */
4553 msleep(100);
4554 /* finish PXP init */
4555 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4556 if (val != 1) {
4557 BNX2X_ERR("PXP2 CFG failed\n");
4558 return -EBUSY;
4559 }
4560 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4561 if (val != 1) {
4562 BNX2X_ERR("PXP2 RD_INIT failed\n");
4563 return -EBUSY;
4564 }
a2fbb9ea 4565
34f80b04
EG
4566 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4567 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 4568
34f80b04 4569 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 4570
34f80b04
EG
4571 /* clean the DMAE memory */
4572 bp->dmae_ready = 1;
4573 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 4574
34f80b04
EG
4575 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
4576 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
4577 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
4578 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 4579
34f80b04
EG
4580 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4581 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4582 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4583 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4584
4585 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
4586 /* soft reset pulse */
4587 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4588 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
4589
4590#ifdef BCM_ISCSI
34f80b04 4591 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 4592#endif
a2fbb9ea 4593
34f80b04
EG
4594 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
4595 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4596 if (!CHIP_REV_IS_SLOW(bp)) {
4597 /* enable hw interrupt from doorbell Q */
4598 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4599 }
a2fbb9ea 4600
34f80b04
EG
4601 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
4602 if (CHIP_REV_IS_SLOW(bp)) {
4603 /* fix for emulation and FPGA for no pause */
4604 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
4605 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
4606 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
4607 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
4608 }
a2fbb9ea 4609
34f80b04
EG
4610 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
4611 if (CHIP_IS_E1H(bp))
4612 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 4613
34f80b04
EG
4614 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
4615 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
4616 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
4617 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 4618
34f80b04
EG
4619 if (CHIP_IS_E1H(bp)) {
4620 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
4621 STORM_INTMEM_SIZE_E1H/2);
4622 bnx2x_init_fill(bp,
4623 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
4624 0, STORM_INTMEM_SIZE_E1H/2);
4625 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
4626 STORM_INTMEM_SIZE_E1H/2);
4627 bnx2x_init_fill(bp,
4628 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
4629 0, STORM_INTMEM_SIZE_E1H/2);
4630 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
4631 STORM_INTMEM_SIZE_E1H/2);
4632 bnx2x_init_fill(bp,
4633 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
4634 0, STORM_INTMEM_SIZE_E1H/2);
4635 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
4636 STORM_INTMEM_SIZE_E1H/2);
4637 bnx2x_init_fill(bp,
4638 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
4639 0, STORM_INTMEM_SIZE_E1H/2);
4640 } else { /* E1 */
ad8d3948
EG
4641 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
4642 STORM_INTMEM_SIZE_E1);
4643 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
4644 STORM_INTMEM_SIZE_E1);
4645 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
4646 STORM_INTMEM_SIZE_E1);
4647 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
4648 STORM_INTMEM_SIZE_E1);
34f80b04 4649 }
a2fbb9ea 4650
34f80b04
EG
4651 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
4652 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
4653 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
4654 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 4655
34f80b04
EG
4656 /* sync semi rtc */
4657 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4658 0x80000000);
4659 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4660 0x80000000);
a2fbb9ea 4661
34f80b04
EG
4662 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
4663 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
4664 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 4665
34f80b04
EG
4666 REG_WR(bp, SRC_REG_SOFT_RST, 1);
4667 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
4668 REG_WR(bp, i, 0xc0cac01a);
4669 /* TODO: replace with something meaningful */
4670 }
4671 if (CHIP_IS_E1H(bp))
4672 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
4673 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 4674
34f80b04
EG
4675 if (sizeof(union cdu_context) != 1024)
4676 /* we currently assume that a context is 1024 bytes */
4677 printk(KERN_ALERT PFX "please adjust the size of"
4678 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 4679
34f80b04
EG
4680 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
4681 val = (4 << 24) + (0 << 12) + 1024;
4682 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4683 if (CHIP_IS_E1(bp)) {
4685 /* !!! fix pxp client credit until excel update */
4685 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
4686 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
4687 }
a2fbb9ea 4688
34f80b04
EG
4689 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
4690 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
a2fbb9ea 4691
34f80b04
EG
4692 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
4693 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 4694
34f80b04
EG
4695 /* PXPCS COMMON comes here */
4696 /* Reset PCIE errors for debug */
4697 REG_WR(bp, 0x2814, 0xffffffff);
4698 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 4699
34f80b04
EG
4700 /* EMAC0 COMMON comes here */
4701 /* EMAC1 COMMON comes here */
4702 /* DBU COMMON comes here */
4703 /* DBG COMMON comes here */
4704
4705 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
4706 if (CHIP_IS_E1H(bp)) {
4707 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4708 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4709 }
4710
4711 if (CHIP_REV_IS_SLOW(bp))
4712 msleep(200);
4713
4714 /* finish CFC init */
4715 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4716 if (val != 1) {
4717 BNX2X_ERR("CFC LL_INIT failed\n");
4718 return -EBUSY;
4719 }
4720 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4721 if (val != 1) {
4722 BNX2X_ERR("CFC AC_INIT failed\n");
4723 return -EBUSY;
4724 }
4725 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4726 if (val != 1) {
4727 BNX2X_ERR("CFC CAM_INIT failed\n");
4728 return -EBUSY;
4729 }
4730 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 4731
34f80b04
EG
4732 /* read NIG statistic
4733 to see if this is our first up since powerup */
4734 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4735 val = *bnx2x_sp(bp, wb_data[0]);
4736
4737 /* do internal memory self test */
4738 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4739 BNX2X_ERR("internal mem self test failed\n");
4740 return -EBUSY;
4741 }
4742
4743 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4744 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4745 /* Fan failure is indicated by SPIO 5 */
4746 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4747 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4748
4749 /* set to active low mode */
4750 val = REG_RD(bp, MISC_REG_SPIO_INT);
4751 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 4752 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 4753 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 4754
34f80b04
EG
4755 /* enable interrupt to signal the IGU */
4756 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4757 val |= (1 << MISC_REGISTERS_SPIO_5);
4758 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4759 break;
f1410647 4760
34f80b04
EG
4761 default:
4762 break;
4763 }
f1410647 4764
34f80b04
EG
4765 /* clear PXP2 attentions */
4766 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 4767
34f80b04 4768 enable_blocks_attention(bp);
a2fbb9ea 4769
34f80b04
EG
4770 return 0;
4771}
a2fbb9ea 4772
34f80b04
EG
4773static int bnx2x_init_port(struct bnx2x *bp)
4774{
4775 int port = BP_PORT(bp);
4776 u32 val;
a2fbb9ea 4777
34f80b04
EG
4778 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
4779
4780 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea
ET
4781
4782 /* Port PXP comes here */
4783 /* Port PXP2 comes here */
a2fbb9ea
ET
4784#ifdef BCM_ISCSI
4785 /* Port0 1
4786 * Port1 385 */
4787 i++;
4788 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
4789 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
4790 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4791 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4792
4793 /* Port0 2
4794 * Port1 386 */
4795 i++;
4796 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
4797 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
4798 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4799 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4800
4801 /* Port0 3
4802 * Port1 387 */
4803 i++;
4804 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
4805 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
4806 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4807 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4808#endif
34f80b04 4809 /* Port CMs come here */
a2fbb9ea
ET
4810
4811 /* Port QM comes here */
a2fbb9ea
ET
4812#ifdef BCM_ISCSI
4813 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
4814 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
4815
4816 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
4817 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
4818#endif
4819 /* Port DQ comes here */
4820 /* Port BRB1 comes here */
ad8d3948 4821 /* Port PRS comes here */
a2fbb9ea
ET
4822 /* Port TSDM comes here */
4823 /* Port CSDM comes here */
4824 /* Port USDM comes here */
4825 /* Port XSDM comes here */
34f80b04
EG
4826 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
4827 port ? TSEM_PORT1_END : TSEM_PORT0_END);
4828 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
4829 port ? USEM_PORT1_END : USEM_PORT0_END);
4830 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
4831 port ? CSEM_PORT1_END : CSEM_PORT0_END);
4832 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
4833 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 4834 /* Port UPB comes here */
34f80b04
EG
4835 /* Port XPB comes here */
4836
4837 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
4838 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea
ET
4839
4840 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 4841 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
4842
4843 /* update threshold */
34f80b04 4844 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 4845 /* update init credit */
34f80b04 4846 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
4847
4848 /* probe changes */
34f80b04 4849 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 4850 msleep(5);
34f80b04 4851 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
4852
4853#ifdef BCM_ISCSI
4854 /* tell the searcher where the T2 table is */
4855 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
4856
4857 wb_write[0] = U64_LO(bp->t2_mapping);
4858 wb_write[1] = U64_HI(bp->t2_mapping);
4859 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
4860 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
4861 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
4862 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
4863
4864 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
4865 /* Port SRCH comes here */
4866#endif
4867 /* Port CDU comes here */
4868 /* Port CFC comes here */
34f80b04
EG
4869
4870 if (CHIP_IS_E1(bp)) {
4871 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4872 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4873 }
4874 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
4875 port ? HC_PORT1_END : HC_PORT0_END);
4876
4877 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 4878 MISC_AEU_PORT0_START,
34f80b04
EG
4879 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
4880 /* init aeu_mask_attn_func_0/1:
4881 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4882 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4883 * bits 4-7 are used for "per vn group attention" */
4884 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4885 (IS_E1HMF(bp) ? 0xF7 : 0x7));
4886
a2fbb9ea
ET
4887 /* Port PXPCS comes here */
4888 /* Port EMAC0 comes here */
4889 /* Port EMAC1 comes here */
4890 /* Port DBU comes here */
4891 /* Port DBG comes here */
34f80b04
EG
4892 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
4893 port ? NIG_PORT1_END : NIG_PORT0_END);
4894
4895 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4896
4897 if (CHIP_IS_E1H(bp)) {
4898 u32 wsum;
4899 struct cmng_struct_per_port m_cmng_port;
4900 int vn;
4901
4902 /* 0x2 disable e1hov, 0x1 enable */
4903 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4904 (IS_E1HMF(bp) ? 0x1 : 0x2));
4905
4906 /* Init RATE SHAPING and FAIRNESS contexts.
4907 Initialize as if there is a 10G link. */
4908 wsum = bnx2x_calc_vn_wsum(bp);
4909 bnx2x_init_port_minmax(bp, (int)wsum, 10000, &m_cmng_port);
4910 if (IS_E1HMF(bp))
4911 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4912 bnx2x_init_vn_minmax(bp, 2*vn + port,
4913 wsum, 10000, &m_cmng_port);
4914 }
4915
a2fbb9ea
ET
4916 /* Port MCP comes here */
4917 /* Port DMAE comes here */
4918
34f80b04 4919 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
f1410647
ET
4920 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4921 /* add SPIO 5 to group 0 */
4922 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4923 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4924 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
4925 break;
4926
4927 default:
4928 break;
4929 }
4930
c18487ee 4931 bnx2x__link_reset(bp);
a2fbb9ea 4932
34f80b04
EG
4933 return 0;
4934}
4935
4936#define ILT_PER_FUNC (768/2)
4937#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
4938/* the physical address is shifted right 12 bits and a 1=valid bit
4939 is added at the 53rd bit;
4940 since this is a wide register(TM)
4941 we split it into two 32 bit writes
4942 */
4943#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4944#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
4945#define PXP_ONE_ILT(x) (((x) << 10) | x)
4946#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
4947
4948#define CNIC_ILT_LINES 0
4949
4950static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4951{
4952 int reg;
4953
4954 if (CHIP_IS_E1H(bp))
4955 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4956 else /* E1 */
4957 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4958
4959 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4960}
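/* Worked example for the macros above: given addr = 0x123456000,
 * ONCHIP_ADDR1(addr) = (addr >> 12) = 0x00123456 and
 * ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100000, i.e.
 * just the valid bit, since the address fits in 44 bits; the two
 * dwords then go out together through the wide-bus write. */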
4961
4962static int bnx2x_init_func(struct bnx2x *bp)
4963{
4964 int port = BP_PORT(bp);
4965 int func = BP_FUNC(bp);
4966 int i;
4967
4968 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
4969
4970 i = FUNC_ILT_BASE(func);
4971
4972 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4973 if (CHIP_IS_E1H(bp)) {
4974 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4975 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4976 } else /* E1 */
4977 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4978 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4979
4980
4981 if (CHIP_IS_E1H(bp)) {
4982 for (i = 0; i < 9; i++)
4983 bnx2x_init_block(bp,
4984 cm_start[func][i], cm_end[func][i]);
4985
4986 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4987 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4988 }
4989
4990 /* HC init per function */
4991 if (CHIP_IS_E1H(bp)) {
4992 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4993
4994 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4995 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4996 }
4997 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
4998
4999 if (CHIP_IS_E1H(bp))
5000 REG_WR(bp, HC_REG_FUNC_NUM_P0 + port*4, func);
5001
c14423fe 5002 /* Reset PCIE errors for debug */
a2fbb9ea
ET
5003 REG_WR(bp, 0x2114, 0xffffffff);
5004 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5005
34f80b04
EG
5006 return 0;
5007}
5008
5009static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5010{
5011 int i, rc = 0;
a2fbb9ea 5012
34f80b04
EG
5013 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5014 BP_FUNC(bp), load_code);
a2fbb9ea 5015
34f80b04
EG
5016 bp->dmae_ready = 0;
5017 mutex_init(&bp->dmae_mutex);
5018 bnx2x_gunzip_init(bp);
a2fbb9ea 5019
34f80b04
EG
5020 switch (load_code) {
5021 case FW_MSG_CODE_DRV_LOAD_COMMON:
5022 rc = bnx2x_init_common(bp);
5023 if (rc)
5024 goto init_hw_err;
5025 /* no break */
5026
5027 case FW_MSG_CODE_DRV_LOAD_PORT:
5028 bp->dmae_ready = 1;
5029 rc = bnx2x_init_port(bp);
5030 if (rc)
5031 goto init_hw_err;
5032 /* no break */
5033
5034 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5035 bp->dmae_ready = 1;
5036 rc = bnx2x_init_func(bp);
5037 if (rc)
5038 goto init_hw_err;
5039 break;
5040
5041 default:
5042 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5043 break;
5044 }
5045
5046 if (!BP_NOMCP(bp)) {
5047 int func = BP_FUNC(bp);
a2fbb9ea
ET
5048
5049 bp->fw_drv_pulse_wr_seq =
34f80b04 5050 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5051 DRV_PULSE_SEQ_MASK);
34f80b04
EG
5052 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5053 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5054 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5055 } else
5056 bp->func_stx = 0;
a2fbb9ea 5057
34f80b04
EG
5058 /* this needs to be done before gunzip end */
5059 bnx2x_zero_def_sb(bp);
5060 for_each_queue(bp, i)
5061 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5062
5063init_hw_err:
5064 bnx2x_gunzip_end(bp);
5065
5066 return rc;
a2fbb9ea
ET
5067}
5068
c14423fe 5069/* send the MCP a request, block until there is a reply */
a2fbb9ea
ET
5070static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5071{
34f80b04 5072 int func = BP_FUNC(bp);
f1410647
ET
5073 u32 seq = ++bp->fw_seq;
5074 u32 rc = 0;
a2fbb9ea 5075
34f80b04 5076 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5077 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea
ET
5078
5079 /* let the FW do its magic ... */
5080 msleep(100); /* TBD */
5081
5082 if (CHIP_REV_IS_SLOW(bp))
5083 msleep(900);
5084
34f80b04 5085 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea
ET
5086 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
5087
5088 /* is this a reply to our command? */
5089 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5090 rc &= FW_MSG_CODE_MASK;
f1410647 5091
a2fbb9ea
ET
5092 } else {
5093 /* FW BUG! */
5094 BNX2X_ERR("FW failed to respond!\n");
5095 bnx2x_fw_dump(bp);
5096 rc = 0;
5097 }
f1410647 5098
a2fbb9ea
ET
5099 return rc;
5100}
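
/* Usage sketch (mirrors the callers below; illustrative only):
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *	if (!load_code)
 *		return -EBUSY;	(no reply, or sequence mismatch)
 *
 * Note that the returned value is already masked with
 * FW_MSG_CODE_MASK, so it can be compared directly against the
 * FW_MSG_CODE_* constants.
 */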
5101
5102static void bnx2x_free_mem(struct bnx2x *bp)
5103{
5104
5105#define BNX2X_PCI_FREE(x, y, size) \
5106 do { \
5107 if (x) { \
5108 pci_free_consistent(bp->pdev, size, x, y); \
5109 x = NULL; \
5110 y = 0; \
5111 } \
5112 } while (0)
5113
5114#define BNX2X_FREE(x) \
5115 do { \
5116 if (x) { \
5117 vfree(x); \
5118 x = NULL; \
5119 } \
5120 } while (0)
5121
5122 int i;
5123
5124 /* fastpath */
5125 for_each_queue(bp, i) {
5126
5127 /* Status blocks */
5128 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5129 bnx2x_fp(bp, i, status_blk_mapping),
5130 sizeof(struct host_status_block) +
5131 sizeof(struct eth_tx_db_data));
5132
5133 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5134 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5135 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5136 bnx2x_fp(bp, i, tx_desc_mapping),
5137 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5138
5139 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5140 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5141 bnx2x_fp(bp, i, rx_desc_mapping),
5142 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5143
5144 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5145 bnx2x_fp(bp, i, rx_comp_mapping),
5146 sizeof(struct eth_fast_path_rx_cqe) *
5147 NUM_RCQ_BD);
5148 }
5149
a2fbb9ea
ET
5150 /* end of fastpath */
5151
5152 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 5153 sizeof(struct host_def_status_block));
a2fbb9ea
ET
5154
5155 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 5156 sizeof(struct bnx2x_slowpath));
a2fbb9ea
ET
5157
5158#ifdef BCM_ISCSI
5159 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
5160 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
5161 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
5162 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
5163#endif
5164 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
5165
5166#undef BNX2X_PCI_FREE
5167#undef BNX2X_FREE
5168}
5169
5170static int bnx2x_alloc_mem(struct bnx2x *bp)
5171{
5172
5173#define BNX2X_PCI_ALLOC(x, y, size) \
5174 do { \
5175 x = pci_alloc_consistent(bp->pdev, size, y); \
5176 if (x == NULL) \
5177 goto alloc_mem_err; \
5178 memset(x, 0, size); \
5179 } while (0)
5180
5181#define BNX2X_ALLOC(x, size) \
5182 do { \
5183 x = vmalloc(size); \
5184 if (x == NULL) \
5185 goto alloc_mem_err; \
5186 memset(x, 0, size); \
5187 } while (0)
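
/* Note (editor's comment): the do { } while (0) wrapper makes each
 * macro expand to a single statement, so it stays safe inside an
 * un-braced if/else; the goto to alloc_mem_err below gives a single
 * unwind path - any failed allocation releases everything allocated
 * so far through bnx2x_free_mem().
 */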
5188
5189 int i;
5190
5191 /* fastpath */
a2fbb9ea
ET
5192 for_each_queue(bp, i) {
5193 bnx2x_fp(bp, i, bp) = bp;
5194
5195 /* Status blocks */
5196 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
5197 &bnx2x_fp(bp, i, status_blk_mapping),
5198 sizeof(struct host_status_block) +
5199 sizeof(struct eth_tx_db_data));
5200
5201 bnx2x_fp(bp, i, hw_tx_prods) =
5202 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
5203
5204 bnx2x_fp(bp, i, tx_prods_mapping) =
5205 bnx2x_fp(bp, i, status_blk_mapping) +
5206 sizeof(struct host_status_block);
5207
5208 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
5209 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5210 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5211 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5212 &bnx2x_fp(bp, i, tx_desc_mapping),
5213 sizeof(struct eth_tx_bd) * NUM_TX_BD);
5214
5215 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5216 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5217 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5218 &bnx2x_fp(bp, i, rx_desc_mapping),
5219 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5220
5221 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5222 &bnx2x_fp(bp, i, rx_comp_mapping),
5223 sizeof(struct eth_fast_path_rx_cqe) *
5224 NUM_RCQ_BD);
5225
5226 }
5227 /* end of fastpath */
5228
5229 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
5230 sizeof(struct host_def_status_block));
5231
5232 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5233 sizeof(struct bnx2x_slowpath));
5234
5235#ifdef BCM_ISCSI
5236 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
5237
5238 /* Initialize T1 */
5239 for (i = 0; i < 64*1024; i += 64) {
5240 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
5241 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
5242 }
5243
5244 /* allocate searcher T2 table
5245 we allocate 1/4 of alloc num for T2
5246 (which is not entered into the ILT) */
5247 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
5248
5249 /* Initialize T2 */
5250 for (i = 0; i < 16*1024; i += 64)
5251 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
5252
c14423fe 5253 /* now fixup the last line in the block to point to the next block */
a2fbb9ea
ET
5254 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
5255
5256 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
5257 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
5258
5259 /* QM queues (128*MAX_CONN) */
5260 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
5261#endif
5262
5263 /* Slow path ring */
5264 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
5265
5266 return 0;
5267
5268alloc_mem_err:
5269 bnx2x_free_mem(bp);
5270 return -ENOMEM;
5271
5272#undef BNX2X_PCI_ALLOC
5273#undef BNX2X_ALLOC
5274}
5275
5276static void bnx2x_free_tx_skbs(struct bnx2x *bp)
5277{
5278 int i;
5279
5280 for_each_queue(bp, i) {
5281 struct bnx2x_fastpath *fp = &bp->fp[i];
5282
5283 u16 bd_cons = fp->tx_bd_cons;
5284 u16 sw_prod = fp->tx_pkt_prod;
5285 u16 sw_cons = fp->tx_pkt_cons;
5286
a2fbb9ea
ET
5287 while (sw_cons != sw_prod) {
5288 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
5289 sw_cons++;
5290 }
5291 }
5292}
5293
5294static void bnx2x_free_rx_skbs(struct bnx2x *bp)
5295{
5296 int i, j;
5297
5298 for_each_queue(bp, j) {
5299 struct bnx2x_fastpath *fp = &bp->fp[j];
5300
a2fbb9ea
ET
5301 for (i = 0; i < NUM_RX_BD; i++) {
5302 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
5303 struct sk_buff *skb = rx_buf->skb;
5304
5305 if (skb == NULL)
5306 continue;
5307
5308 pci_unmap_single(bp->pdev,
5309 pci_unmap_addr(rx_buf, mapping),
5310 bp->rx_buf_use_size,
5311 PCI_DMA_FROMDEVICE);
5312
5313 rx_buf->skb = NULL;
5314 dev_kfree_skb(skb);
5315 }
5316 }
5317}
5318
5319static void bnx2x_free_skbs(struct bnx2x *bp)
5320{
5321 bnx2x_free_tx_skbs(bp);
5322 bnx2x_free_rx_skbs(bp);
5323}
5324
5325static void bnx2x_free_msix_irqs(struct bnx2x *bp)
5326{
34f80b04 5327 int i, offset = 1;
a2fbb9ea
ET
5328
5329 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 5330 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea
ET
5331 bp->msix_table[0].vector);
5332
5333 for_each_queue(bp, i) {
c14423fe 5334 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 5335 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5336 bnx2x_fp(bp, i, state));
5337
228241eb
ET
5338 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
5339 BNX2X_ERR("IRQ of fp #%d being freed while "
5340 "state != closed\n", i);
a2fbb9ea 5341
34f80b04 5342 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 5343 }
a2fbb9ea
ET
5344}
5345
5346static void bnx2x_free_irq(struct bnx2x *bp)
5347{
a2fbb9ea 5348 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea
ET
5349 bnx2x_free_msix_irqs(bp);
5350 pci_disable_msix(bp->pdev);
a2fbb9ea
ET
5351 bp->flags &= ~USING_MSIX_FLAG;
5352
5353 } else
5354 free_irq(bp->pdev->irq, bp->dev);
5355}
5356
5357static int bnx2x_enable_msix(struct bnx2x *bp)
5358{
34f80b04 5359 int i, rc, offset;
a2fbb9ea
ET
5360
5361 bp->msix_table[0].entry = 0;
34f80b04
EG
5362 offset = 1;
5363 DP(NETIF_MSG_IFUP, "msix_table[0].entry = 0 (slowpath)\n");
a2fbb9ea 5364
34f80b04
EG
5365 for_each_queue(bp, i) {
5366 int igu_vec = offset + i + BP_L_ID(bp);
a2fbb9ea 5367
34f80b04
EG
5368 bp->msix_table[i + offset].entry = igu_vec;
5369 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
5370 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea
ET
5371 }
5372
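/* Example (illustrative): with two queues and BP_L_ID(bp) == 0 the
 * table holds entries {0, 1, 2} - vector 0 for the slowpath status
 * block and vectors 1..2 for the two fastpath queues.
 */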
34f80b04
EG
5373 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
5374 bp->num_queues + offset);
5375 if (rc) {
5376 DP(NETIF_MSG_IFUP, "MSI-X is not attainable\n");
5377 return -1;
5378 }
a2fbb9ea
ET
5379 bp->flags |= USING_MSIX_FLAG;
5380
5381 return 0;
a2fbb9ea
ET
5382}
5383
a2fbb9ea
ET
5384static int bnx2x_req_msix_irqs(struct bnx2x *bp)
5385{
34f80b04 5386 int i, rc, offset = 1;
a2fbb9ea 5387
a2fbb9ea
ET
5388 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
5389 bp->dev->name, bp->dev);
a2fbb9ea
ET
5390 if (rc) {
5391 BNX2X_ERR("request sp irq failed\n");
5392 return -EBUSY;
5393 }
5394
5395 for_each_queue(bp, i) {
34f80b04 5396 rc = request_irq(bp->msix_table[i + offset].vector,
a2fbb9ea
ET
5397 bnx2x_msix_fp_int, 0,
5398 bp->dev->name, &bp->fp[i]);
a2fbb9ea 5399 if (rc) {
34f80b04
EG
5400 BNX2X_ERR("request fp #%d irq failed rc %d\n",
5401 i + offset, rc);
a2fbb9ea
ET
5402 bnx2x_free_msix_irqs(bp);
5403 return -EBUSY;
5404 }
5405
5406 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
a2fbb9ea
ET
5407 }
5408
5409 return 0;
a2fbb9ea
ET
5410}
5411
5412static int bnx2x_req_irq(struct bnx2x *bp)
5413{
34f80b04 5414 int rc;
a2fbb9ea 5415
34f80b04
EG
5416 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, IRQF_SHARED,
5417 bp->dev->name, bp->dev);
a2fbb9ea
ET
5418 if (!rc)
5419 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
5420
5421 return rc;
a2fbb9ea
ET
5422}
5423
5424/*
5425 * Init service functions
5426 */
5427
34f80b04 5428static void bnx2x_set_mac_addr_e1(struct bnx2x *bp)
a2fbb9ea
ET
5429{
5430 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 5431 int port = BP_PORT(bp);
a2fbb9ea
ET
5432
5433 /* CAM allocation
5434 * unicasts 0-31:port0 32-63:port1
5435 * multicast 64-127:port0 128-191:port1
5436 */
5437 config->hdr.length_6b = 2;
34f80b04
EG
5438 config->hdr.offset = port ? 31 : 0;
5439 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
5440 config->hdr.reserved1 = 0;
5441
5442 /* primary MAC */
5443 config->config_table[0].cam_entry.msb_mac_addr =
5444 swab16(*(u16 *)&bp->dev->dev_addr[0]);
5445 config->config_table[0].cam_entry.middle_mac_addr =
5446 swab16(*(u16 *)&bp->dev->dev_addr[2]);
5447 config->config_table[0].cam_entry.lsb_mac_addr =
5448 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 5449 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
5450 config->config_table[0].target_table_entry.flags = 0;
5451 config->config_table[0].target_table_entry.client_id = 0;
5452 config->config_table[0].target_table_entry.vlan_id = 0;
5453
5454 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
5455 config->config_table[0].cam_entry.msb_mac_addr,
5456 config->config_table[0].cam_entry.middle_mac_addr,
5457 config->config_table[0].cam_entry.lsb_mac_addr);
5458
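/* Example (editor's illustration): for dev_addr 00:10:18:ab:cd:ef on
 * a little-endian host, *(u16 *)&dev_addr[0] reads as 0x1000 and
 * swab16() yields 0x0010, so the CAM entry holds the address as the
 * three words 0x0010, 0x18ab, 0xcdef.
 */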
5459 /* broadcast */
5460 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
5461 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
5462 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 5463 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
a2fbb9ea
ET
5464 config->config_table[1].target_table_entry.flags =
5465 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
5466 config->config_table[1].target_table_entry.client_id = 0;
5467 config->config_table[1].target_table_entry.vlan_id = 0;
5468
5469 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5470 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
5471 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
5472}
5473
34f80b04
EG
5474static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp)
5475{
5476 struct mac_configuration_cmd_e1h *config =
5477 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
5478
5479 if (bp->state != BNX2X_STATE_OPEN) {
5480 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
5481 return;
5482 }
5483
5484 /* CAM allocation for E1H
5485 * unicasts: by func number
5486 * multicast: 20+FUNC*20, 20 each
5487 */
5488 config->hdr.length_6b = 1;
5489 config->hdr.offset = BP_FUNC(bp);
5490 config->hdr.client_id = BP_CL_ID(bp);
5491 config->hdr.reserved1 = 0;
5492
5493 /* primary MAC */
5494 config->config_table[0].msb_mac_addr =
5495 swab16(*(u16 *)&bp->dev->dev_addr[0]);
5496 config->config_table[0].middle_mac_addr =
5497 swab16(*(u16 *)&bp->dev->dev_addr[2]);
5498 config->config_table[0].lsb_mac_addr =
5499 swab16(*(u16 *)&bp->dev->dev_addr[4]);
5500 config->config_table[0].client_id = BP_L_ID(bp);
5501 config->config_table[0].vlan_id = 0;
5502 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
5503 config->config_table[0].flags = BP_PORT(bp);
5504
5505 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
5506 config->config_table[0].msb_mac_addr,
5507 config->config_table[0].middle_mac_addr,
5508 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
5509
5510 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5511 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
5512 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
5513}
5514
a2fbb9ea
ET
5515static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
5516 int *state_p, int poll)
5517{
5518 /* can take a while if any port is running */
34f80b04 5519 int cnt = 500;
a2fbb9ea 5520
c14423fe
ET
5521 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
5522 poll ? "polling" : "waiting", state, idx);
a2fbb9ea
ET
5523
5524 might_sleep();
34f80b04 5525 while (cnt--) {
a2fbb9ea
ET
5526 if (poll) {
5527 bnx2x_rx_int(bp->fp, 10);
34f80b04
EG
5528 /* if index is different from 0
5529 * the reply for some commands will
a2fbb9ea
ET
5530 * be on the non-default queue
5531 */
5532 if (idx)
5533 bnx2x_rx_int(&bp->fp[idx], 10);
5534 }
34f80b04 5535 mb(); /* state is changed by bnx2x_sp_event() */
a2fbb9ea 5536
49d66772 5537 if (*state_p == state)
a2fbb9ea
ET
5538 return 0;
5539
a2fbb9ea 5540 msleep(1);
a2fbb9ea
ET
5541 }
5542
a2fbb9ea 5543 /* timeout! */
49d66772
ET
5544 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
5545 poll ? "polling" : "waiting", state, idx);
34f80b04
EG
5546#ifdef BNX2X_STOP_ON_ERROR
5547 bnx2x_panic();
5548#endif
a2fbb9ea 5549
49d66772 5550 return -EBUSY;
a2fbb9ea
ET
5551}
5552
5553static int bnx2x_setup_leading(struct bnx2x *bp)
5554{
34f80b04 5555 int rc;
a2fbb9ea 5556
c14423fe 5557 /* reset IGU state */
34f80b04 5558 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5559
5560 /* SETUP ramrod */
5561 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
5562
34f80b04
EG
5563 /* Wait for completion */
5564 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 5565
34f80b04 5566 return rc;
a2fbb9ea
ET
5567}
5568
5569static int bnx2x_setup_multi(struct bnx2x *bp, int index)
5570{
a2fbb9ea 5571 /* reset IGU state */
34f80b04 5572 bnx2x_ack_sb(bp, bp->fp[index].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 5573
228241eb 5574 /* SETUP ramrod */
a2fbb9ea
ET
5575 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
5576 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
5577
5578 /* Wait for completion */
5579 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 5580 &(bp->fp[index].state), 0);
a2fbb9ea
ET
5581}
5582
a2fbb9ea
ET
5583static int bnx2x_poll(struct napi_struct *napi, int budget);
5584static void bnx2x_set_rx_mode(struct net_device *dev);
5585
34f80b04
EG
5586/* must be called with rtnl_lock */
5587static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
a2fbb9ea 5588{
228241eb 5589 u32 load_code;
34f80b04
EG
5590 int i, rc;
5591
5592#ifdef BNX2X_STOP_ON_ERROR
5593 if (unlikely(bp->panic))
5594 return -EPERM;
5595#endif
a2fbb9ea
ET
5596
5597 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
5598
34f80b04
EG
5599 /* Send LOAD_REQUEST command to MCP
5600 Returns the type of LOAD command:
5601 if this is the first port to be initialized,
5602 the common blocks should also be initialized; otherwise not
a2fbb9ea 5603 */
34f80b04 5604 if (!BP_NOMCP(bp)) {
228241eb
ET
5605 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
5606 if (!load_code) {
5607 BNX2X_ERR("MCP response failure, unloading\n");
5608 return -EBUSY;
5609 }
34f80b04 5610 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED)
a2fbb9ea 5611 return -EBUSY; /* other port in diagnostic mode */
34f80b04 5612
a2fbb9ea 5613 } else {
34f80b04
EG
5614 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
5615 load_count[0], load_count[1], load_count[2]);
5616 load_count[0]++;
5617 load_count[1 + BP_PORT(bp)]++;
5618 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
5619 load_count[0], load_count[1], load_count[2]);
5620 if (load_count[0] == 1)
5621 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
5622 else if (load_count[1 + BP_PORT(bp)] == 1)
5623 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
5624 else
5625 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
a2fbb9ea
ET
5626 }
5627
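/* Example (no-MCP bookkeeping above, editor's illustration): the
 * first load on the device sees load_count {1, 1, 0} and picks
 * LOAD_COMMON; the first load on the other port then sees {2, 1, 1}
 * and picks LOAD_PORT; any further function on an already started
 * port gets LOAD_FUNCTION.
 */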
34f80b04
EG
5628 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
5629 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
5630 bp->port.pmf = 1;
5631 else
5632 bp->port.pmf = 0;
5633 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
5634
5635 /* if we can't use MSI-X we only need one fp,
5636 * so try to enable MSI-X with the requested number of fp's
a2fbb9ea
ET
5637 * and fall back to INT#A with one fp
5638 */
34f80b04
EG
5639 if (use_inta) {
5640 bp->num_queues = 1;
5641
5642 } else {
5643 if ((use_multi > 1) && (use_multi <= BP_MAX_QUEUES(bp)))
5644 /* user requested number */
5645 bp->num_queues = use_multi;
5646
5647 else if (use_multi)
5648 bp->num_queues = min_t(u32, num_online_cpus(),
5649 BP_MAX_QUEUES(bp));
5650 else
a2fbb9ea 5651 bp->num_queues = 1;
34f80b04
EG
5652
5653 if (bnx2x_enable_msix(bp)) {
5654 /* failed to enable MSI-X */
5655 bp->num_queues = 1;
5656 if (use_multi)
5657 BNX2X_ERR("Multi requested but failed"
5658 " to enable MSI-X\n");
a2fbb9ea
ET
5659 }
5660 }
34f80b04
EG
5661 DP(NETIF_MSG_IFUP,
5662 "set number of queues to %d\n", bp->num_queues);
c14423fe 5663
a2fbb9ea
ET
5664 if (bnx2x_alloc_mem(bp))
5665 return -ENOMEM;
5666
34f80b04
EG
5667 /* Disable interrupt handling until HW is initialized */
5668 atomic_set(&bp->intr_sem, 1);
a2fbb9ea 5669
34f80b04
EG
5670 if (bp->flags & USING_MSIX_FLAG) {
5671 rc = bnx2x_req_msix_irqs(bp);
5672 if (rc) {
5673 pci_disable_msix(bp->pdev);
5674 goto load_error;
5675 }
5676 } else {
5677 bnx2x_ack_int(bp);
5678 rc = bnx2x_req_irq(bp);
5679 if (rc) {
5680 BNX2X_ERR("IRQ request failed, aborting\n");
5681 goto load_error;
a2fbb9ea
ET
5682 }
5683 }
5684
5685 for_each_queue(bp, i)
5686 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
5687 bnx2x_poll, 128);
5688
a2fbb9ea 5689 /* Initialize HW */
34f80b04
EG
5690 rc = bnx2x_init_hw(bp, load_code);
5691 if (rc) {
a2fbb9ea 5692 BNX2X_ERR("HW init failed, aborting\n");
228241eb 5693 goto load_error;
a2fbb9ea
ET
5694 }
5695
34f80b04 5696 /* Enable interrupt handling */
a2fbb9ea
ET
5697 atomic_set(&bp->intr_sem, 0);
5698
a2fbb9ea
ET
5699 /* Setup NIC internals and enable interrupts */
5700 bnx2x_nic_init(bp);
5701
5702 /* Send LOAD_DONE command to MCP */
34f80b04 5703 if (!BP_NOMCP(bp)) {
228241eb
ET
5704 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
5705 if (!load_code) {
a2fbb9ea 5706 BNX2X_ERR("MCP response failure, unloading\n");
34f80b04 5707 rc = -EBUSY;
228241eb 5708 goto load_int_disable;
a2fbb9ea
ET
5709 }
5710 }
5711
bb2a0f7a
YG
5712 bnx2x_stats_init(bp);
5713
a2fbb9ea
ET
5714 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
5715
5716 /* Enable Rx interrupt handling before sending the ramrod
5717 as it's completed on Rx FP queue */
5718 for_each_queue(bp, i)
5719 napi_enable(&bnx2x_fp(bp, i, napi));
5720
34f80b04
EG
5721 rc = bnx2x_setup_leading(bp);
5722 if (rc) {
5723#ifdef BNX2X_STOP_ON_ERROR
5724 bp->panic = 1;
5725#endif
228241eb 5726 goto load_stop_netif;
34f80b04 5727 }
a2fbb9ea 5728
34f80b04
EG
5729 if (CHIP_IS_E1H(bp))
5730 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
5731 BNX2X_ERR("!!! mf_cfg function disabled\n");
5732 bp->state = BNX2X_STATE_DISABLED;
5733 }
a2fbb9ea 5734
34f80b04
EG
5735 if (bp->state == BNX2X_STATE_OPEN)
5736 for_each_nondefault_queue(bp, i) {
5737 rc = bnx2x_setup_multi(bp, i);
5738 if (rc)
5739 goto load_stop_netif;
5740 }
a2fbb9ea 5741
34f80b04
EG
5742 if (CHIP_IS_E1(bp))
5743 bnx2x_set_mac_addr_e1(bp);
5744 else
5745 bnx2x_set_mac_addr_e1h(bp);
5746
5747 if (bp->port.pmf)
5748 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
5749
5750 /* Start fast path */
34f80b04
EG
5751 switch (load_mode) {
5752 case LOAD_NORMAL:
5753 /* Tx queue should only be re-enabled */
5754 netif_wake_queue(bp->dev);
5755 bnx2x_set_rx_mode(bp->dev);
5756 break;
5757
5758 case LOAD_OPEN:
5759 /* IRQ is only requested from bnx2x_open */
a2fbb9ea 5760 netif_start_queue(bp->dev);
34f80b04 5761 bnx2x_set_rx_mode(bp->dev);
a2fbb9ea
ET
5762 if (bp->flags & USING_MSIX_FLAG)
5763 printk(KERN_INFO PFX "%s: using MSI-X\n",
5764 bp->dev->name);
34f80b04 5765 break;
a2fbb9ea 5766
34f80b04 5767 case LOAD_DIAG:
a2fbb9ea 5768 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
5769 bp->state = BNX2X_STATE_DIAG;
5770 break;
5771
5772 default:
5773 break;
a2fbb9ea
ET
5774 }
5775
34f80b04
EG
5776 if (!bp->port.pmf)
5777 bnx2x__link_status_update(bp);
5778
a2fbb9ea
ET
5779 /* start the timer */
5780 mod_timer(&bp->timer, jiffies + bp->current_interval);
5781
34f80b04 5782
a2fbb9ea
ET
5783 return 0;
5784
228241eb 5785load_stop_netif:
a2fbb9ea
ET
5786 for_each_queue(bp, i)
5787 napi_disable(&bnx2x_fp(bp, i, napi));
5788
228241eb 5789load_int_disable:
615f8fd9 5790 bnx2x_int_disable_sync(bp);
a2fbb9ea 5791
34f80b04 5792 /* Release IRQs */
a2fbb9ea
ET
5793 bnx2x_free_irq(bp);
5794
228241eb 5795load_error:
a2fbb9ea
ET
5796 bnx2x_free_mem(bp);
5797
5798 /* TBD we really need to reset the chip
5799 if we want to recover from this */
34f80b04 5800 return rc;
a2fbb9ea
ET
5801}
5802
5803static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5804{
a2fbb9ea
ET
5805 int rc;
5806
c14423fe 5807 /* halt the connection */
a2fbb9ea
ET
5808 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
5809 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
5810
34f80b04 5811 /* Wait for completion */
a2fbb9ea 5812 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
34f80b04 5813 &(bp->fp[index].state), 1);
c14423fe 5814 if (rc) /* timeout */
a2fbb9ea
ET
5815 return rc;
5816
5817 /* delete cfc entry */
5818 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5819
34f80b04
EG
5820 /* Wait for completion */
5821 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5822 &(bp->fp[index].state), 1);
5823 return rc;
a2fbb9ea
ET
5824}
5825
a2fbb9ea
ET
5826static void bnx2x_stop_leading(struct bnx2x *bp)
5827{
49d66772 5828 u16 dsb_sp_prod_idx;
c14423fe 5829 /* if the other port is handling traffic,
a2fbb9ea 5830 this can take a lot of time */
34f80b04
EG
5831 int cnt = 500;
5832 int rc;
a2fbb9ea
ET
5833
5834 might_sleep();
5835
5836 /* Send HALT ramrod */
5837 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 5838 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 5839
34f80b04
EG
5840 /* Wait for completion */
5841 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5842 &(bp->fp[0].state), 1);
5843 if (rc) /* timeout */
a2fbb9ea
ET
5844 return;
5845
49d66772 5846 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 5847
228241eb 5848 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
5849 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5850
49d66772 5851 /* Wait for completion to arrive on default status block
a2fbb9ea
ET
5852 we are going to reset the chip anyway
5853 so there is not much to do if this times out
5854 */
34f80b04 5855 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
49d66772 5856 msleep(1);
34f80b04
EG
5857 if (!cnt) {
5858 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5859 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5860 *bp->dsb_sp_prod, dsb_sp_prod_idx);
5861#ifdef BNX2X_STOP_ON_ERROR
5862 bnx2x_panic();
5863#endif
5864 break;
5865 }
5866 cnt--;
49d66772
ET
5867 }
5868 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5869 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
5870}
5871
34f80b04
EG
5872static void bnx2x_reset_func(struct bnx2x *bp)
5873{
5874 int port = BP_PORT(bp);
5875 int func = BP_FUNC(bp);
5876 int base, i;
5877
5878 /* Configure IGU */
5879 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5880 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5881
5882 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
5883
5884 /* Clear ILT */
5885 base = FUNC_ILT_BASE(func);
5886 for (i = base; i < base + ILT_PER_FUNC; i++)
5887 bnx2x_ilt_wr(bp, i, 0);
5888}
5889
5890static void bnx2x_reset_port(struct bnx2x *bp)
5891{
5892 int port = BP_PORT(bp);
5893 u32 val;
5894
5895 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5896
5897 /* Do not rcv packets to BRB */
5898 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5899 /* Do not direct rcv packets that are not for MCP to the BRB */
5900 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5901 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5902
5903 /* Configure AEU */
5904 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5905
5906 msleep(100);
5907 /* Check for BRB port occupancy */
5908 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5909 if (val)
5910 DP(NETIF_MSG_IFDOWN,
5911 "BRB1 is not empty %d blooks are occupied\n", val);
5912
5913 /* TODO: Close Doorbell port? */
5914}
5915
5916static void bnx2x_reset_common(struct bnx2x *bp)
5917{
5918 /* reset_common */
5919 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5920 0xd3ffff7f);
5921 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5922}
5923
5924static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5925{
5926 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5927 BP_FUNC(bp), reset_code);
5928
5929 switch (reset_code) {
5930 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5931 bnx2x_reset_port(bp);
5932 bnx2x_reset_func(bp);
5933 bnx2x_reset_common(bp);
5934 break;
5935
5936 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5937 bnx2x_reset_port(bp);
5938 bnx2x_reset_func(bp);
5939 break;
5940
5941 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5942 bnx2x_reset_func(bp);
5943 break;
49d66772 5944
34f80b04
EG
5945 default:
5946 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5947 break;
5948 }
5949}
5950
5951/* must be called with rtnl_lock */
5952static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea
ET
5953{
5954 u32 reset_code = 0;
34f80b04 5955 int i, cnt;
a2fbb9ea
ET
5956
5957 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
5958
228241eb
ET
5959 bp->rx_mode = BNX2X_RX_MODE_NONE;
5960 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 5961
228241eb
ET
5962 if (netif_running(bp->dev)) {
5963 netif_tx_disable(bp->dev);
5964 bp->dev->trans_start = jiffies; /* prevent tx timeout */
5965 }
5966
34f80b04
EG
5967 del_timer_sync(&bp->timer);
5968 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
5969 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 5970 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 5971
228241eb
ET
5972 /* Wait until all fast path tasks complete */
5973 for_each_queue(bp, i) {
5974 struct bnx2x_fastpath *fp = &bp->fp[i];
5975
34f80b04
EG
5976#ifdef BNX2X_STOP_ON_ERROR
5977#ifdef __powerpc64__
5978 DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
5979#else
5980 DP(NETIF_MSG_IFDOWN, "fp->tpa_queue_used = 0x%llx\n",
5981#endif
5982 fp->tpa_queue_used);
5983#endif
5984 cnt = 1000;
5985 smp_rmb();
5986 while (bnx2x_has_work(fp)) {
228241eb 5987 msleep(1);
34f80b04
EG
5988 if (!cnt) {
5989 BNX2X_ERR("timeout waiting for queue[%d]\n",
5990 i);
5991#ifdef BNX2X_STOP_ON_ERROR
5992 bnx2x_panic();
5993 return -EBUSY;
5994#else
5995 break;
5996#endif
5997 }
5998 cnt--;
5999 smp_rmb();
6000 }
228241eb 6001 }
a2fbb9ea 6002
34f80b04
EG
6003 /* Wait until all slow path tasks complete */
6004 cnt = 1000;
6005 while ((bp->spq_left != MAX_SPQ_PENDING) && cnt--)
a2fbb9ea
ET
6006 msleep(1);
6007
228241eb
ET
6008 for_each_queue(bp, i)
6009 napi_disable(&bnx2x_fp(bp, i, napi));
6010 /* Disable interrupts after Tx and Rx are disabled on stack level */
6011 bnx2x_int_disable_sync(bp);
a2fbb9ea 6012
34f80b04
EG
6013 /* Release IRQs */
6014 bnx2x_free_irq(bp);
6015
a2fbb9ea
ET
6016 if (bp->flags & NO_WOL_FLAG)
6017 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
228241eb 6018
a2fbb9ea 6019 else if (bp->wol) {
34f80b04 6020 u32 emac_base = BP_PORT(bp) ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
a2fbb9ea 6021 u8 *mac_addr = bp->dev->dev_addr;
34f80b04 6022 u32 val;
a2fbb9ea 6023
34f80b04
EG
6024 /* The mac address is written to entries 1-4 to
6025 preserve entry 0 which is used by the PMF */
a2fbb9ea 6026 val = (mac_addr[0] << 8) | mac_addr[1];
34f80b04 6027 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8, val);
a2fbb9ea
ET
6028
6029 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6030 (mac_addr[4] << 8) | mac_addr[5];
34f80b04
EG
6031 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + (BP_E1HVN(bp) + 1)*8 + 4,
6032 val);
a2fbb9ea
ET
6033
6034 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 6035
a2fbb9ea
ET
6036 } else
6037 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6038
34f80b04
EG
6039 /* Close multi and leading connections
6040 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
6041 for_each_nondefault_queue(bp, i)
6042 if (bnx2x_stop_multi(bp, i))
228241eb 6043 goto unload_error;
a2fbb9ea 6044
34f80b04
EG
6045 if (CHIP_IS_E1H(bp))
6046 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + BP_PORT(bp)*8, 0);
6047
6048 bnx2x_stop_leading(bp);
6049#ifdef BNX2X_STOP_ON_ERROR
6050 /* If ramrod completion timed out - break here! */
6051 if (bp->panic) {
6052 BNX2X_ERR("Stop leading failed!\n");
6053 return -EBUSY;
6054 }
6055#endif
6056
228241eb
ET
6057 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
6058 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
34f80b04
EG
6059 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
6060 "state 0x%x fp[0].state 0x%x\n",
228241eb
ET
6061 bp->state, bp->fp[0].state);
6062 }
6063
6064unload_error:
34f80b04 6065 if (!BP_NOMCP(bp))
228241eb 6066 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
6067 else {
6068 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
6069 load_count[0], load_count[1], load_count[2]);
6070 load_count[0]--;
6071 load_count[1 + BP_PORT(bp)]--;
6072 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
6073 load_count[0], load_count[1], load_count[2]);
6074 if (load_count[0] == 0)
6075 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
6076 else if (load_count[1 + BP_PORT(bp)] == 0)
6077 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6078 else
6079 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6080 }
a2fbb9ea 6081
34f80b04
EG
6082 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6083 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6084 bnx2x__link_reset(bp);
a2fbb9ea
ET
6085
6086 /* Reset the chip */
228241eb 6087 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
6088
6089 /* Report UNLOAD_DONE to MCP */
34f80b04 6090 if (!BP_NOMCP(bp))
a2fbb9ea
ET
6091 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6092
6093 /* Free SKBs and driver internals */
6094 bnx2x_free_skbs(bp);
6095 bnx2x_free_mem(bp);
6096
6097 bp->state = BNX2X_STATE_CLOSED;
228241eb 6098
a2fbb9ea
ET
6099 netif_carrier_off(bp->dev);
6100
6101 return 0;
6102}
6103
34f80b04
EG
6104static void bnx2x_reset_task(struct work_struct *work)
6105{
6106 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
6107
6108#ifdef BNX2X_STOP_ON_ERROR
6109 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6110 " so reset not done to allow debug dump,\n"
6111 KERN_ERR " you will need to reboot when done\n");
6112 return;
6113#endif
6114
6115 rtnl_lock();
6116
6117 if (!netif_running(bp->dev))
6118 goto reset_task_exit;
6119
6120 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6121 bnx2x_nic_load(bp, LOAD_NORMAL);
6122
6123reset_task_exit:
6124 rtnl_unlock();
6125}
6126
a2fbb9ea
ET
6127/* end of nic load/unload */
6128
6129/* ethtool_ops */
6130
6131/*
6132 * Init service functions
6133 */
6134
34f80b04
EG
6135static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
6136{
6137 u32 val;
6138
6139 /* Check if there is any driver already loaded */
6140 val = REG_RD(bp, MISC_REG_UNPREPARED);
6141 if (val == 0x1) {
6142 /* Check if it is the UNDI driver
6143 * UNDI driver initializes CID offset for normal bell to 0x7
6144 */
6145 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6146 if (val == 0x7) {
6147 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6148 /* save our func and fw_seq */
6149 int func = BP_FUNC(bp);
6150 u16 fw_seq = bp->fw_seq;
6151
6152 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6153
6154 /* try unload UNDI on port 0 */
6155 bp->func = 0;
6156 bp->fw_seq = (SHMEM_RD(bp,
6157 func_mb[bp->func].drv_mb_header) &
6158 DRV_MSG_SEQ_NUMBER_MASK);
6159
6160 reset_code = bnx2x_fw_command(bp, reset_code);
6161 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6162
6163 /* if UNDI is loaded on the other port */
6164 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6165
6166 bp->func = 1;
6167 bp->fw_seq = (SHMEM_RD(bp,
6168 func_mb[bp->func].drv_mb_header) &
6169 DRV_MSG_SEQ_NUMBER_MASK);
6170
6171 bnx2x_fw_command(bp,
6172 DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS);
6173 bnx2x_fw_command(bp,
6174 DRV_MSG_CODE_UNLOAD_DONE);
6175
6176 /* restore our func and fw_seq */
6177 bp->func = func;
6178 bp->fw_seq = fw_seq;
6179 }
6180
6181 /* reset device */
6182 REG_WR(bp,
6183 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6184 0xd3ffff7f);
6185 REG_WR(bp,
6186 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6187 0x1403);
6188 }
6189 }
6190}
6191
6192static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
6193{
6194 u32 val, val2, val3, val4, id;
6195
6196 /* Get the chip revision id and number. */
6197 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
6198 val = REG_RD(bp, MISC_REG_CHIP_NUM);
6199 id = ((val & 0xffff) << 16);
6200 val = REG_RD(bp, MISC_REG_CHIP_REV);
6201 id |= ((val & 0xf) << 12);
6202 val = REG_RD(bp, MISC_REG_CHIP_METAL);
6203 id |= ((val & 0xff) << 4);
6204 val = REG_RD(bp, MISC_REG_BOND_ID);
6205 id |= (val & 0xf);
6206 bp->common.chip_id = id;
6207 bp->link_params.chip_id = bp->common.chip_id;
6208 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
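/* Example (illustrative): chip num 0x164e, rev 0, metal 0 and
 * bond_id 0 compose to chip_id 0x164e0000 through the shifts above.
 */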
6209
6210 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
6211 bp->common.flash_size = (NVRAM_1MB_SIZE <<
6212 (val & MCPR_NVM_CFG4_FLASH_SIZE));
6213 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
6214 bp->common.flash_size, bp->common.flash_size);
6215
6216 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6217 bp->link_params.shmem_base = bp->common.shmem_base;
6218 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
6219
6220 if (!bp->common.shmem_base ||
6221 (bp->common.shmem_base < 0xA0000) ||
6222 (bp->common.shmem_base >= 0xC0000)) {
6223 BNX2X_DEV_INFO("MCP not active\n");
6224 bp->flags |= NO_MCP_FLAG;
6225 return;
6226 }
6227
6228 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
6229 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6230 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
6231 BNX2X_ERR("BAD MCP validity signature\n");
6232
6233 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
6234 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
6235
6236 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
6237 bp->common.hw_config, bp->common.board);
6238
6239 bp->link_params.hw_led_mode = ((bp->common.hw_config &
6240 SHARED_HW_CFG_LED_MODE_MASK) >>
6241 SHARED_HW_CFG_LED_MODE_SHIFT);
6242
6243 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6244 bp->common.bc_ver = val;
6245 BNX2X_DEV_INFO("bc_ver %X\n", val);
6246 if (val < BNX2X_BC_VER) {
6247 /* for now only warn
6248 * later we might need to enforce this */
6249 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
6250 " please upgrade BC\n", BNX2X_BC_VER, val);
6251 }
6252 BNX2X_DEV_INFO("%sWoL Capable\n",
6253 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
6254
6255 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6256 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6257 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6258 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6259
6260 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
6261 val, val2, val3, val4);
6262}
6263
6264static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6265 u32 switch_cfg)
a2fbb9ea 6266{
34f80b04 6267 int port = BP_PORT(bp);
a2fbb9ea
ET
6268 u32 ext_phy_type;
6269
a2fbb9ea
ET
6270 switch (switch_cfg) {
6271 case SWITCH_CFG_1G:
6272 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
6273
c18487ee
YR
6274 ext_phy_type =
6275 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6276 switch (ext_phy_type) {
6277 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
6278 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6279 ext_phy_type);
6280
34f80b04
EG
6281 bp->port.supported |= (SUPPORTED_10baseT_Half |
6282 SUPPORTED_10baseT_Full |
6283 SUPPORTED_100baseT_Half |
6284 SUPPORTED_100baseT_Full |
6285 SUPPORTED_1000baseT_Full |
6286 SUPPORTED_2500baseX_Full |
6287 SUPPORTED_TP |
6288 SUPPORTED_FIBRE |
6289 SUPPORTED_Autoneg |
6290 SUPPORTED_Pause |
6291 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6292 break;
6293
6294 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
6295 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
6296 ext_phy_type);
6297
34f80b04
EG
6298 bp->port.supported |= (SUPPORTED_10baseT_Half |
6299 SUPPORTED_10baseT_Full |
6300 SUPPORTED_100baseT_Half |
6301 SUPPORTED_100baseT_Full |
6302 SUPPORTED_1000baseT_Full |
6303 SUPPORTED_TP |
6304 SUPPORTED_FIBRE |
6305 SUPPORTED_Autoneg |
6306 SUPPORTED_Pause |
6307 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6308 break;
6309
6310 default:
6311 BNX2X_ERR("NVRAM config error. "
6312 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 6313 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6314 return;
6315 }
6316
34f80b04
EG
6317 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6318 port*0x10);
6319 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
6320 break;
6321
6322 case SWITCH_CFG_10G:
6323 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
6324
c18487ee
YR
6325 ext_phy_type =
6326 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
6327 switch (ext_phy_type) {
6328 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6329 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
6330 ext_phy_type);
6331
34f80b04
EG
6332 bp->port.supported |= (SUPPORTED_10baseT_Half |
6333 SUPPORTED_10baseT_Full |
6334 SUPPORTED_100baseT_Half |
6335 SUPPORTED_100baseT_Full |
6336 SUPPORTED_1000baseT_Full |
6337 SUPPORTED_2500baseX_Full |
6338 SUPPORTED_10000baseT_Full |
6339 SUPPORTED_TP |
6340 SUPPORTED_FIBRE |
6341 SUPPORTED_Autoneg |
6342 SUPPORTED_Pause |
6343 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6344 break;
6345
6346 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 6347 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 6348 ext_phy_type);
f1410647 6349
34f80b04
EG
6350 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6351 SUPPORTED_FIBRE |
6352 SUPPORTED_Pause |
6353 SUPPORTED_Asym_Pause);
f1410647
ET
6354 break;
6355
a2fbb9ea 6356 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
6357 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
6358 ext_phy_type);
6359
34f80b04
EG
6360 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6361 SUPPORTED_1000baseT_Full |
6362 SUPPORTED_FIBRE |
6363 SUPPORTED_Pause |
6364 SUPPORTED_Asym_Pause);
f1410647
ET
6365 break;
6366
6367 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6368 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
6369 ext_phy_type);
6370
34f80b04
EG
6371 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6372 SUPPORTED_1000baseT_Full |
6373 SUPPORTED_FIBRE |
6374 SUPPORTED_Autoneg |
6375 SUPPORTED_Pause |
6376 SUPPORTED_Asym_Pause);
f1410647
ET
6377 break;
6378
c18487ee
YR
6379 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6380 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
6381 ext_phy_type);
6382
34f80b04
EG
6383 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6384 SUPPORTED_2500baseX_Full |
6385 SUPPORTED_1000baseT_Full |
6386 SUPPORTED_FIBRE |
6387 SUPPORTED_Autoneg |
6388 SUPPORTED_Pause |
6389 SUPPORTED_Asym_Pause);
c18487ee
YR
6390 break;
6391
f1410647
ET
6392 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6393 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
6394 ext_phy_type);
6395
34f80b04
EG
6396 bp->port.supported |= (SUPPORTED_10000baseT_Full |
6397 SUPPORTED_TP |
6398 SUPPORTED_Autoneg |
6399 SUPPORTED_Pause |
6400 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
6401 break;
6402
c18487ee
YR
6403 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6404 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6405 bp->link_params.ext_phy_config);
6406 break;
6407
a2fbb9ea
ET
6408 default:
6409 BNX2X_ERR("NVRAM config error. "
6410 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 6411 bp->link_params.ext_phy_config);
a2fbb9ea
ET
6412 return;
6413 }
6414
34f80b04
EG
6415 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6416 port*0x18);
6417 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 6418
a2fbb9ea
ET
6419 break;
6420
6421 default:
6422 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 6423 bp->port.link_config);
a2fbb9ea
ET
6424 return;
6425 }
34f80b04 6426 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
6427
6428 /* mask what we support according to speed_cap_mask */
c18487ee
YR
6429 if (!(bp->link_params.speed_cap_mask &
6430 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 6431 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 6432
c18487ee
YR
6433 if (!(bp->link_params.speed_cap_mask &
6434 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 6435 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 6436
c18487ee
YR
6437 if (!(bp->link_params.speed_cap_mask &
6438 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 6439 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 6440
c18487ee
YR
6441 if (!(bp->link_params.speed_cap_mask &
6442 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 6443 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 6444
c18487ee
YR
6445 if (!(bp->link_params.speed_cap_mask &
6446 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
6447 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
6448 SUPPORTED_1000baseT_Full);
a2fbb9ea 6449
c18487ee
YR
6450 if (!(bp->link_params.speed_cap_mask &
6451 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 6452 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 6453
c18487ee
YR
6454 if (!(bp->link_params.speed_cap_mask &
6455 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 6456 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 6457
34f80b04 6458 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
6459}
6460
34f80b04 6461static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 6462{
c18487ee 6463 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 6464
34f80b04 6465 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 6466 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 6467 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 6468 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6469 bp->port.advertising = bp->port.supported;
a2fbb9ea 6470 } else {
c18487ee
YR
6471 u32 ext_phy_type =
6472 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
6473
6474 if ((ext_phy_type ==
6475 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
6476 (ext_phy_type ==
6477 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 6478 /* force 10G, no AN */
c18487ee 6479 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 6480 bp->port.advertising =
a2fbb9ea
ET
6481 (ADVERTISED_10000baseT_Full |
6482 ADVERTISED_FIBRE);
6483 break;
6484 }
6485 BNX2X_ERR("NVRAM config error. "
6486 "Invalid link_config 0x%x"
6487 " Autoneg not supported\n",
34f80b04 6488 bp->port.link_config);
a2fbb9ea
ET
6489 return;
6490 }
6491 break;
6492
6493 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 6494 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 6495 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
6496 bp->port.advertising = (ADVERTISED_10baseT_Full |
6497 ADVERTISED_TP);
a2fbb9ea
ET
6498 } else {
6499 BNX2X_ERR("NVRAM config error. "
6500 "Invalid link_config 0x%x"
6501 " speed_cap_mask 0x%x\n",
34f80b04 6502 bp->port.link_config,
c18487ee 6503 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6504 return;
6505 }
6506 break;
6507
6508 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 6509 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
6510 bp->link_params.req_line_speed = SPEED_10;
6511 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6512 bp->port.advertising = (ADVERTISED_10baseT_Half |
6513 ADVERTISED_TP);
a2fbb9ea
ET
6514 } else {
6515 BNX2X_ERR("NVRAM config error. "
6516 "Invalid link_config 0x%x"
6517 " speed_cap_mask 0x%x\n",
34f80b04 6518 bp->port.link_config,
c18487ee 6519 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6520 return;
6521 }
6522 break;
6523
6524 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 6525 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 6526 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
6527 bp->port.advertising = (ADVERTISED_100baseT_Full |
6528 ADVERTISED_TP);
a2fbb9ea
ET
6529 } else {
6530 BNX2X_ERR("NVRAM config error. "
6531 "Invalid link_config 0x%x"
6532 " speed_cap_mask 0x%x\n",
34f80b04 6533 bp->port.link_config,
c18487ee 6534 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6535 return;
6536 }
6537 break;
6538
6539 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 6540 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
6541 bp->link_params.req_line_speed = SPEED_100;
6542 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
6543 bp->port.advertising = (ADVERTISED_100baseT_Half |
6544 ADVERTISED_TP);
a2fbb9ea
ET
6545 } else {
6546 BNX2X_ERR("NVRAM config error. "
6547 "Invalid link_config 0x%x"
6548 " speed_cap_mask 0x%x\n",
34f80b04 6549 bp->port.link_config,
c18487ee 6550 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6551 return;
6552 }
6553 break;
6554
6555 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 6556 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 6557 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
6558 bp->port.advertising = (ADVERTISED_1000baseT_Full |
6559 ADVERTISED_TP);
a2fbb9ea
ET
6560 } else {
6561 BNX2X_ERR("NVRAM config error. "
6562 "Invalid link_config 0x%x"
6563 " speed_cap_mask 0x%x\n",
34f80b04 6564 bp->port.link_config,
c18487ee 6565 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6566 return;
6567 }
6568 break;
6569
6570 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 6571 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 6572 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
6573 bp->port.advertising = (ADVERTISED_2500baseX_Full |
6574 ADVERTISED_TP);
a2fbb9ea
ET
6575 } else {
6576 BNX2X_ERR("NVRAM config error. "
6577 "Invalid link_config 0x%x"
6578 " speed_cap_mask 0x%x\n",
34f80b04 6579 bp->port.link_config,
c18487ee 6580 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6581 return;
6582 }
6583 break;
6584
6585 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6586 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6587 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 6588 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 6589 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
6590 bp->port.advertising = (ADVERTISED_10000baseT_Full |
6591 ADVERTISED_FIBRE);
a2fbb9ea
ET
6592 } else {
6593 BNX2X_ERR("NVRAM config error. "
6594 "Invalid link_config 0x%x"
6595 " speed_cap_mask 0x%x\n",
34f80b04 6596 bp->port.link_config,
c18487ee 6597 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
6598 return;
6599 }
6600 break;
6601
6602 default:
6603 BNX2X_ERR("NVRAM config error. "
6604 "BAD link speed link_config 0x%x\n",
34f80b04 6605 bp->port.link_config);
c18487ee 6606 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 6607 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
6608 break;
6609 }
a2fbb9ea 6610
34f80b04
EG
6611 bp->link_params.req_flow_ctrl = (bp->port.link_config &
6612 PORT_FEATURE_FLOW_CONTROL_MASK);
c18487ee 6613 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
34f80b04 6614 !(bp->port.supported & SUPPORTED_Autoneg))
c18487ee 6615 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 6616
c18487ee 6617 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 6618 " advertising 0x%x\n",
c18487ee
YR
6619 bp->link_params.req_line_speed,
6620 bp->link_params.req_duplex,
34f80b04 6621 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
6622}
6623
34f80b04 6624static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 6625{
34f80b04
EG
6626 int port = BP_PORT(bp);
6627 u32 val, val2;
a2fbb9ea 6628
c18487ee 6629 bp->link_params.bp = bp;
34f80b04 6630 bp->link_params.port = port;
c18487ee 6631
c18487ee 6632 bp->link_params.serdes_config =
f1410647 6633 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 6634 bp->link_params.lane_config =
a2fbb9ea 6635 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 6636 bp->link_params.ext_phy_config =
a2fbb9ea
ET
6637 SHMEM_RD(bp,
6638 dev_info.port_hw_config[port].external_phy_config);
c18487ee 6639 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
6640 SHMEM_RD(bp,
6641 dev_info.port_hw_config[port].speed_capability_mask);
6642
34f80b04 6643 bp->port.link_config =
a2fbb9ea
ET
6644 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6645
34f80b04
EG
6646 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
6647 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
6648 " link_config 0x%08x\n",
c18487ee
YR
6649 bp->link_params.serdes_config,
6650 bp->link_params.lane_config,
6651 bp->link_params.ext_phy_config,
34f80b04 6652 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 6653
34f80b04 6654 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
6655 PORT_FEATURE_CONNECTED_SWITCH_MASK);
6656 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
6657
6658 bnx2x_link_settings_requested(bp);
6659
6660 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6661 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6662 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6663 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6664 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6665 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6666 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6667 bp->dev->dev_addr[5] = (u8)(val & 0xff);
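/* Example (editor's illustration): mac_upper 0x00000010 and
 * mac_lower 0x18abcdef unpack through the shifts above to the
 * station address 00:10:18:ab:cd:ef.
 */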
c18487ee
YR
6668 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6669 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
6670}
6671
6672static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6673{
6674 int func = BP_FUNC(bp);
6675 u32 val, val2;
6676 int rc = 0;
a2fbb9ea 6677
34f80b04 6678 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 6679
34f80b04
EG
6680 bp->e1hov = 0;
6681 bp->e1hmf = 0;
6682 if (CHIP_IS_E1H(bp)) {
6683 bp->mf_config =
6684 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 6685
34f80b04
EG
6686 val =
6687 (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
6688 FUNC_MF_CFG_E1HOV_TAG_MASK);
6689 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 6690
34f80b04
EG
6691 bp->e1hov = val;
6692 bp->e1hmf = 1;
6693 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
6694 "(0x%04x)\n",
6695 func, bp->e1hov, bp->e1hov);
6696 } else {
6697 BNX2X_DEV_INFO("Single function mode\n");
6698 if (BP_E1HVN(bp)) {
6699 BNX2X_ERR("!!! No valid E1HOV for func %d,"
6700 " aborting\n", func);
6701 rc = -EPERM;
6702 }
6703 }
6704 }
a2fbb9ea 6705
34f80b04
EG
6706 if (!BP_NOMCP(bp)) {
6707 bnx2x_get_port_hwinfo(bp);
6708
6709 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6710 DRV_MSG_SEQ_NUMBER_MASK);
6711 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6712 }
6713
6714 if (IS_E1HMF(bp)) {
6715 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6716 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
6717 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6718 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6719 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6720 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6721 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6722 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6723 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
6724 bp->dev->dev_addr[5] = (u8)(val & 0xff);
6725 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6726 ETH_ALEN);
6727 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6728 ETH_ALEN);
a2fbb9ea 6729 }
34f80b04
EG
6730
6731 return rc;
a2fbb9ea
ET
6732 }
6733
34f80b04
EG
6734 if (BP_NOMCP(bp)) {
6735 /* only supposed to happen on emulation/FPGA */
6736 BNX2X_ERR("warning rendom MAC workaround active\n");
6737 random_ether_addr(bp->dev->dev_addr);
6738 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6739 }
a2fbb9ea 6740
34f80b04
EG
6741 return rc;
6742}
6743
6744static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6745{
6746 int func = BP_FUNC(bp);
6747 int rc;
6748
6749 if (nomcp)
6750 bp->flags |= NO_MCP_FLAG;
a2fbb9ea 6751
34f80b04 6752 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 6753
34f80b04
EG
6754 INIT_WORK(&bp->sp_task, bnx2x_sp_task);
6755 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
6756
6757 rc = bnx2x_get_hwinfo(bp);
6758
6759 /* need to reset chip if undi was active */
6760 if (!BP_NOMCP(bp))
6761 bnx2x_undi_unload(bp);
6762
6763 if (CHIP_REV_IS_FPGA(bp))
6764 printk(KERN_ERR PFX "FPGA detected\n");
6765
6766 if (BP_NOMCP(bp) && (func == 0))
6767 printk(KERN_ERR PFX
6768 "MCP disabled, must load devices in order!\n");
6769
6770 bp->tx_ring_size = MAX_TX_AVAIL;
6771 bp->rx_ring_size = MAX_RX_AVAIL;
6772
6773 bp->rx_csum = 1;
6774 bp->rx_offset = 0;
6775
6776 bp->tx_ticks = 50;
6777 bp->rx_ticks = 25;
6778
6779 bp->stats_ticks = 1000000 & 0xffff00;
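/* editor's note: 1000000 & 0xffff00 == 999936, i.e. a ~1 second
 * stats period rounded down so the low byte is clear (presumably an
 * alignment requirement of the firmware field; an assumption, not
 * stated in the source).
 */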
6780
6781 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6782 bp->current_interval = (poll ? poll : bp->timer_interval);
6783
6784 init_timer(&bp->timer);
6785 bp->timer.expires = jiffies + bp->current_interval;
6786 bp->timer.data = (unsigned long) bp;
6787 bp->timer.function = bnx2x_timer;
6788
6789 return rc;
a2fbb9ea
ET
6790}
6791
6792/*
6793 * ethtool service functions
6794 */
6795
6796/* All ethtool functions called with rtnl_lock */
6797
6798static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6799{
6800 struct bnx2x *bp = netdev_priv(dev);
6801
34f80b04
EG
6802 cmd->supported = bp->port.supported;
6803 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
6804
6805 if (netif_carrier_ok(dev)) {
c18487ee
YR
6806 cmd->speed = bp->link_vars.line_speed;
6807 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 6808 } else {
c18487ee
YR
6809 cmd->speed = bp->link_params.req_line_speed;
6810 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 6811 }
34f80b04
EG
6812 if (IS_E1HMF(bp)) {
6813 u16 vn_max_rate;
6814
6815 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
6816 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
6817 if (vn_max_rate < cmd->speed)
6818 cmd->speed = vn_max_rate;
6819 }
a2fbb9ea 6820
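/* Example (illustrative): a FUNC_MF_CFG_MAX_BW field of 25 yields
 * vn_max_rate = 2500, capping the reported speed at 2.5 Gb/s for
 * that virtual function.
 */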
c18487ee
YR
6821 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
6822 u32 ext_phy_type =
6823 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
6824
6825 switch (ext_phy_type) {
6826 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
6827 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
6828 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
6829 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 6830 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
6831 cmd->port = PORT_FIBRE;
6832 break;
6833
6834 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6835 cmd->port = PORT_TP;
6836 break;
6837
c18487ee
YR
6838 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
6839 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
6840 bp->link_params.ext_phy_config);
6841 break;
6842
6843 default:
6844 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
6845 bp->link_params.ext_phy_config);
6846 break;
6847 }
6848 } else
a2fbb9ea 6849 cmd->port = PORT_TP;
a2fbb9ea 6850
34f80b04 6851 cmd->phy_address = bp->port.phy_addr;
6852 cmd->transceiver = XCVR_INTERNAL;
6853
c18487ee 6854 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 6855 cmd->autoneg = AUTONEG_ENABLE;
f1410647 6856 else
a2fbb9ea 6857 cmd->autoneg = AUTONEG_DISABLE;
6858
6859 cmd->maxtxpkt = 0;
6860 cmd->maxrxpkt = 0;
6861
6862 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
6863 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
6864 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
6865 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
6866 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
6867 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
6868 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
6869
6870 return 0;
6871}
6872
6873static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6874{
6875 struct bnx2x *bp = netdev_priv(dev);
6876 u32 advertising;
6877
6878 if (IS_E1HMF(bp))
6879 return 0;
6880
6881 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
6882 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
6883 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
6884 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
6885 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
6886 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
6887 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
6888
a2fbb9ea 6889 if (cmd->autoneg == AUTONEG_ENABLE) {
6890 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
6891 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 6892 return -EINVAL;
f1410647 6893 }
6894
6895 /* advertise the requested speed and duplex if supported */
34f80b04 6896 cmd->advertising &= bp->port.supported;
a2fbb9ea 6897
6898 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
6899 bp->link_params.req_duplex = DUPLEX_FULL;
6900 bp->port.advertising |= (ADVERTISED_Autoneg |
6901 cmd->advertising);
6902
6903 } else { /* forced speed */
6904 /* advertise the requested speed and duplex if supported */
6905 switch (cmd->speed) {
6906 case SPEED_10:
6907 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 6908 if (!(bp->port.supported &
6909 SUPPORTED_10baseT_Full)) {
6910 DP(NETIF_MSG_LINK,
6911 "10M full not supported\n");
a2fbb9ea 6912 return -EINVAL;
f1410647 6913 }
6914
6915 advertising = (ADVERTISED_10baseT_Full |
6916 ADVERTISED_TP);
6917 } else {
34f80b04 6918 if (!(bp->port.supported &
6919 SUPPORTED_10baseT_Half)) {
6920 DP(NETIF_MSG_LINK,
6921 "10M half not supported\n");
a2fbb9ea 6922 return -EINVAL;
f1410647 6923 }
6924
6925 advertising = (ADVERTISED_10baseT_Half |
6926 ADVERTISED_TP);
6927 }
6928 break;
6929
6930 case SPEED_100:
6931 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 6932 if (!(bp->port.supported &
6933 SUPPORTED_100baseT_Full)) {
6934 DP(NETIF_MSG_LINK,
6935 "100M full not supported\n");
a2fbb9ea 6936 return -EINVAL;
f1410647 6937 }
6938
6939 advertising = (ADVERTISED_100baseT_Full |
6940 ADVERTISED_TP);
6941 } else {
34f80b04 6942 if (!(bp->port.supported &
6943 SUPPORTED_100baseT_Half)) {
6944 DP(NETIF_MSG_LINK,
6945 "100M half not supported\n");
a2fbb9ea 6946 return -EINVAL;
f1410647 6947 }
6948
6949 advertising = (ADVERTISED_100baseT_Half |
6950 ADVERTISED_TP);
6951 }
6952 break;
6953
6954 case SPEED_1000:
6955 if (cmd->duplex != DUPLEX_FULL) {
6956 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 6957 return -EINVAL;
f1410647 6958 }
a2fbb9ea 6959
34f80b04 6960 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 6961 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 6962 return -EINVAL;
f1410647 6963 }
6964
6965 advertising = (ADVERTISED_1000baseT_Full |
6966 ADVERTISED_TP);
6967 break;
6968
6969 case SPEED_2500:
6970 if (cmd->duplex != DUPLEX_FULL) {
6971 DP(NETIF_MSG_LINK,
6972 "2.5G half not supported\n");
a2fbb9ea 6973 return -EINVAL;
f1410647 6974 }
a2fbb9ea 6975
34f80b04 6976 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
6977 DP(NETIF_MSG_LINK,
6978 "2.5G full not supported\n");
a2fbb9ea 6979 return -EINVAL;
f1410647 6980 }
a2fbb9ea 6981
f1410647 6982 advertising = (ADVERTISED_2500baseX_Full |
6983 ADVERTISED_TP);
6984 break;
6985
6986 case SPEED_10000:
6987 if (cmd->duplex != DUPLEX_FULL) {
6988 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 6989 return -EINVAL;
f1410647 6990 }
a2fbb9ea 6991
34f80b04 6992 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 6993 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 6994 return -EINVAL;
f1410647 6995 }
6996
6997 advertising = (ADVERTISED_10000baseT_Full |
6998 ADVERTISED_FIBRE);
6999 break;
7000
7001 default:
f1410647 7002 DP(NETIF_MSG_LINK, "Unsupported speed\n");
7003 return -EINVAL;
7004 }
7005
7006 bp->link_params.req_line_speed = cmd->speed;
7007 bp->link_params.req_duplex = cmd->duplex;
34f80b04 7008 bp->port.advertising = advertising;
7009 }
7010
c18487ee 7011 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 7012 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 7013 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 7014 bp->port.advertising);
a2fbb9ea 7015
34f80b04 7016 if (netif_running(dev)) {
bb2a0f7a 7017 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7018 bnx2x_link_set(bp);
7019 }
7020
7021 return 0;
7022}
7023
7024#define PHY_FW_VER_LEN 10
7025
7026static void bnx2x_get_drvinfo(struct net_device *dev,
7027 struct ethtool_drvinfo *info)
7028{
7029 struct bnx2x *bp = netdev_priv(dev);
c18487ee 7030 char phy_fw_ver[PHY_FW_VER_LEN];
7031
7032 strcpy(info->driver, DRV_MODULE_NAME);
7033 strcpy(info->version, DRV_MODULE_VERSION);
7034
7035 phy_fw_ver[0] = '\0';
7036 if (bp->port.pmf) {
7037 bnx2x_phy_hw_lock(bp);
7038 bnx2x_get_ext_phy_fw_version(&bp->link_params,
7039 (bp->state != BNX2X_STATE_CLOSED),
7040 phy_fw_ver, PHY_FW_VER_LEN);
7041 bnx2x_phy_hw_unlock(bp);
7042 }
7043
7044 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 7045 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee 7046 BCM_5710_FW_REVISION_VERSION,
34f80b04 7047 BCM_5710_FW_COMPILE_FLAGS, bp->common.bc_ver,
c18487ee 7048 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
7049 strcpy(info->bus_info, pci_name(bp->pdev));
7050 info->n_stats = BNX2X_NUM_STATS;
7051 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 7052 info->eedump_len = bp->common.flash_size;
7053 info->regdump_len = 0;
7054}
7055
7056static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7057{
7058 struct bnx2x *bp = netdev_priv(dev);
7059
7060 if (bp->flags & NO_WOL_FLAG) {
7061 wol->supported = 0;
7062 wol->wolopts = 0;
7063 } else {
7064 wol->supported = WAKE_MAGIC;
7065 if (bp->wol)
7066 wol->wolopts = WAKE_MAGIC;
7067 else
7068 wol->wolopts = 0;
7069 }
7070 memset(&wol->sopass, 0, sizeof(wol->sopass));
7071}
7072
7073static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
7074{
7075 struct bnx2x *bp = netdev_priv(dev);
7076
7077 if (wol->wolopts & ~WAKE_MAGIC)
7078 return -EINVAL;
7079
7080 if (wol->wolopts & WAKE_MAGIC) {
7081 if (bp->flags & NO_WOL_FLAG)
7082 return -EINVAL;
7083
7084 bp->wol = 1;
34f80b04 7085 } else
a2fbb9ea 7086 bp->wol = 0;
34f80b04 7087
7088 return 0;
7089}
7090
7091static u32 bnx2x_get_msglevel(struct net_device *dev)
7092{
7093 struct bnx2x *bp = netdev_priv(dev);
7094
7095 return bp->msglevel;
7096}
7097
7098static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
7099{
7100 struct bnx2x *bp = netdev_priv(dev);
7101
7102 if (capable(CAP_NET_ADMIN))
7103 bp->msglevel = level;
7104}
7105
7106static int bnx2x_nway_reset(struct net_device *dev)
7107{
7108 struct bnx2x *bp = netdev_priv(dev);
7109
7110 if (!bp->port.pmf)
7111 return 0;
a2fbb9ea 7112
34f80b04 7113 if (netif_running(dev)) {
bb2a0f7a 7114 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7115 bnx2x_link_set(bp);
7116 }
7117
7118 return 0;
7119}
7120
7121static int bnx2x_get_eeprom_len(struct net_device *dev)
7122{
7123 struct bnx2x *bp = netdev_priv(dev);
7124
34f80b04 7125 return bp->common.flash_size;
7126}
7127
7128static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
7129{
34f80b04 7130 int port = BP_PORT(bp);
7131 int count, i;
7132 u32 val = 0;
7133
7134 /* adjust timeout for emulation/FPGA */
7135 count = NVRAM_TIMEOUT_COUNT;
7136 if (CHIP_REV_IS_SLOW(bp))
7137 count *= 100;
7138
7139 /* request access to nvram interface */
7140 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7141 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
7142
7143 for (i = 0; i < count*10; i++) {
7144 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7145 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
7146 break;
7147
7148 udelay(5);
7149 }
7150
7151 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 7152 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
7153 return -EBUSY;
7154 }
7155
7156 return 0;
7157}
7158
7159static int bnx2x_release_nvram_lock(struct bnx2x *bp)
7160{
34f80b04 7161 int port = BP_PORT(bp);
7162 int count, i;
7163 u32 val = 0;
7164
7165 /* adjust timeout for emulation/FPGA */
7166 count = NVRAM_TIMEOUT_COUNT;
7167 if (CHIP_REV_IS_SLOW(bp))
7168 count *= 100;
7169
7170 /* relinquish nvram interface */
7171 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
7172 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
7173
7174 for (i = 0; i < count*10; i++) {
7175 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
7176 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
7177 break;
7178
7179 udelay(5);
7180 }
7181
7182 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 7183 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
7184 return -EBUSY;
7185 }
7186
7187 return 0;
7188}
7189
7190static void bnx2x_enable_nvram_access(struct bnx2x *bp)
7191{
7192 u32 val;
7193
7194 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7195
7196 /* enable both bits, even on read */
7197 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7198 (val | MCPR_NVM_ACCESS_ENABLE_EN |
7199 MCPR_NVM_ACCESS_ENABLE_WR_EN));
7200}
7201
7202static void bnx2x_disable_nvram_access(struct bnx2x *bp)
7203{
7204 u32 val;
7205
7206 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
7207
7208 /* disable both bits, even after read */
7209 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
7210 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
7211 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
7212}
7213
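/* Reading one NVRAM dword is a fixed sequence: clear the DONE bit,
 * latch the flash address, issue DOIT with the caller's FIRST/LAST
 * flags, then poll the command register until DONE is set and the
 * data can be taken from the read register.
 */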
7214static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
7215 u32 cmd_flags)
7216{
f1410647 7217 int count, i, rc;
7218 u32 val;
7219
7220 /* build the command word */
7221 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
7222
7223 /* need to clear DONE bit separately */
7224 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7225
7226 /* address of the NVRAM to read from */
7227 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7228 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7229
7230 /* issue a read command */
7231 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7232
7233 /* adjust timeout for emulation/FPGA */
7234 count = NVRAM_TIMEOUT_COUNT;
7235 if (CHIP_REV_IS_SLOW(bp))
7236 count *= 100;
7237
7238 /* wait for completion */
7239 *ret_val = 0;
7240 rc = -EBUSY;
7241 for (i = 0; i < count; i++) {
7242 udelay(5);
7243 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7244
7245 if (val & MCPR_NVM_COMMAND_DONE) {
7246 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
7247 /* we read nvram data in cpu order
7248 * but ethtool sees it as an array of bytes
7249 * converting to big-endian will do the work */
7250 val = cpu_to_be32(val);
7251 *ret_val = val;
7252 rc = 0;
7253 break;
7254 }
7255 }
7256
7257 return rc;
7258}
7259
7260static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
7261 int buf_size)
7262{
7263 int rc;
7264 u32 cmd_flags;
7265 u32 val;
7266
7267 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7268 DP(BNX2X_MSG_NVM,
c14423fe 7269 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7270 offset, buf_size);
7271 return -EINVAL;
7272 }
7273
7274 if (offset + buf_size > bp->common.flash_size) {
7275 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7276 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7277 offset, buf_size, bp->common.flash_size);
7278 return -EINVAL;
7279 }
7280
7281 /* request access to nvram interface */
7282 rc = bnx2x_acquire_nvram_lock(bp);
7283 if (rc)
7284 return rc;
7285
7286 /* enable access to nvram interface */
7287 bnx2x_enable_nvram_access(bp);
7288
7289 /* read the first word(s) */
7290 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7291 while ((buf_size > sizeof(u32)) && (rc == 0)) {
7292 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7293 memcpy(ret_buf, &val, 4);
7294
7295 /* advance to the next dword */
7296 offset += sizeof(u32);
7297 ret_buf += sizeof(u32);
7298 buf_size -= sizeof(u32);
7299 cmd_flags = 0;
7300 }
7301
7302 if (rc == 0) {
7303 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7304 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
7305 memcpy(ret_buf, &val, 4);
7306 }
7307
7308 /* disable access to nvram interface */
7309 bnx2x_disable_nvram_access(bp);
7310 bnx2x_release_nvram_lock(bp);
7311
7312 return rc;
7313}
7314
7315static int bnx2x_get_eeprom(struct net_device *dev,
7316 struct ethtool_eeprom *eeprom, u8 *eebuf)
7317{
7318 struct bnx2x *bp = netdev_priv(dev);
7319 int rc;
7320
34f80b04 7321 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7322 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7323 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7324 eeprom->len, eeprom->len);
7325
7326 /* parameters already validated in ethtool_get_eeprom */
7327
7328 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7329
7330 return rc;
7331}
7332
7333static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
7334 u32 cmd_flags)
7335{
f1410647 7336 int count, i, rc;
7337
7338 /* build the command word */
7339 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
7340
7341 /* need to clear DONE bit separately */
7342 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
7343
7344 /* write the data */
7345 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
7346
7347 /* address of the NVRAM to write to */
7348 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
7349 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
7350
7351 /* issue the write command */
7352 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
7353
7354 /* adjust timeout for emulation/FPGA */
7355 count = NVRAM_TIMEOUT_COUNT;
7356 if (CHIP_REV_IS_SLOW(bp))
7357 count *= 100;
7358
7359 /* wait for completion */
7360 rc = -EBUSY;
7361 for (i = 0; i < count; i++) {
7362 udelay(5);
7363 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
7364 if (val & MCPR_NVM_COMMAND_DONE) {
7365 rc = 0;
7366 break;
7367 }
7368 }
7369
7370 return rc;
7371}
7372
f1410647 7373#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
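/* BYTE_OFFSET() turns a byte address into a bit shift within its
 * aligned dword, e.g. offset 0x13 -> byte 3 -> shift of 24 bits. */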
7374
7375static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
7376 int buf_size)
7377{
7378 int rc;
7379 u32 cmd_flags;
7380 u32 align_offset;
7381 u32 val;
7382
7383 if (offset + buf_size > bp->common.flash_size) {
7384 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7385 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7386 offset, buf_size, bp->common.flash_size);
7387 return -EINVAL;
7388 }
7389
7390 /* request access to nvram interface */
7391 rc = bnx2x_acquire_nvram_lock(bp);
7392 if (rc)
7393 return rc;
7394
7395 /* enable access to nvram interface */
7396 bnx2x_enable_nvram_access(bp);
7397
7398 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
7399 align_offset = (offset & ~0x03);
7400 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
7401
7402 if (rc == 0) {
7403 val &= ~(0xff << BYTE_OFFSET(offset));
7404 val |= (*data_buf << BYTE_OFFSET(offset));
7405
7406 /* nvram data is returned as an array of bytes
7407 * convert it back to cpu order */
7408 val = be32_to_cpu(val);
7409
7410 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
7411 cmd_flags);
7412 }
7413
7414 /* disable access to nvram interface */
7415 bnx2x_disable_nvram_access(bp);
7416 bnx2x_release_nvram_lock(bp);
7417
7418 return rc;
7419}
7420
7421static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
7422 int buf_size)
7423{
7424 int rc;
7425 u32 cmd_flags;
7426 u32 val;
7427 u32 written_so_far;
7428
34f80b04 7429 if (buf_size == 1) /* ethtool */
a2fbb9ea 7430 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
7431
7432 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 7433 DP(BNX2X_MSG_NVM,
c14423fe 7434 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
7435 offset, buf_size);
7436 return -EINVAL;
7437 }
7438
7439 if (offset + buf_size > bp->common.flash_size) {
7440 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 7441 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 7442 offset, buf_size, bp->common.flash_size);
7443 return -EINVAL;
7444 }
7445
7446 /* request access to nvram interface */
7447 rc = bnx2x_acquire_nvram_lock(bp);
7448 if (rc)
7449 return rc;
7450
7451 /* enable access to nvram interface */
7452 bnx2x_enable_nvram_access(bp);
7453
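/* The flash is written one dword at a time: FIRST opens a burst
 * (and must reopen it on every page boundary), LAST closes it at a
 * page boundary or at the end of the buffer. Assuming the usual
 * 256-byte NVRAM page, that means a new burst every 64 dwords.
 */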
7454 written_so_far = 0;
7455 cmd_flags = MCPR_NVM_COMMAND_FIRST;
7456 while ((written_so_far < buf_size) && (rc == 0)) {
7457 if (written_so_far == (buf_size - sizeof(u32)))
7458 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7459 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
7460 cmd_flags |= MCPR_NVM_COMMAND_LAST;
7461 else if ((offset % NVRAM_PAGE_SIZE) == 0)
7462 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
7463
7464 memcpy(&val, data_buf, 4);
7465
7466 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
7467
7468 /* advance to the next dword */
7469 offset += sizeof(u32);
7470 data_buf += sizeof(u32);
7471 written_so_far += sizeof(u32);
7472 cmd_flags = 0;
7473 }
7474
7475 /* disable access to nvram interface */
7476 bnx2x_disable_nvram_access(bp);
7477 bnx2x_release_nvram_lock(bp);
7478
7479 return rc;
7480}
7481
7482static int bnx2x_set_eeprom(struct net_device *dev,
7483 struct ethtool_eeprom *eeprom, u8 *eebuf)
7484{
7485 struct bnx2x *bp = netdev_priv(dev);
7486 int rc;
7487
34f80b04 7488 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
7489 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
7490 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
7491 eeprom->len, eeprom->len);
7492
7493 /* parameters already validated in ethtool_set_eeprom */
7494
c18487ee 7495 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
7496 if (eeprom->magic == 0x00504859)
7497 if (bp->port.pmf) {
7498
7499 bnx2x_phy_hw_lock(bp);
7500 rc = bnx2x_flash_download(bp, BP_PORT(bp),
7501 bp->link_params.ext_phy_config,
7502 (bp->state != BNX2X_STATE_CLOSED),
7503 eebuf, eeprom->len);
7504 if ((bp->state == BNX2X_STATE_OPEN) ||
7505 (bp->state == BNX2X_STATE_DISABLED)) {
7506 rc |= bnx2x_link_reset(&bp->link_params,
7507 &bp->link_vars);
7508 rc |= bnx2x_phy_init(&bp->link_params,
7509 &bp->link_vars);
bb2a0f7a 7510 }
7511 bnx2x_phy_hw_unlock(bp);
7512
7513 } else /* Only the PMF can access the PHY */
7514 return -EINVAL;
7515 else
c18487ee 7516 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7517
7518 return rc;
7519}
7520
7521static int bnx2x_get_coalesce(struct net_device *dev,
7522 struct ethtool_coalesce *coal)
7523{
7524 struct bnx2x *bp = netdev_priv(dev);
7525
7526 memset(coal, 0, sizeof(struct ethtool_coalesce));
7527
7528 coal->rx_coalesce_usecs = bp->rx_ticks;
7529 coal->tx_coalesce_usecs = bp->tx_ticks;
7530 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7531
7532 return 0;
7533}
7534
7535static int bnx2x_set_coalesce(struct net_device *dev,
7536 struct ethtool_coalesce *coal)
7537{
7538 struct bnx2x *bp = netdev_priv(dev);
7539
7540 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7541 if (bp->rx_ticks > 3000)
7542 bp->rx_ticks = 3000;
7543
7544 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7545 if (bp->tx_ticks > 0x3000)
7546 bp->tx_ticks = 0x3000;
7547
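/* the stats ticks value apparently has a 256 usec granularity (the
 * low byte is masked off), so e.g. a request for 1000000 usec
 * (0xf4240) is stored as 0xf4200 */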
7548 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7549 if (bp->stats_ticks > 0xffff00)
7550 bp->stats_ticks = 0xffff00;
7551 bp->stats_ticks &= 0xffff00;
7552
34f80b04 7553 if (netif_running(dev))
7554 bnx2x_update_coalesce(bp);
7555
7556 return 0;
7557}
7558
7559static void bnx2x_get_ringparam(struct net_device *dev,
7560 struct ethtool_ringparam *ering)
7561{
7562 struct bnx2x *bp = netdev_priv(dev);
7563
7564 ering->rx_max_pending = MAX_RX_AVAIL;
7565 ering->rx_mini_max_pending = 0;
7566 ering->rx_jumbo_max_pending = 0;
7567
7568 ering->rx_pending = bp->rx_ring_size;
7569 ering->rx_mini_pending = 0;
7570 ering->rx_jumbo_pending = 0;
7571
7572 ering->tx_max_pending = MAX_TX_AVAIL;
7573 ering->tx_pending = bp->tx_ring_size;
7574}
7575
7576static int bnx2x_set_ringparam(struct net_device *dev,
7577 struct ethtool_ringparam *ering)
7578{
7579 struct bnx2x *bp = netdev_priv(dev);
34f80b04 7580 int rc = 0;
7581
7582 if ((ering->rx_pending > MAX_RX_AVAIL) ||
7583 (ering->tx_pending > MAX_TX_AVAIL) ||
7584 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
7585 return -EINVAL;
7586
7587 bp->rx_ring_size = ering->rx_pending;
7588 bp->tx_ring_size = ering->tx_pending;
7589
7590 if (netif_running(dev)) {
7591 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7592 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
7593 }
7594
34f80b04 7595 return rc;
7596}
7597
7598static void bnx2x_get_pauseparam(struct net_device *dev,
7599 struct ethtool_pauseparam *epause)
7600{
7601 struct bnx2x *bp = netdev_priv(dev);
7602
7603 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
7604 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
7605
7606 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
7607 FLOW_CTRL_RX);
7608 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
7609 FLOW_CTRL_TX);
7610
7611 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7612 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7613 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7614}
7615
7616static int bnx2x_set_pauseparam(struct net_device *dev,
7617 struct ethtool_pauseparam *epause)
7618{
7619 struct bnx2x *bp = netdev_priv(dev);
7620
7621 if (IS_E1HMF(bp))
7622 return 0;
7623
7624 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
7625 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
7626 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
7627
c18487ee 7628 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 7629
f1410647 7630 if (epause->rx_pause)
7631 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
7632
f1410647 7633 if (epause->tx_pause)
7634 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
7635
7636 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
7637 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 7638
c18487ee 7639 if (epause->autoneg) {
34f80b04 7640 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
7641 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
7642 return -EINVAL;
7643 }
a2fbb9ea 7644
7645 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
7646 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
7647 }
a2fbb9ea 7648
7649 DP(NETIF_MSG_LINK,
7650 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
7651
7652 if (netif_running(dev)) {
bb2a0f7a 7653 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7654 bnx2x_link_set(bp);
7655 }
7656
7657 return 0;
7658}
7659
7660static u32 bnx2x_get_rx_csum(struct net_device *dev)
7661{
7662 struct bnx2x *bp = netdev_priv(dev);
7663
7664 return bp->rx_csum;
7665}
7666
7667static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
7668{
7669 struct bnx2x *bp = netdev_priv(dev);
7670
7671 bp->rx_csum = data;
7672 return 0;
7673}
7674
7675static int bnx2x_set_tso(struct net_device *dev, u32 data)
7676{
7677 if (data)
7678 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7679 else
7680 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
7681 return 0;
7682}
7683
7684static struct {
7685 char string[ETH_GSTRING_LEN];
7686} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
7687 { "MC Errors (online)" }
7688};
7689
7690static int bnx2x_self_test_count(struct net_device *dev)
7691{
7692 return BNX2X_NUM_TESTS;
7693}
7694
7695static void bnx2x_self_test(struct net_device *dev,
7696 struct ethtool_test *etest, u64 *buf)
7697{
7698 struct bnx2x *bp = netdev_priv(dev);
7699 int stats_state;
7700
7701 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
7702
7703 if (bp->state != BNX2X_STATE_OPEN) {
7704 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
7705 return;
7706 }
7707
7708 stats_state = bp->stats_state;
7709
7710 if (bnx2x_mc_assert(bp) != 0) {
7711 buf[0] = 1;
7712 etest->flags |= ETH_TEST_FL_FAILED;
7713 }
7714
7715#ifdef BNX2X_EXTRA_DEBUG
7716 bnx2x_panic_dump(bp);
7717#endif
7718}
7719
7720static const struct {
7721 long offset;
7722 int size;
7723 u32 flags;
a2fbb9ea 7724 char string[ETH_GSTRING_LEN];
7725} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
7726/* 1 */ { STATS_OFFSET32(valid_bytes_received_hi), 8, 1, "rx_bytes" },
7727 { STATS_OFFSET32(error_bytes_received_hi), 8, 1, "rx_error_bytes" },
7728 { STATS_OFFSET32(total_bytes_transmitted_hi), 8, 1, "tx_bytes" },
7729 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi), 8, 0, "tx_error_bytes" },
7730 { STATS_OFFSET32(total_unicast_packets_received_hi),
7731 8, 1, "rx_ucast_packets" },
7732 { STATS_OFFSET32(total_multicast_packets_received_hi),
7733 8, 1, "rx_mcast_packets" },
7734 { STATS_OFFSET32(total_broadcast_packets_received_hi),
7735 8, 1, "rx_bcast_packets" },
7736 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
7737 8, 1, "tx_packets" },
7738 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
7739 8, 0, "tx_mac_errors" },
7740/* 10 */{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
7741 8, 0, "tx_carrier_errors" },
7742 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
7743 8, 0, "rx_crc_errors" },
7744 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
7745 8, 0, "rx_align_errors" },
7746 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
7747 8, 0, "tx_single_collisions" },
7748 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
7749 8, 0, "tx_multi_collisions" },
7750 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
7751 8, 0, "tx_deferred" },
7752 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
7753 8, 0, "tx_excess_collisions" },
7754 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
7755 8, 0, "tx_late_collisions" },
7756 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
7757 8, 0, "tx_total_collisions" },
7758 { STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
7759 8, 0, "rx_fragments" },
7760/* 20 */{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi), 8, 0, "rx_jabbers" },
7761 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
7762 8, 0, "rx_undersize_packets" },
7763 { STATS_OFFSET32(jabber_packets_received),
7764 4, 1, "rx_oversize_packets" },
7765 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
7766 8, 0, "tx_64_byte_packets" },
7767 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
7768 8, 0, "tx_65_to_127_byte_packets" },
7769 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
7770 8, 0, "tx_128_to_255_byte_packets" },
7771 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
7772 8, 0, "tx_256_to_511_byte_packets" },
7773 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
7774 8, 0, "tx_512_to_1023_byte_packets" },
7775 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
7776 8, 0, "tx_1024_to_1522_byte_packets" },
7777 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
7778 8, 0, "tx_1523_to_9022_byte_packets" },
7779/* 30 */{ STATS_OFFSET32(rx_stat_xonpauseframesreceived_hi),
7780 8, 0, "rx_xon_frames" },
7781 { STATS_OFFSET32(rx_stat_xoffpauseframesreceived_hi),
7782 8, 0, "rx_xoff_frames" },
7783 { STATS_OFFSET32(tx_stat_outxonsent_hi), 8, 0, "tx_xon_frames" },
7784 { STATS_OFFSET32(tx_stat_outxoffsent_hi), 8, 0, "tx_xoff_frames" },
7785 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
7786 8, 0, "rx_mac_ctrl_frames" },
7787 { STATS_OFFSET32(mac_filter_discard), 4, 1, "rx_filtered_packets" },
7788 { STATS_OFFSET32(no_buff_discard), 4, 1, "rx_discards" },
7789 { STATS_OFFSET32(xxoverflow_discard), 4, 1, "rx_fw_discards" },
7790 { STATS_OFFSET32(brb_drop_hi), 8, 1, "brb_discard" },
7791/* 39 */{ STATS_OFFSET32(brb_truncate_discard), 8, 1, "brb_truncate" }
7792};
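/* Each entry above is { offset into bp->eth_stats, counter size in
 * bytes (4 or 8; 8-byte counters are kept as hi/lo dword pairs),
 * flags, ethtool name }. flags == 1 marks counters that remain
 * meaningful per function in E1H multi-function mode; the others
 * are skipped there (see bnx2x_get_stats_count() below).
 */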
7793
7794static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7795{
7796 struct bnx2x *bp = netdev_priv(dev);
7797 int i, j;
7798
7799 switch (stringset) {
7800 case ETH_SS_STATS:
7801 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
7802 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
7803 continue;
7804 strcpy(buf + j*ETH_GSTRING_LEN,
7805 bnx2x_stats_arr[i].string);
7806 j++;
7807 }
7808 break;
7809
7810 case ETH_SS_TEST:
7811 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
7812 break;
7813 }
7814}
7815
7816static int bnx2x_get_stats_count(struct net_device *dev)
7817{
7818 struct bnx2x *bp = netdev_priv(dev);
7819 int i, num_stats = 0;
7820
7821 for (i = 0; i < BNX2X_NUM_STATS; i++) {
7822 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
7823 continue;
7824 num_stats++;
7825 }
7826 return num_stats;
7827}
7828
7829static void bnx2x_get_ethtool_stats(struct net_device *dev,
7830 struct ethtool_stats *stats, u64 *buf)
7831{
7832 struct bnx2x *bp = netdev_priv(dev);
7833 u32 *hw_stats = (u32 *)&bp->eth_stats;
7834 int i, j;
a2fbb9ea 7835
7836 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
7837 if (IS_E1HMF(bp) && (!bnx2x_stats_arr[i].flags))
a2fbb9ea 7838 continue;
7839
7840 if (bnx2x_stats_arr[i].size == 0) {
7841 /* skip this counter */
7842 buf[j] = 0;
7843 j++;
7844 continue;
7845 }
bb2a0f7a 7846 if (bnx2x_stats_arr[i].size == 4) {
a2fbb9ea 7847 /* 4-byte counter */
7848 buf[j] = (u64) *(hw_stats + bnx2x_stats_arr[i].offset);
7849 j++;
7850 continue;
7851 }
7852 /* 8-byte counter */
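/* assuming HILO_U64() from bnx2x.h, i.e. ((u64)hi << 32) + lo */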
7853 buf[j] = HILO_U64(*(hw_stats + bnx2x_stats_arr[i].offset),
7854 *(hw_stats + bnx2x_stats_arr[i].offset + 1));
7855 j++;
7856 }
7857}
7858
7859static int bnx2x_phys_id(struct net_device *dev, u32 data)
7860{
7861 struct bnx2x *bp = netdev_priv(dev);
34f80b04 7862 int port = BP_PORT(bp);
7863 int i;
7864
7865 if (!netif_running(dev))
7866 return 0;
7867
7868 if (!bp->port.pmf)
7869 return 0;
7870
7871 if (data == 0)
7872 data = 2;
7873
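/* blink for "data" seconds (default 2): toggle between OPER mode
 * at 1G and OFF every 500 msec, then restore the LED to match the
 * actual link state */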
7874 for (i = 0; i < (data * 2); i++) {
c18487ee 7875 if ((i % 2) == 0)
34f80b04 7876 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
7877 bp->link_params.hw_led_mode,
7878 bp->link_params.chip_id);
7879 else
34f80b04 7880 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
7881 bp->link_params.hw_led_mode,
7882 bp->link_params.chip_id);
7883
7884 msleep_interruptible(500);
7885 if (signal_pending(current))
7886 break;
7887 }
7888
c18487ee 7889 if (bp->link_vars.link_up)
34f80b04 7890 bnx2x_set_led(bp, port, LED_MODE_OPER,
7891 bp->link_vars.line_speed,
7892 bp->link_params.hw_led_mode,
7893 bp->link_params.chip_id);
7894
7895 return 0;
7896}
7897
7898static struct ethtool_ops bnx2x_ethtool_ops = {
7899 .get_settings = bnx2x_get_settings,
7900 .set_settings = bnx2x_set_settings,
7901 .get_drvinfo = bnx2x_get_drvinfo,
7902 .get_wol = bnx2x_get_wol,
7903 .set_wol = bnx2x_set_wol,
7904 .get_msglevel = bnx2x_get_msglevel,
7905 .set_msglevel = bnx2x_set_msglevel,
7906 .nway_reset = bnx2x_nway_reset,
7907 .get_link = ethtool_op_get_link,
7908 .get_eeprom_len = bnx2x_get_eeprom_len,
7909 .get_eeprom = bnx2x_get_eeprom,
7910 .set_eeprom = bnx2x_set_eeprom,
7911 .get_coalesce = bnx2x_get_coalesce,
7912 .set_coalesce = bnx2x_set_coalesce,
7913 .get_ringparam = bnx2x_get_ringparam,
7914 .set_ringparam = bnx2x_set_ringparam,
7915 .get_pauseparam = bnx2x_get_pauseparam,
7916 .set_pauseparam = bnx2x_set_pauseparam,
7917 .get_rx_csum = bnx2x_get_rx_csum,
7918 .set_rx_csum = bnx2x_set_rx_csum,
7919 .get_tx_csum = ethtool_op_get_tx_csum,
7920 .set_tx_csum = ethtool_op_set_tx_csum,
7921 .get_sg = ethtool_op_get_sg,
7922 .set_sg = ethtool_op_set_sg,
7923 .get_tso = ethtool_op_get_tso,
7924 .set_tso = bnx2x_set_tso,
7925 .self_test_count = bnx2x_self_test_count,
7926 .self_test = bnx2x_self_test,
7927 .get_strings = bnx2x_get_strings,
7928 .phys_id = bnx2x_phys_id,
7929 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 7930 .get_ethtool_stats = bnx2x_get_ethtool_stats,
7931};
7932
7933/* end of ethtool_ops */
7934
7935/****************************************************************************
7936* General service functions
7937****************************************************************************/
7938
7939static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
7940{
7941 u16 pmcsr;
7942
7943 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
7944
7945 switch (state) {
7946 case PCI_D0:
34f80b04 7947 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
7948 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
7949 PCI_PM_CTRL_PME_STATUS));
7950
7951 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
7952 /* delay required during transition out of D3hot */
7953 msleep(20);
34f80b04 7954 break;
a2fbb9ea 7955
7956 case PCI_D3hot:
7957 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
7958 pmcsr |= 3;
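/* PM control/status state value 3 selects D3hot (per the PCI PM
 * spec); PME is armed below so WoL can wake the chip */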
a2fbb9ea 7959
7960 if (bp->wol)
7961 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 7962
7963 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
7964 pmcsr);
a2fbb9ea 7965
7966 /* No more memory access after this point until
7967 * device is brought back to D0.
7968 */
7969 break;
7970
7971 default:
7972 return -EINVAL;
7973 }
7974 return 0;
7975}
7976
7977/*
7978 * net_device service functions
7979 */
7980
7981static int bnx2x_poll(struct napi_struct *napi, int budget)
7982{
7983 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
7984 napi);
7985 struct bnx2x *bp = fp->bp;
7986 int work_done = 0;
7987
7988#ifdef BNX2X_STOP_ON_ERROR
7989 if (unlikely(bp->panic))
34f80b04 7990 goto poll_panic;
7991#endif
7992
7993 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
7994 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
7995 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
7996
7997 bnx2x_update_fpsb_idx(fp);
7998
7999 if ((fp->tx_pkt_prod != le16_to_cpu(*fp->tx_cons_sb)) ||
8000 (fp->tx_pkt_prod != fp->tx_pkt_cons))
8001 bnx2x_tx_int(fp, budget);
8002
8003 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
8004 work_done = bnx2x_rx_int(fp, budget);
8005
8006 rmb(); /* bnx2x_has_work() reads the status block */
8007
8008 /* must not complete if we consumed full budget */
8009 if ((work_done < budget) && !bnx2x_has_work(fp)) {
8010
8011#ifdef BNX2X_STOP_ON_ERROR
34f80b04 8012poll_panic:
8013#endif
8014 netif_rx_complete(bp->dev, napi);
8015
34f80b04 8016 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 8017 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 8018 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
8019 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
8020 }
8021 return work_done;
8022}
8023
8024/* Called with netif_tx_lock.
8025 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
8026 * netif_wake_queue().
8027 */
8028static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
8029{
8030 struct bnx2x *bp = netdev_priv(dev);
8031 struct bnx2x_fastpath *fp;
8032 struct sw_tx_bd *tx_buf;
8033 struct eth_tx_bd *tx_bd;
8034 struct eth_tx_parse_bd *pbd = NULL;
8035 u16 pkt_prod, bd_prod;
8036 int nbd, fp_index = 0;
8037 dma_addr_t mapping;
8038
8039#ifdef BNX2X_STOP_ON_ERROR
8040 if (unlikely(bp->panic))
8041 return NETDEV_TX_BUSY;
8042#endif
8043
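/* pick a TX queue by hashing on the executing CPU; with a single
 * queue this always resolves to fp[0] */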
8044 fp_index = smp_processor_id() % (bp->num_queues);
8045
8046 fp = &bp->fp[fp_index];
8047 if (unlikely(bnx2x_tx_avail(fp) <
8048 (skb_shinfo(skb)->nr_frags + 3))) {
8049 bp->eth_stats.driver_xoff++;
8050 netif_stop_queue(dev);
8051 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
8052 return NETDEV_TX_BUSY;
8053 }
8054
8055 /*
8056 This is a bit ugly. First we use one BD which we mark as start,
8057 then for TSO or xsum we have a parsing info BD,
8058 and only then we have the rest of the TSO bds.
8059 (don't forget to mark the last one as last,
8060 and to unmap only AFTER you write to the BD ...)
8061 I would like to thank DovH for this mess.
8062 */
8063
8064 pkt_prod = fp->tx_pkt_prod++;
8065 bd_prod = fp->tx_bd_prod;
8066 bd_prod = TX_BD(bd_prod);
8067
8068 /* get a tx_buff and first bd */
8069 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
8070 tx_bd = &fp->tx_desc_ring[bd_prod];
8071
8072 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
8073 tx_bd->general_data = (UNICAST_ADDRESS <<
8074 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
8075 tx_bd->general_data |= 1; /* header nbd */
8076
c14423fe 8077 /* remember the first bd of the packet */
8078 tx_buf->first_bd = bd_prod;
8079
8080 DP(NETIF_MSG_TX_QUEUED,
8081 "sending pkt %u @%p next_idx %u bd %u @%p\n",
8082 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
8083
8084 if (skb->ip_summed == CHECKSUM_PARTIAL) {
8085 struct iphdr *iph = ip_hdr(skb);
8086 u8 len;
8087
8088 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
8089
8090 /* turn on parsing and get a bd */
8091 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8092 pbd = (void *)&fp->tx_desc_ring[bd_prod];
8093 len = ((u8 *)iph - (u8 *)skb->data) / 2;
8094
8095 /* for now NS flag is not used in Linux */
8096 pbd->global_data = (len |
96fc1784 8097 ((skb->protocol == htons(ETH_P_8021Q)) <<
8098 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
8099 pbd->ip_hlen = ip_hdrlen(skb) / 2;
8100 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
8101 if (iph->protocol == IPPROTO_TCP) {
8102 struct tcphdr *th = tcp_hdr(skb);
8103
8104 tx_bd->bd_flags.as_bitfield |=
8105 ETH_TX_BD_FLAGS_TCP_CSUM;
96fc1784 8106 pbd->tcp_flags = pbd_tcp_flags(skb);
8107 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
8108 pbd->tcp_pseudo_csum = swab16(th->check);
8109
8110 } else if (iph->protocol == IPPROTO_UDP) {
8111 struct udphdr *uh = udp_hdr(skb);
8112
8113 tx_bd->bd_flags.as_bitfield |=
8114 ETH_TX_BD_FLAGS_TCP_CSUM;
8115 pbd->total_hlen += cpu_to_le16(4);
8116 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
8117 pbd->cs_offset = 5; /* 10 >> 1 */
8118 pbd->tcp_pseudo_csum = 0;
8119 /* HW bug: we need to subtract 10 bytes before the
8120 * UDP header from the csum
8121 */
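/* i.e. checksum the 10 bytes that precede the UDP header and
 * subtract them out, so the final checksum is correct after the
 * HW adds those bytes back in */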
8122 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
8123 csum_partial(((u8 *)(uh)-10), 10, 0)));
8124 }
8125 }
8126
8127 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
8128 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
8129 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
8130 } else {
8131 tx_bd->vlan = cpu_to_le16(pkt_prod);
8132 }
8133
8134 mapping = pci_map_single(bp->pdev, skb->data,
8135 skb->len, PCI_DMA_TODEVICE);
8136
8137 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8138 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
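/* BD accounting: one start BD for the linear data plus one per
 * fragment, plus the parsing BD when a pbd was set up above */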
8139 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
8140 tx_bd->nbd = cpu_to_le16(nbd);
8141 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
8142
8143 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
8144 " nbytes %d flags %x vlan %u\n",
8145 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
8146 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
8147
8148 if (skb_shinfo(skb)->gso_size &&
8149 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
96fc1784 8150 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
8151
8152 DP(NETIF_MSG_TX_QUEUED,
8153 "TSO packet len %d hlen %d total len %d tso size %d\n",
8154 skb->len, hlen, skb_headlen(skb),
8155 skb_shinfo(skb)->gso_size);
8156
8157 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
8158
8159 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
8160 /* we split the first bd into headers and data bds
8161 * to ease the pain of our fellow microcode engineers
8162 * we use one mapping for both bds
8163 * So far this has only been observed to happen
8164 * in Other Operating Systems(TM)
8165 */
8166
8167 /* first fix first bd */
8168 nbd++;
8169 tx_bd->nbd = cpu_to_le16(nbd);
8170 tx_bd->nbytes = cpu_to_le16(hlen);
8171
8172 /* we only print this as an error
8173 * because we don't think this will ever happen.
8174 */
8175 BNX2X_ERR("TSO split header size is %d (%x:%x)"
8176 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
8177 tx_bd->addr_lo, tx_bd->nbd);
8178
8179 /* now get a new data bd
8180 * (after the pbd) and fill it */
8181 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8182 tx_bd = &fp->tx_desc_ring[bd_prod];
8183
8184 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8185 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
8186 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
8187 tx_bd->vlan = cpu_to_le16(pkt_prod);
8188 /* this marks the bd
8189 * as one that has no individual mapping
c14423fe 8190 * the FW ignores this flag in a bd not marked start
8191 */
8192 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
8193 DP(NETIF_MSG_TX_QUEUED,
8194 "TSO split data size is %d (%x:%x)\n",
8195 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
8196 }
8197
8198 if (!pbd) {
8199 /* supposed to be unreached
8200 * (and therefore not handled properly...)
8201 */
8202 BNX2X_ERR("LSO with no PBD\n");
8203 BUG();
8204 }
8205
8206 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
8207 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
8208 pbd->ip_id = swab16(ip_hdr(skb)->id);
8209 pbd->tcp_pseudo_csum =
8210 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
8211 ip_hdr(skb)->daddr,
8212 0, IPPROTO_TCP, 0));
8213 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
8214 }
8215
8216 {
8217 int i;
8218
8219 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
8220 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
8221
8222 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8223 tx_bd = &fp->tx_desc_ring[bd_prod];
8224
8225 mapping = pci_map_page(bp->pdev, frag->page,
8226 frag->page_offset,
8227 frag->size, PCI_DMA_TODEVICE);
8228
8229 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
8230 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
8231 tx_bd->nbytes = cpu_to_le16(frag->size);
8232 tx_bd->vlan = cpu_to_le16(pkt_prod);
8233 tx_bd->bd_flags.as_bitfield = 0;
8234 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
8235 " addr (%x:%x) nbytes %d flags %x\n",
8236 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
8237 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
8238 } /* for */
8239 }
8240
8241 /* now at last mark the bd as the last bd */
8242 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
8243
8244 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
8245 tx_bd, tx_bd->bd_flags.as_bitfield);
8246
8247 tx_buf->skb = skb;
8248
8249 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
8250
8251 /* now send a tx doorbell, counting the next bd
8252 * if the packet contains or ends with it
8253 */
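/* (assuming TX_BD_POFF() yields the BD's offset within its page:
 * the last BD of a page is a next-page pointer, so a packet that
 * wraps past it consumes one extra BD) */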
8254 if (TX_BD_POFF(bd_prod) < nbd)
8255 nbd++;
8256
8257 if (pbd)
8258 DP(NETIF_MSG_TX_QUEUED,
8259 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
8260 " tcp_flags %x xsum %x seq %u hlen %u\n",
8261 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
8262 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
8263 pbd->tcp_send_seq, pbd->total_hlen);
8264
8265 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
8266
8267 fp->hw_tx_prods->bds_prod =
8268 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 8269 mb(); /* FW restriction: must not reorder writing nbd and packets */
8270 fp->hw_tx_prods->packets_prod =
8271 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
8272 DOORBELL(bp, fp_index, 0);
8273
8274 mmiowb();
8275
8276 fp->tx_bd_prod = bd_prod;
8277 dev->trans_start = jiffies;
8278
8279 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
8280 netif_stop_queue(dev);
bb2a0f7a 8281 bp->eth_stats.driver_xoff++;
8282 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
8283 netif_wake_queue(dev);
8284 }
8285 fp->tx_pkt++;
8286
8287 return NETDEV_TX_OK;
8288}
8289
bb2a0f7a 8290/* called with rtnl_lock */
8291static int bnx2x_open(struct net_device *dev)
8292{
8293 struct bnx2x *bp = netdev_priv(dev);
8294
8295 bnx2x_set_power_state(bp, PCI_D0);
8296
bb2a0f7a 8297 return bnx2x_nic_load(bp, LOAD_OPEN);
8298}
8299
bb2a0f7a 8300/* called with rtnl_lock */
8301static int bnx2x_close(struct net_device *dev)
8302{
8303 struct bnx2x *bp = netdev_priv(dev);
8304
8305 /* Unload the driver, release IRQs */
8306 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
8307 if (atomic_read(&bp->pdev->enable_cnt) == 1)
8308 if (!CHIP_REV_IS_SLOW(bp))
8309 bnx2x_set_power_state(bp, PCI_D3hot);
8310
8311 return 0;
8312}
8313
8314/* called with netif_tx_lock from set_multicast */
8315static void bnx2x_set_rx_mode(struct net_device *dev)
8316{
8317 struct bnx2x *bp = netdev_priv(dev);
8318 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8319 int port = BP_PORT(bp);
8320
8321 if (bp->state != BNX2X_STATE_OPEN) {
8322 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8323 return;
8324 }
8325
8326 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8327
8328 if (dev->flags & IFF_PROMISC)
8329 rx_mode = BNX2X_RX_MODE_PROMISC;
8330
8331 else if ((dev->flags & IFF_ALLMULTI) ||
8332 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
8333 rx_mode = BNX2X_RX_MODE_ALLMULTI;
8334
8335 else { /* some multicasts */
8336 if (CHIP_IS_E1(bp)) {
8337 int i, old, offset;
8338 struct dev_mc_list *mclist;
8339 struct mac_configuration_cmd *config =
8340 bnx2x_sp(bp, mcast_config);
8341
8342 for (i = 0, mclist = dev->mc_list;
8343 mclist && (i < dev->mc_count);
8344 i++, mclist = mclist->next) {
8345
8346 config->config_table[i].
8347 cam_entry.msb_mac_addr =
8348 swab16(*(u16 *)&mclist->dmi_addr[0]);
8349 config->config_table[i].
8350 cam_entry.middle_mac_addr =
8351 swab16(*(u16 *)&mclist->dmi_addr[2]);
8352 config->config_table[i].
8353 cam_entry.lsb_mac_addr =
8354 swab16(*(u16 *)&mclist->dmi_addr[4]);
8355 config->config_table[i].cam_entry.flags =
8356 cpu_to_le16(port);
8357 config->config_table[i].
8358 target_table_entry.flags = 0;
8359 config->config_table[i].
8360 target_table_entry.client_id = 0;
8361 config->config_table[i].
8362 target_table_entry.vlan_id = 0;
8363
8364 DP(NETIF_MSG_IFUP,
8365 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
8366 config->config_table[i].
8367 cam_entry.msb_mac_addr,
8368 config->config_table[i].
8369 cam_entry.middle_mac_addr,
8370 config->config_table[i].
8371 cam_entry.lsb_mac_addr);
8372 }
8373 old = config->hdr.length_6b;
8374 if (old > i) {
8375 for (; i < old; i++) {
8376 if (CAM_IS_INVALID(config->
8377 config_table[i])) {
8378 i--; /* already invalidated */
8379 break;
8380 }
8381 /* invalidate */
8382 CAM_INVALIDATE(config->
8383 config_table[i]);
8384 }
8385 }
8386
8387 if (CHIP_REV_IS_SLOW(bp))
8388 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
8389 else
8390 offset = BNX2X_MAX_MULTICAST*(1 + port);
8391
8392 config->hdr.length_6b = i;
8393 config->hdr.offset = offset;
8394 config->hdr.client_id = BP_CL_ID(bp);
8395 config->hdr.reserved1 = 0;
8396
8397 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
8398 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
8399 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
8400 0);
8401 } else { /* E1H */
8402 /* Accept one or more multicasts */
8403 struct dev_mc_list *mclist;
8404 u32 mc_filter[MC_HASH_SIZE];
8405 u32 crc, bit, regidx;
8406 int i;
8407
8408 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
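/* E1H multicast filtering is a 256-bit hash: the top byte of each
 * address's CRC32c picks one of 8 32-bit MC_HASH registers
 * (bit >> 5) and a bit within it (bit & 0x1f), e.g. crc
 * 0xa6xxxxxx -> bit 166 -> register 5, bit 6 */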
8409
8410 for (i = 0, mclist = dev->mc_list;
8411 mclist && (i < dev->mc_count);
8412 i++, mclist = mclist->next) {
8413
8414 DP(NETIF_MSG_IFUP, "Adding mcast MAC: "
8415 "%02x:%02x:%02x:%02x:%02x:%02x\n",
8416 mclist->dmi_addr[0], mclist->dmi_addr[1],
8417 mclist->dmi_addr[2], mclist->dmi_addr[3],
8418 mclist->dmi_addr[4], mclist->dmi_addr[5]);
8419
8420 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
8421 bit = (crc >> 24) & 0xff;
8422 regidx = bit >> 5;
8423 bit &= 0x1f;
8424 mc_filter[regidx] |= (1 << bit);
8425 }
8426
8427 for (i = 0; i < MC_HASH_SIZE; i++)
8428 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8429 mc_filter[i]);
8430 }
8431 }
8432
8433 bp->rx_mode = rx_mode;
8434 bnx2x_set_storm_rx_mode(bp);
8435}
8436
8437/* called with rtnl_lock */
8438static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
8439{
8440 struct sockaddr *addr = p;
8441 struct bnx2x *bp = netdev_priv(dev);
8442
34f80b04 8443 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
8444 return -EINVAL;
8445
8446 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8447 if (netif_running(dev)) {
8448 if (CHIP_IS_E1(bp))
8449 bnx2x_set_mac_addr_e1(bp);
8450 else
8451 bnx2x_set_mac_addr_e1h(bp);
8452 }
8453
8454 return 0;
8455}
8456
c18487ee 8457/* called with rtnl_lock */
8458static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8459{
8460 struct mii_ioctl_data *data = if_mii(ifr);
8461 struct bnx2x *bp = netdev_priv(dev);
8462 int err;
8463
8464 switch (cmd) {
8465 case SIOCGMIIPHY:
34f80b04 8466 data->phy_id = bp->port.phy_addr;
a2fbb9ea 8467
c14423fe 8468 /* fallthrough */
c18487ee 8469
a2fbb9ea 8470 case SIOCGMIIREG: {
c18487ee 8471 u16 mii_regval;
a2fbb9ea 8472
8473 if (!netif_running(dev))
8474 return -EAGAIN;
a2fbb9ea 8475
8476 mutex_lock(&bp->port.phy_mutex);
8477 err = bnx2x_cl45_read(bp, BP_PORT(bp), 0, bp->port.phy_addr,
8478 DEFAULT_PHY_DEV_ADDR,
8479 (data->reg_num & 0x1f), &mii_regval);
8480 data->val_out = mii_regval;
34f80b04 8481 mutex_unlock(&bp->port.phy_mutex);
8482 return err;
8483 }
8484
8485 case SIOCSMIIREG:
8486 if (!capable(CAP_NET_ADMIN))
8487 return -EPERM;
8488
8489 if (!netif_running(dev))
8490 return -EAGAIN;
8491
8492 mutex_lock(&bp->port.phy_mutex);
8493 err = bnx2x_cl45_write(bp, BP_PORT(bp), 0, bp->port.phy_addr,
8494 DEFAULT_PHY_DEV_ADDR,
8495 (data->reg_num & 0x1f), data->val_in);
34f80b04 8496 mutex_unlock(&bp->port.phy_mutex);
8497 return err;
8498
8499 default:
8500 /* do nothing */
8501 break;
8502 }
8503
8504 return -EOPNOTSUPP;
8505}
8506
34f80b04 8507/* called with rtnl_lock */
8508static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
8509{
8510 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8511 int rc = 0;
8512
8513 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
8514 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
8515 return -EINVAL;
8516
8517 /* This does not race with packet allocation
c14423fe 8518 * because the actual alloc size is
8519 * only updated as part of load
8520 */
8521 dev->mtu = new_mtu;
8522
8523 if (netif_running(dev)) {
8524 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8525 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 8526 }
8527
8528 return rc;
8529}
8530
8531static void bnx2x_tx_timeout(struct net_device *dev)
8532{
8533 struct bnx2x *bp = netdev_priv(dev);
8534
8535#ifdef BNX2X_STOP_ON_ERROR
8536 if (!bp->panic)
8537 bnx2x_panic();
8538#endif
8539 /* This allows the netif to be shutdown gracefully before resetting */
8540 schedule_work(&bp->reset_task);
8541}
8542
8543#ifdef BCM_VLAN
34f80b04 8544/* called with rtnl_lock */
8545static void bnx2x_vlan_rx_register(struct net_device *dev,
8546 struct vlan_group *vlgrp)
8547{
8548 struct bnx2x *bp = netdev_priv(dev);
8549
8550 bp->vlgrp = vlgrp;
8551 if (netif_running(dev))
49d66772 8552 bnx2x_set_client_config(bp);
a2fbb9ea 8553}
34f80b04 8554
8555#endif
8556
8557#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8558static void poll_bnx2x(struct net_device *dev)
8559{
8560 struct bnx2x *bp = netdev_priv(dev);
8561
8562 disable_irq(bp->pdev->irq);
8563 bnx2x_interrupt(bp->pdev->irq, dev);
8564 enable_irq(bp->pdev->irq);
8565}
8566#endif
8567
8568static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8569 struct net_device *dev)
8570{
8571 struct bnx2x *bp;
8572 int rc;
8573
8574 SET_NETDEV_DEV(dev, &pdev->dev);
8575 bp = netdev_priv(dev);
8576
8577 bp->dev = dev;
8578 bp->pdev = pdev;
a2fbb9ea 8579 bp->flags = 0;
34f80b04 8580 bp->func = PCI_FUNC(pdev->devfn);
8581
8582 rc = pci_enable_device(pdev);
8583 if (rc) {
8584 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
8585 goto err_out;
8586 }
8587
8588 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8589 printk(KERN_ERR PFX "Cannot find PCI device base address,"
8590 " aborting\n");
8591 rc = -ENODEV;
8592 goto err_out_disable;
8593 }
8594
8595 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
8596 printk(KERN_ERR PFX "Cannot find second PCI device"
8597 " base address, aborting\n");
8598 rc = -ENODEV;
8599 goto err_out_disable;
8600 }
8601
8602 if (atomic_read(&pdev->enable_cnt) == 1) {
8603 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8604 if (rc) {
8605 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
8606 " aborting\n");
8607 goto err_out_disable;
8608 }
a2fbb9ea 8609
8610 pci_set_master(pdev);
8611 pci_save_state(pdev);
8612 }
8613
8614 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8615 if (bp->pm_cap == 0) {
8616 printk(KERN_ERR PFX "Cannot find power management"
8617 " capability, aborting\n");
8618 rc = -EIO;
8619 goto err_out_release;
8620 }
8621
8622 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8623 if (bp->pcie_cap == 0) {
8624 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
8625 " aborting\n");
8626 rc = -EIO;
8627 goto err_out_release;
8628 }
8629
8630 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
8631 bp->flags |= USING_DAC_FLAG;
8632 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
8633 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
8634 " failed, aborting\n");
8635 rc = -EIO;
8636 goto err_out_release;
8637 }
8638
8639 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
8640 printk(KERN_ERR PFX "System does not support DMA,"
8641 " aborting\n");
8642 rc = -EIO;
8643 goto err_out_release;
8644 }
8645
8646 dev->mem_start = pci_resource_start(pdev, 0);
8647 dev->base_addr = dev->mem_start;
8648 dev->mem_end = pci_resource_end(pdev, 0);
8649
8650 dev->irq = pdev->irq;
8651
8652 bp->regview = ioremap_nocache(dev->base_addr,
8653 pci_resource_len(pdev, 0));
8654 if (!bp->regview) {
8655 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
8656 rc = -ENOMEM;
8657 goto err_out_release;
8658 }
8659
8660 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
8661 min_t(u64, BNX2X_DB_SIZE,
8662 pci_resource_len(pdev, 2)));
8663 if (!bp->doorbells) {
8664 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
8665 rc = -ENOMEM;
8666 goto err_out_unmap;
8667 }
8668
8669 bnx2x_set_power_state(bp, PCI_D0);
8670
8671 /* clean indirect addresses */
8672 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8673 PCICFG_VENDOR_ID_OFFSET);
8674 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8675 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8676 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8677 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 8678
8679 dev->hard_start_xmit = bnx2x_start_xmit;
8680 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 8681
8682 dev->ethtool_ops = &bnx2x_ethtool_ops;
8683 dev->open = bnx2x_open;
8684 dev->stop = bnx2x_close;
8685 dev->set_multicast_list = bnx2x_set_rx_mode;
8686 dev->set_mac_address = bnx2x_change_mac_addr;
8687 dev->do_ioctl = bnx2x_ioctl;
8688 dev->change_mtu = bnx2x_change_mtu;
8689 dev->tx_timeout = bnx2x_tx_timeout;
8690#ifdef BCM_VLAN
8691 dev->vlan_rx_register = bnx2x_vlan_rx_register;
8692#endif
8693#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
8694 dev->poll_controller = poll_bnx2x;
8695#endif
8696 dev->features |= NETIF_F_SG;
8697 dev->features |= NETIF_F_HW_CSUM;
8698 if (bp->flags & USING_DAC_FLAG)
8699 dev->features |= NETIF_F_HIGHDMA;
8700#ifdef BCM_VLAN
8701 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8702#endif
8703 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
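/* Offload advertisement: scatter/gather and hardware checksumming are
 * always enabled, NETIF_F_HIGHDMA only when the 64-bit DMA mask was
 * accepted above, plus VLAN acceleration (when built in) and
 * TSO/TSO-ECN.
 */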
8704
8705 bp->timer_interval = HZ;
8706 bp->current_interval = (poll ? poll : HZ);
8707
8708
8709 return 0;
8710
8711err_out_unmap:
8712 if (bp->regview) {
8713 iounmap(bp->regview);
8714 bp->regview = NULL;
8715 }
8716 if (bp->doorbells) {
8717 iounmap(bp->doorbells);
8718 bp->doorbells = NULL;
8719 }
8720
8721err_out_release:
8722 if (atomic_read(&pdev->enable_cnt) == 1)
8723 pci_release_regions(pdev);
8724
8725err_out_disable:
8726 pci_disable_device(pdev);
8727 pci_set_drvdata(pdev, NULL);
8728
8729err_out:
8730 return rc;
8731}
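/* The error labels unwind strictly in reverse order of acquisition:
 * unmap whichever BARs were mapped, release the PCI regions (only on
 * the last enabled function, per enable_cnt), then disable the device.
 */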
8732
8733static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
8734{
8735 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8736
8737 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8738 return val;
8739}
8740
8741/* returns the PCI-E link speed: 1 = 2.5GHz (Gen1), 2 = 5GHz (Gen2) */
8742static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
8743{
8744 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8745
8746 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
8747 return val;
8748}
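/* Both helpers decode the same link-control word.  For example, a x8
 * Gen1 link yields width 8 and speed 1, which the probe banner below
 * renders as "PCI-E x8 2.5GHz".
 */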
8749
8750static int __devinit bnx2x_init_one(struct pci_dev *pdev,
8751 const struct pci_device_id *ent)
8752{
8753 static int version_printed;
8754 struct net_device *dev = NULL;
8755 struct bnx2x *bp;
25047950 8756 int rc;
25047950 8757 DECLARE_MAC_BUF(mac);
8758
8759 if (version_printed++ == 0)
8760 printk(KERN_INFO "%s", version);
8761
8762 	/* dev and its private area are zeroed by alloc_etherdev() */
8763 dev = alloc_etherdev(sizeof(*bp));
8764 if (!dev) {
8765 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 8766 return -ENOMEM;
34f80b04 8767 }
8768
8769 netif_carrier_off(dev);
8770
8771 bp = netdev_priv(dev);
8772 bp->msglevel = debug;
8773
34f80b04 8774 rc = bnx2x_init_dev(pdev, dev);
8775 if (rc < 0) {
8776 free_netdev(dev);
8777 return rc;
8778 }
8779
8780 rc = register_netdev(dev);
8781 if (rc) {
c14423fe 8782 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04 8783 goto init_one_exit;
8784 }
8785
8786 pci_set_drvdata(pdev, dev);
8787
8788 rc = bnx2x_init_bp(bp);
8789 if (rc) {
8790 unregister_netdev(dev);
8791 goto init_one_exit;
8792 }
8793
8794 bp->common.name = board_info[ent->driver_data].name;
25047950 8795 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
8796 " IRQ %d, ", dev->name, bp->common.name,
8797 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
8798 bnx2x_get_pcie_width(bp),
8799 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
8800 dev->base_addr, bp->pdev->irq);
8801 printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
a2fbb9ea 8802 return 0;
8803
8804init_one_exit:
8805 if (bp->regview)
8806 iounmap(bp->regview);
8807
8808 if (bp->doorbells)
8809 iounmap(bp->doorbells);
8810
8811 free_netdev(dev);
8812
8813 if (atomic_read(&pdev->enable_cnt) == 1)
8814 pci_release_regions(pdev);
8815
8816 pci_disable_device(pdev);
8817 pci_set_drvdata(pdev, NULL);
8818
8819 return rc;
8820}
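/* Probe sequence: allocate the netdev, map the device (bnx2x_init_dev),
 * register with the network stack, then complete software init
 * (bnx2x_init_bp).  Since registration precedes bnx2x_init_bp(), its
 * failure path must unregister_netdev() before the common init_one_exit
 * unwind unmaps the BARs and frees the netdev.
 */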
8821
8822static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
8823{
8824 struct net_device *dev = pci_get_drvdata(pdev);
8825 struct bnx2x *bp;
8826
8827 if (!dev) {
8828 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8829 return;
8830 }
228241eb 8831 bp = netdev_priv(dev);
a2fbb9ea 8832
8833 unregister_netdev(dev);
8834
8835 if (bp->regview)
8836 iounmap(bp->regview);
8837
8838 if (bp->doorbells)
8839 iounmap(bp->doorbells);
8840
8841 free_netdev(dev);
8842
8843 if (atomic_read(&pdev->enable_cnt) == 1)
8844 pci_release_regions(pdev);
8845
8846 pci_disable_device(pdev);
8847 pci_set_drvdata(pdev, NULL);
8848}
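/* Teardown mirrors the probe in reverse: unregister from the stack,
 * unmap both BARs, free the netdev, and release/disable the PCI device
 * only when this is the last enabled function.
 */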
8849
8850static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
8851{
8852 struct net_device *dev = pci_get_drvdata(pdev);
8853 struct bnx2x *bp;
8854
8855 if (!dev) {
8856 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8857 return -ENODEV;
8858 }
8859 bp = netdev_priv(dev);
a2fbb9ea 8860
34f80b04 8861 rtnl_lock();
a2fbb9ea 8862
34f80b04 8863 pci_save_state(pdev);
228241eb 8864
8865 if (!netif_running(dev)) {
8866 rtnl_unlock();
8867 return 0;
8868 }
8869
8870 netif_device_detach(dev);
a2fbb9ea 8871
8872 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8873
a2fbb9ea 8874 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 8875
8876 rtnl_unlock();
8877
8878 return 0;
8879}
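/* Suspend runs under rtnl_lock: PCI config space is saved
 * unconditionally, but the NIC is unloaded and moved to the requested
 * low-power state only if the interface was actually up.
 */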
8880
8881static int bnx2x_resume(struct pci_dev *pdev)
8882{
8883 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 8884 struct bnx2x *bp;
8885 int rc;
8886
8887 if (!dev) {
8888 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
8889 return -ENODEV;
8890 }
228241eb 8891 bp = netdev_priv(dev);
a2fbb9ea 8892
8893 rtnl_lock();
8894
228241eb 8895 pci_restore_state(pdev);
8896
8897 if (!netif_running(dev)) {
8898 rtnl_unlock();
8899 return 0;
8900 }
8901
8902 bnx2x_set_power_state(bp, PCI_D0);
8903 netif_device_attach(dev);
8904
34f80b04 8905 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 8906
8907 rtnl_unlock();
8908
8909 return rc;
8910}
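/* Resume is the mirror image: restore config space and, if the
 * interface was up at suspend time, return to D0, reattach and reload
 * the NIC.  The bnx2x_nic_load() result is propagated to the PM core.
 */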
8911
8912static struct pci_driver bnx2x_pci_driver = {
8913 .name = DRV_MODULE_NAME,
8914 .id_table = bnx2x_pci_tbl,
8915 .probe = bnx2x_init_one,
8916 .remove = __devexit_p(bnx2x_remove_one),
8917 .suspend = bnx2x_suspend,
8918 .resume = bnx2x_resume,
8919};
8920
8921static int __init bnx2x_init(void)
8922{
8923 return pci_register_driver(&bnx2x_pci_driver);
8924}
8925
8926static void __exit bnx2x_cleanup(void)
8927{
8928 pci_unregister_driver(&bnx2x_pci_driver);
8929}
8930
8931module_init(bnx2x_init);
8932module_exit(bnx2x_cleanup);
8933