bnx2x: Removing unused definitions
drivers/net/bnx2x_main.c (linux-2.6-block.git)
/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"
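/* The prefixes above are only the first half of the firmware file name;
 * bnx2x_init_firmware() later appends the FW version to them, yielding
 * something like "bnx2x-e1-4.8.53.0.fw" (the version digits here are
 * illustrative only, not taken from this file).
 */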

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
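
/* Both helpers above use the same indirect-access pattern: point the
 * PCICFG_GRC_ADDRESS config-space window at the target GRC register, move
 * the data through PCICFG_GRC_DATA, then leave the window pointing at the
 * harmless PCICFG_VENDOR_ID_OFFSET.  This path works before the BAR/DMAE
 * machinery is ready, which is presumably why it is reserved for init time.
 */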

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
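
/* Note on the completion loop above: with cnt = 200 and a 5us delay per
 * pass, the wait bounds out after roughly 1ms on real silicon, or about
 * 20s with the 100ms msleep used on emulation/FPGA, before the DMAE
 * timeout error is reported.
 */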

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int offset = 0;

	while (len > DMAE_LEN32_WR_MAX) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, DMAE_LEN32_WR_MAX);
		offset += DMAE_LEN32_WR_MAX * 4;
		len -= DMAE_LEN32_WR_MAX;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
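
/* Chunking illustration for the loop above: len counts 32-bit words while
 * offset is in bytes, hence the "* 4" when advancing.  Assuming, say,
 * DMAE_LEN32_WR_MAX == 0x400 (the real limit lives in bnx2x.h), a
 * 2500-word write would be issued as two 1024-word commands followed by
 * one 452-word command.
 */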

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
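
/* The ack above is a single 32-bit store: the just-serviced status-block
 * index plus the sb/storm ids and the requested interrupt mode, packed via
 * the IGU_ACK_REGISTER_*_SHIFT fields.  For example, op == IGU_INT_ENABLE
 * with update == 1 both records the new index and re-enables the interrupt
 * line in one write.
 */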

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
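
/* BD accounting sketch for the function above: nbd starts as the count
 * reported in the start BD minus the start BD itself (just unmapped);
 * the parse BD and, for TSO, the split-header BD carry no DMA mapping,
 * so they only decrement nbd and advance bd_idx before the per-fragment
 * unmap loop runs.
 */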

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
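
/* Worked example for the arithmetic above: with prod == cons the queue is
 * actually empty, yet "used" already equals NUM_TX_RINGS because every
 * "next page" BD is counted as permanently consumed; tx_avail() therefore
 * reports tx_ring_size - NUM_TX_RINGS, which keeps real descriptors out of
 * the page-link entries.
 */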

static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u  sw_cons %u  pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}


static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
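
/* Example of what the loop above clears: for each of the NUM_RX_SGE_PAGES
 * rings, the last two indices of the page (RX_SGE_CNT*i - 1 and the one
 * before it) are the "next page" link entries; their mask bits are forced
 * to 0 so the producer-update logic never treats them as consumable SGEs.
 */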

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d  fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
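
/* Rough walk-through of the function above: a mask bit is cleared per SGE
 * as the FW reports it consumed; the producer may then advance only over
 * mask elements that have become all-zero, each contributing
 * RX_SGE_MASK_ELEM_SZ entries to delta, and those elements are re-armed to
 * all-ones as they are reclaimed.
 */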

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we're going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
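
/* Note on the gso_size set above: for a TPA-aggregated packet the MSS is
 * not reported directly here, so it is approximated as
 * min(SGE_PAGE_SIZE, max(frag_size, len_on_bd)) -- enough for the stack
 * to re-segment the LRO'd frame when it has to be forwarded.
 */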

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  hw_comp_cons %u  sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		/* Prefetch the page containing the BD descriptor
		   at producer's index. It will be needed when new skb is
		   allocated */
		prefetch((void *)(PAGE_ALIGN((unsigned long)
					     (&fp->rx_desc_ring[bd_prod])) -
				  PAGE_SIZE + 1));

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x  err %x  status %x"
		   "  queue %x  vlan %x  len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  flags %x  rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring,
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR  packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else
			if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR  packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
1703
1704static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1705{
1706 struct bnx2x_fastpath *fp = fp_cookie;
1707 struct bnx2x *bp = fp->bp;
a2fbb9ea 1708
da5a662a
VZ
1709 /* Return here if interrupt is disabled */
1710 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1711 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1712 return IRQ_HANDLED;
1713 }
1714
34f80b04 1715 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1716 fp->index, fp->sb_id);
0626b899 1717 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1718
1719#ifdef BNX2X_STOP_ON_ERROR
1720 if (unlikely(bp->panic))
1721 return IRQ_HANDLED;
1722#endif
ca00392c
EG
1723 /* Handle Rx or Tx according to MSI-X vector */
1724 if (fp->is_rx_queue) {
1725 prefetch(fp->rx_cons_sb);
1726 prefetch(&fp->status_blk->u_status_block.status_block_index);
a2fbb9ea 1727
ca00392c 1728 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1729
1730 } else {
1731 prefetch(fp->tx_cons_sb);
1732 prefetch(&fp->status_blk->c_status_block.status_block_index);
1733
1734 bnx2x_update_fpsb_idx(fp);
1735 rmb();
1736 bnx2x_tx_int(fp);
1737
1738 /* Re-enable interrupts */
1739 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1740 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
1741 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1742 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
1743 }
34f80b04 1744
1745 return IRQ_HANDLED;
1746}
1747
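/* INTx/MSI ISR: 'status' is the IGU bitmask - bit 0 signals slowpath work
 * (deferred to sp_task), and bit (1 + sb_id) selects each fastpath status
 * block handled in the loop below */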
1748static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1749{
555f6c78 1750 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1751 u16 status = bnx2x_ack_int(bp);
34f80b04 1752 u16 mask;
ca00392c 1753 int i;
a2fbb9ea 1754
34f80b04 1755 /* Return here if interrupt is shared and it's not for us */
1756 if (unlikely(status == 0)) {
1757 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1758 return IRQ_NONE;
1759 }
f5372251 1760 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1761
34f80b04 1762 /* Return here if interrupt is disabled */
1763 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1764 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1765 return IRQ_HANDLED;
1766 }
1767
1768#ifdef BNX2X_STOP_ON_ERROR
1769 if (unlikely(bp->panic))
1770 return IRQ_HANDLED;
1771#endif
1772
1773 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1774 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1775
1776 mask = 0x2 << fp->sb_id;
1777 if (status & mask) {
1778 /* Handle Rx or Tx according to SB id */
1779 if (fp->is_rx_queue) {
1780 prefetch(fp->rx_cons_sb);
1781 prefetch(&fp->status_blk->u_status_block.
1782 status_block_index);
a2fbb9ea 1783
ca00392c 1784 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
a2fbb9ea 1785
1786 } else {
1787 prefetch(fp->tx_cons_sb);
1788 prefetch(&fp->status_blk->c_status_block.
1789 status_block_index);
1790
1791 bnx2x_update_fpsb_idx(fp);
1792 rmb();
1793 bnx2x_tx_int(fp);
1794
1795 /* Re-enable interrupts */
1796 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
1797 le16_to_cpu(fp->fp_u_idx),
1798 IGU_INT_NOP, 1);
1799 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
1800 le16_to_cpu(fp->fp_c_idx),
1801 IGU_INT_ENABLE, 1);
1802 }
1803 status &= ~mask;
1804 }
1805 }
1806
a2fbb9ea 1807
34f80b04 1808 if (unlikely(status & 0x1)) {
1cf167f2 1809 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1810
1811 status &= ~0x1;
1812 if (!status)
1813 return IRQ_HANDLED;
1814 }
1815
1816 if (status)
1817 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1818 status);
a2fbb9ea 1819
c18487ee 1820 return IRQ_HANDLED;
1821}
1822
c18487ee 1823/* end of fast path */
a2fbb9ea 1824
bb2a0f7a 1825static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1826
1827/* Link */
1828
1829/*
1830 * General service functions
1831 */
a2fbb9ea 1832
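/* HW lock protocol, as used below: writing the resource bit to the set
 * register (hw_lock_control_reg + 4) attempts the lock, and reading the
 * control register back confirms ownership; polled for up to 5 seconds */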
4a37fb66 1833static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
1834{
1835 u32 lock_status;
1836 u32 resource_bit = (1 << resource);
1837 int func = BP_FUNC(bp);
1838 u32 hw_lock_control_reg;
c18487ee 1839 int cnt;
a2fbb9ea 1840
1841 /* Validating that the resource is within range */
1842 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1843 DP(NETIF_MSG_HW,
1844 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1845 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1846 return -EINVAL;
1847 }
a2fbb9ea 1848
1849 if (func <= 5) {
1850 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1851 } else {
1852 hw_lock_control_reg =
1853 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1854 }
1855
c18487ee 1856 /* Validating that the resource is not already taken */
4a37fb66 1857 lock_status = REG_RD(bp, hw_lock_control_reg);
1858 if (lock_status & resource_bit) {
1859 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1860 lock_status, resource_bit);
1861 return -EEXIST;
1862 }
a2fbb9ea 1863
1864 /* Try for 5 seconds, polling every 5ms */
1865 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1866 /* Try to acquire the lock */
1867 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1868 lock_status = REG_RD(bp, hw_lock_control_reg);
1869 if (lock_status & resource_bit)
1870 return 0;
a2fbb9ea 1871
c18487ee 1872 msleep(5);
a2fbb9ea 1873 }
1874 DP(NETIF_MSG_HW, "Timeout\n");
1875 return -EAGAIN;
1876}
a2fbb9ea 1877
4a37fb66 1878static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
1879{
1880 u32 lock_status;
1881 u32 resource_bit = (1 << resource);
1882 int func = BP_FUNC(bp);
1883 u32 hw_lock_control_reg;
a2fbb9ea 1884
1885 /* Validating that the resource is within range */
1886 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1887 DP(NETIF_MSG_HW,
1888 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1889 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1890 return -EINVAL;
1891 }
1892
1893 if (func <= 5) {
1894 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1895 } else {
1896 hw_lock_control_reg =
1897 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1898 }
1899
c18487ee 1900 /* Validating that the resource is currently taken */
4a37fb66 1901 lock_status = REG_RD(bp, hw_lock_control_reg);
1902 if (!(lock_status & resource_bit)) {
1903 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1904 lock_status, resource_bit);
1905 return -EFAULT;
1906 }
1907
4a37fb66 1908 REG_WR(bp, hw_lock_control_reg, resource_bit);
1909 return 0;
1910}
1911
1912/* HW Lock for shared dual port PHYs */
4a37fb66 1913static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1914{
34f80b04 1915 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1916
1917 if (bp->port.need_hw_lock)
1918 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1919}
a2fbb9ea 1920
4a37fb66 1921static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1922{
1923 if (bp->port.need_hw_lock)
1924 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1925
34f80b04 1926 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1927}
a2fbb9ea 1928
1929int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1930{
1931 /* The GPIO should be swapped if swap register is set and active */
1932 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1933 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1934 int gpio_shift = gpio_num +
1935 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1936 u32 gpio_mask = (1 << gpio_shift);
1937 u32 gpio_reg;
1938 int value;
1939
1940 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1941 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1942 return -EINVAL;
1943 }
1944
1945 /* read GPIO value */
1946 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1947
1948 /* get the requested pin value */
1949 if ((gpio_reg & gpio_mask) == gpio_mask)
1950 value = 1;
1951 else
1952 value = 0;
1953
1954 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1955
1956 return value;
1957}
1958
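/* Note on the swap logic below: when both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE are set, the effective GPIO port is the given
 * port XORed with 1 */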
17de50b7 1959int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1960{
1961 /* The GPIO should be swapped if swap register is set and active */
1962 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1963 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1964 int gpio_shift = gpio_num +
1965 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1966 u32 gpio_mask = (1 << gpio_shift);
1967 u32 gpio_reg;
a2fbb9ea 1968
1969 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1970 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1971 return -EINVAL;
1972 }
a2fbb9ea 1973
4a37fb66 1974 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1975 /* read GPIO and mask except the float bits */
1976 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1977
1978 switch (mode) {
1979 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1980 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1981 gpio_num, gpio_shift);
1982 /* clear FLOAT and set CLR */
1983 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1984 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1985 break;
a2fbb9ea 1986
1987 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1988 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1989 gpio_num, gpio_shift);
1990 /* clear FLOAT and set SET */
1991 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1992 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1993 break;
a2fbb9ea 1994
17de50b7 1995 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
1996 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1997 gpio_num, gpio_shift);
1998 /* set FLOAT */
1999 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2000 break;
a2fbb9ea 2001
2002 default:
2003 break;
2004 }
2005
c18487ee 2006 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2007 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2008
c18487ee 2009 return 0;
2010}
2011
2012int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2013{
2014 /* The GPIO should be swapped if swap register is set and active */
2015 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2016 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2017 int gpio_shift = gpio_num +
2018 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2019 u32 gpio_mask = (1 << gpio_shift);
2020 u32 gpio_reg;
2021
2022 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2023 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2024 return -EINVAL;
2025 }
2026
2027 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2028 /* read GPIO int */
2029 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2030
2031 switch (mode) {
2032 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2033 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2034 "output low\n", gpio_num, gpio_shift);
2035 /* clear SET and set CLR */
2036 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2037 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2038 break;
2039
2040 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2041 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2042 "output high\n", gpio_num, gpio_shift);
2043 /* clear CLR and set SET */
2044 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2045 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2046 break;
2047
2048 default:
2049 break;
2050 }
2051
2052 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2053 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2054
2055 return 0;
2056}
2057
c18487ee 2058static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2059{
2060 u32 spio_mask = (1 << spio_num);
2061 u32 spio_reg;
a2fbb9ea 2062
2063 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2064 (spio_num > MISC_REGISTERS_SPIO_7)) {
2065 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2066 return -EINVAL;
2067 }
2068
4a37fb66 2069 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
2070 /* read SPIO and mask except the float bits */
2071 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2072
c18487ee 2073 switch (mode) {
6378c025 2074 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
2075 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2076 /* clear FLOAT and set CLR */
2077 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2078 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2079 break;
a2fbb9ea 2080
6378c025 2081 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
2082 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2083 /* clear FLOAT and set SET */
2084 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2085 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2086 break;
a2fbb9ea 2087
2088 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2089 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2090 /* set FLOAT */
2091 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2092 break;
a2fbb9ea 2093
2094 default:
2095 break;
2096 }
2097
c18487ee 2098 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2099 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2100
2101 return 0;
2102}
2103
c18487ee 2104static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2105{
2106 switch (bp->link_vars.ieee_fc &
2107 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2108 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2109 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2110 ADVERTISED_Pause);
2111 break;
356e2385 2112
c18487ee 2113 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2114 bp->port.advertising |= (ADVERTISED_Asym_Pause |
2115 ADVERTISED_Pause);
2116 break;
356e2385 2117
c18487ee 2118 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2119 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2120 break;
356e2385 2121
c18487ee 2122 default:
34f80b04 2123 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
2124 ADVERTISED_Pause);
2125 break;
2126 }
2127}
f1410647 2128
2129static void bnx2x_link_report(struct bnx2x *bp)
2130{
2131 if (bp->state == BNX2X_STATE_DISABLED) {
2132 netif_carrier_off(bp->dev);
2133 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2134 return;
2135 }
2136
2137 if (bp->link_vars.link_up) {
2138 if (bp->state == BNX2X_STATE_OPEN)
2139 netif_carrier_on(bp->dev);
2140 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 2141
c18487ee 2142 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 2143
2144 if (bp->link_vars.duplex == DUPLEX_FULL)
2145 printk("full duplex");
2146 else
2147 printk("half duplex");
f1410647 2148
2149 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2150 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 2151 printk(", receive ");
2152 if (bp->link_vars.flow_ctrl &
2153 BNX2X_FLOW_CTRL_TX)
2154 printk("& transmit ");
2155 } else {
2156 printk(", transmit ");
2157 }
2158 printk("flow control ON");
2159 }
2160 printk("\n");
f1410647 2161
2162 } else { /* link_down */
2163 netif_carrier_off(bp->dev);
2164 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 2165 }
2166}
2167
b5bf9068 2168static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2169{
2170 if (!BP_NOMCP(bp)) {
2171 u8 rc;
a2fbb9ea 2172
19680c48 2173 /* Initialize link parameters structure variables */
2174 /* It is recommended to turn off RX FC for jumbo frames
2175 for better performance */
0c593270 2176 if (bp->dev->mtu > 5000)
c0700f90 2177 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2178 else
c0700f90 2179 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2180
4a37fb66 2181 bnx2x_acquire_phy_lock(bp);
2182
2183 if (load_mode == LOAD_DIAG)
2184 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2185
19680c48 2186 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2187
4a37fb66 2188 bnx2x_release_phy_lock(bp);
a2fbb9ea 2189
2190 bnx2x_calc_fc_adv(bp);
2191
2192 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2193 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2194 bnx2x_link_report(bp);
b5bf9068 2195 }
34f80b04 2196
2197 return rc;
2198 }
f5372251 2199 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
19680c48 2200 return -EINVAL;
2201}
2202
c18487ee 2203static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2204{
19680c48 2205 if (!BP_NOMCP(bp)) {
4a37fb66 2206 bnx2x_acquire_phy_lock(bp);
19680c48 2207 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2208 bnx2x_release_phy_lock(bp);
a2fbb9ea 2209
2210 bnx2x_calc_fc_adv(bp);
2211 } else
f5372251 2212 BNX2X_ERR("Bootcode is missing - cannot set link\n");
c18487ee 2213}
a2fbb9ea 2214
2215static void bnx2x__link_reset(struct bnx2x *bp)
2216{
19680c48 2217 if (!BP_NOMCP(bp)) {
4a37fb66 2218 bnx2x_acquire_phy_lock(bp);
589abe3a 2219 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2220 bnx2x_release_phy_lock(bp);
19680c48 2221 } else
f5372251 2222 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
c18487ee 2223}
a2fbb9ea 2224
2225static u8 bnx2x_link_test(struct bnx2x *bp)
2226{
2227 u8 rc;
a2fbb9ea 2228
4a37fb66 2229 bnx2x_acquire_phy_lock(bp);
c18487ee 2230 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2231 bnx2x_release_phy_lock(bp);
a2fbb9ea 2232
2233 return rc;
2234}
a2fbb9ea 2235
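/* Rate shaping (max) and fairness (min) per-port contexts. With
 * line_speed in Mbps, r_param = line_speed/8 works out to bytes per usec
 * (e.g. 1250 for 10G), so the thresholds below are byte counts */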
8a1c38d1 2236static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2237{
2238 u32 r_param = bp->link_vars.line_speed / 8;
2239 u32 fair_periodic_timeout_usec;
2240 u32 t_fair;
34f80b04 2241
2242 memset(&(bp->cmng.rs_vars), 0,
2243 sizeof(struct rate_shaping_vars_per_port));
2244 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2245
2246 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2247 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2248
2249 /* this is the threshold below which no timer arming will occur;
2250 the 1.25 coefficient makes the threshold a little bigger than
2251 the real time, to compensate for timer inaccuracy */
2252 bp->cmng.rs_vars.rs_threshold =
2253 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2254
2255 /* resolution of fairness timer */
2256 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2257 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2258 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2259
2260 /* this is the threshold below which we won't arm the timer anymore */
2261 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2262
2263 /* we multiply by 1e3/8 to get bytes/msec.
2264 We don't want the credits to exceed the equivalent of
2265 t_fair*FAIR_MEM (the algorithm resolution) */
2266 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2267 /* since each tick is 4 usec */
2268 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
2269}
2270
2271/* Calculates the sum of vn_min_rates.
2272 It's needed for further normalizing of the min_rates.
2273 Returns:
2274 sum of vn_min_rates.
2275 or
2276 0 - if all the min_rates are 0.
2277 In the latter case the fairness algorithm should be deactivated.
2278 If not all min_rates are zero then those that are zeroes will be set to 1.
2279 */
2280static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2281{
2282 int all_zero = 1;
2283 int port = BP_PORT(bp);
2284 int vn;
2285
2286 bp->vn_weight_sum = 0;
2287 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2288 int func = 2*vn + port;
2289 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2290 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2291 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2292
2293 /* Skip hidden vns */
2294 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2295 continue;
2296
2297 /* If min rate is zero - set it to 1 */
2298 if (!vn_min_rate)
2299 vn_min_rate = DEF_MIN_RATE;
2300 else
2301 all_zero = 0;
2302
2303 bp->vn_weight_sum += vn_min_rate;
2304 }
2305
2306 /* ... only if all min rates are zeros - disable fairness */
2307 if (all_zero)
2308 bp->vn_weight_sum = 0;
2309}
2310
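/* Per-VN counterpart: vn_max_rate programs the rate shaper quota, and
 * vn_credit_delta gives the VN its weighted share of the fairness credit,
 * floored at twice fair_threshold */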
8a1c38d1 2311static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2312{
2313 struct rate_shaping_vars_per_vn m_rs_vn;
2314 struct fairness_vars_per_vn m_fair_vn;
2315 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2316 u16 vn_min_rate, vn_max_rate;
2317 int i;
2318
2319 /* If function is hidden - set min and max to zeroes */
2320 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2321 vn_min_rate = 0;
2322 vn_max_rate = 0;
2323
2324 } else {
2325 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2327 /* If fairness is enabled (not all min rates are zeroes) and
34f80b04 2328 if current min rate is zero - set it to 1.
33471629 2329 This is a requirement of the algorithm. */
8a1c38d1 2330 if (bp->vn_weight_sum && (vn_min_rate == 0))
2331 vn_min_rate = DEF_MIN_RATE;
2332 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2334 }
2335
2336 DP(NETIF_MSG_IFUP,
2337 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2338 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2339
2340 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2341 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2342
2343 /* global vn counter - maximal Mbps for this vn */
2344 m_rs_vn.vn_counter.rate = vn_max_rate;
2345
2346 /* quota - number of bytes transmitted in this period */
2347 m_rs_vn.vn_counter.quota =
2348 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2349
8a1c38d1 2350 if (bp->vn_weight_sum) {
2351 /* credit for each period of the fairness algorithm:
2352 number of bytes in T_FAIR (the vns share the port rate).
2353 vn_weight_sum should not be larger than 10000, thus
2354 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2355 than zero */
34f80b04 2356 m_fair_vn.vn_credit_delta =
2357 max((u32)(vn_min_rate * (T_FAIR_COEF /
2358 (8 * bp->vn_weight_sum))),
2359 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2360 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2361 m_fair_vn.vn_credit_delta);
2362 }
2363
2364 /* Store it to internal memory */
2365 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2366 REG_WR(bp, BAR_XSTRORM_INTMEM +
2367 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2368 ((u32 *)(&m_rs_vn))[i]);
2369
2370 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2371 REG_WR(bp, BAR_XSTRORM_INTMEM +
2372 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2373 ((u32 *)(&m_fair_vn))[i]);
2374}
2375
8a1c38d1 2376
2377/* This function is called upon link interrupt */
2378static void bnx2x_link_attn(struct bnx2x *bp)
2379{
2380 /* Make sure that we are synced with the current statistics */
2381 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2382
c18487ee 2383 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2384
2385 if (bp->link_vars.link_up) {
2386
1c06328c 2387 /* dropless flow control */
a18f5128 2388 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
2389 int port = BP_PORT(bp);
2390 u32 pause_enabled = 0;
2391
2392 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2393 pause_enabled = 1;
2394
2395 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2396 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2397 pause_enabled);
2398 }
2399
2400 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2401 struct host_port_stats *pstats;
2402
2403 pstats = bnx2x_sp(bp, port_stats);
2404 /* reset old bmac stats */
2405 memset(&(pstats->mac_stx[0]), 0,
2406 sizeof(struct mac_stx));
2407 }
2408 if ((bp->state == BNX2X_STATE_OPEN) ||
2409 (bp->state == BNX2X_STATE_DISABLED))
2410 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2411 }
2412
2413 /* indicate link status */
2414 bnx2x_link_report(bp);
2415
2416 if (IS_E1HMF(bp)) {
8a1c38d1 2417 int port = BP_PORT(bp);
34f80b04 2418 int func;
8a1c38d1 2419 int vn;
2420
2421 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2422 if (vn == BP_E1HVN(bp))
2423 continue;
2424
8a1c38d1 2425 func = ((vn << 1) | port);
2426
2427 /* Set the attention towards other drivers
2428 on the same port */
2429 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2430 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2431 }
34f80b04 2432
2433 if (bp->link_vars.link_up) {
2434 int i;
2435
2436 /* Init rate shaping and fairness contexts */
2437 bnx2x_init_port_minmax(bp);
34f80b04 2438
34f80b04 2439 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2440 bnx2x_init_vn_minmax(bp, 2*vn + port);
2441
2442 /* Store it to internal memory */
2443 for (i = 0;
2444 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2445 REG_WR(bp, BAR_XSTRORM_INTMEM +
2446 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2447 ((u32 *)(&bp->cmng))[i]);
2448 }
34f80b04 2449 }
c18487ee 2450}
a2fbb9ea 2451
2452static void bnx2x__link_status_update(struct bnx2x *bp)
2453{
2454 int func = BP_FUNC(bp);
2455
2456 if (bp->state != BNX2X_STATE_OPEN)
2457 return;
a2fbb9ea 2458
c18487ee 2459 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2460
2461 if (bp->link_vars.link_up)
2462 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2463 else
2464 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2465
2466 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2467 bnx2x_calc_vn_weight_sum(bp);
2468
2469 /* indicate link status */
2470 bnx2x_link_report(bp);
a2fbb9ea 2471}
a2fbb9ea 2472
2473static void bnx2x_pmf_update(struct bnx2x *bp)
2474{
2475 int port = BP_PORT(bp);
2476 u32 val;
2477
2478 bp->port.pmf = 1;
2479 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2480
2481 /* enable nig attention */
2482 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2483 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2484 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2485
2486 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2487}
2488
c18487ee 2489/* end of Link */
2490
2491/* slow path */
2492
2493/*
2494 * General service functions
2495 */
2496
2497/* send the MCP a request, block until there is a reply */
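/* The handshake: each request carries an incrementing sequence number in
 * the low bits of drv_mb_header, and a reply is only accepted once the FW
 * echoes that same sequence back in fw_mb_header */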
2498u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2499{
2500 int func = BP_FUNC(bp);
2501 u32 seq = ++bp->fw_seq;
2502 u32 rc = 0;
2503 u32 cnt = 1;
2504 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2505
2506 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2507 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2508
2509 do {
2510 /* let the FW do its magic ... */
2511 msleep(delay);
2512
2513 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2514
2515 /* Give the FW up to 2 seconds (200*10ms) */
2516 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
2517
2518 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2519 cnt*delay, rc, seq);
2520
2521 /* is this a reply to our command? */
2522 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2523 rc &= FW_MSG_CODE_MASK;
2524 else {
2525 /* FW BUG! */
2526 BNX2X_ERR("FW failed to respond!\n");
2527 bnx2x_fw_dump(bp);
2528 rc = 0;
2529 }
2530
2531 return rc;
2532}
2533
2534static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2535static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set);
2536static void bnx2x_set_rx_mode(struct net_device *dev);
2537
2538static void bnx2x_e1h_disable(struct bnx2x *bp)
2539{
2540 int port = BP_PORT(bp);
2541 int i;
2542
2543 bp->rx_mode = BNX2X_RX_MODE_NONE;
2544 bnx2x_set_storm_rx_mode(bp);
2545
2546 netif_tx_disable(bp->dev);
2547 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2548
2549 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2550
2551 bnx2x_set_mac_addr_e1h(bp, 0);
2552
2553 for (i = 0; i < MC_HASH_SIZE; i++)
2554 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2555
2556 netif_carrier_off(bp->dev);
2557}
2558
2559static void bnx2x_e1h_enable(struct bnx2x *bp)
2560{
2561 int port = BP_PORT(bp);
2562
2563 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2564
2565 bnx2x_set_mac_addr_e1h(bp, 1);
2566
2567 /* Tx queues should only be re-enabled */
2568 netif_tx_wake_all_queues(bp->dev);
2569
2570 /* Initialize the receive filter. */
2571 bnx2x_set_rx_mode(bp->dev);
2572}
2573
2574static void bnx2x_update_min_max(struct bnx2x *bp)
2575{
2576 int port = BP_PORT(bp);
2577 int vn, i;
2578
2579 /* Init rate shaping and fairness contexts */
2580 bnx2x_init_port_minmax(bp);
2581
2582 bnx2x_calc_vn_weight_sum(bp);
2583
2584 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2585 bnx2x_init_vn_minmax(bp, 2*vn + port);
2586
2587 if (bp->port.pmf) {
2588 int func;
2589
2590 /* Set the attention towards other drivers on the same port */
2591 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2592 if (vn == BP_E1HVN(bp))
2593 continue;
2594
2595 func = ((vn << 1) | port);
2596 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2597 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2598 }
2599
2600 /* Store it to internal memory */
2601 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2602 REG_WR(bp, BAR_XSTRORM_INTMEM +
2603 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2604 ((u32 *)(&bp->cmng))[i]);
2605 }
2606}
2607
2608static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2609{
2610 int func = BP_FUNC(bp);
2611
2612 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2613 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2614
2615 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2616
2617 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2618 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2619 bp->state = BNX2X_STATE_DISABLED;
2620
2621 bnx2x_e1h_disable(bp);
2622 } else {
2623 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2624 bp->state = BNX2X_STATE_OPEN;
2625
2626 bnx2x_e1h_enable(bp);
2627 }
2628 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2629 }
2630 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2631
2632 bnx2x_update_min_max(bp);
2633 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2634 }
2635
2636 /* Report results to MCP */
2637 if (dcc_event)
2638 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2639 else
2640 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2641}
2642
2643/* the slow path queue is odd since completions arrive on the fastpath ring */
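/* Ring discipline sketch: the producer BD and index wrap together at
 * spq_last_bd, and spq_left is a simple credit counter, all under
 * spq_lock; the wmb() orders the BD write before the producer update */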
2644static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2645 u32 data_hi, u32 data_lo, int common)
2646{
34f80b04 2647 int func = BP_FUNC(bp);
a2fbb9ea 2648
2649 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2650 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2651 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2652 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2653 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2654
2655#ifdef BNX2X_STOP_ON_ERROR
2656 if (unlikely(bp->panic))
2657 return -EIO;
2658#endif
2659
34f80b04 2660 spin_lock_bh(&bp->spq_lock);
2661
2662 if (!bp->spq_left) {
2663 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2664 spin_unlock_bh(&bp->spq_lock);
2665 bnx2x_panic();
2666 return -EBUSY;
2667 }
f1410647 2668
2669 /* CID needs the port number to be encoded in it */
2670 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2671 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2672 HW_CID(bp, cid)));
2673 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2674 if (common)
2675 bp->spq_prod_bd->hdr.type |=
2676 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2677
2678 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2679 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2680
2681 bp->spq_left--;
2682
2683 if (bp->spq_prod_bd == bp->spq_last_bd) {
2684 bp->spq_prod_bd = bp->spq;
2685 bp->spq_prod_idx = 0;
2686 DP(NETIF_MSG_TIMER, "end of spq\n");
2687
2688 } else {
2689 bp->spq_prod_bd++;
2690 bp->spq_prod_idx++;
2691 }
2692
2693 /* Make sure that BD data is updated before writing the producer */
2694 wmb();
2695
34f80b04 2696 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2697 bp->spq_prod_idx);
2698
2699 mmiowb();
2700
34f80b04 2701 spin_unlock_bh(&bp->spq_lock);
2702 return 0;
2703}
2704
2705/* acquire split MCP access lock register */
4a37fb66 2706static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2707{
a2fbb9ea 2708 u32 i, j, val;
34f80b04 2709 int rc = 0;
2710
2711 might_sleep();
2712 i = 100;
2713 for (j = 0; j < i*10; j++) {
2714 val = (1UL << 31);
2715 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2716 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2717 if (val & (1L << 31))
2718 break;
2719
2720 msleep(5);
2721 }
a2fbb9ea 2722 if (!(val & (1L << 31))) {
19680c48 2723 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2724 rc = -EBUSY;
2725 }
2726
2727 return rc;
2728}
2729
2730/* release split MCP access lock register */
2731static void bnx2x_release_alr(struct bnx2x *bp)
2732{
2733 u32 val = 0;
2734
2735 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2736}
2737
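/* Returns a bitmask of default status block indices that changed since
 * the last call: bit 0 - attention bits, bit 1 - CSTORM, bit 2 - USTORM,
 * bit 3 - XSTORM, bit 4 - TSTORM */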
2738static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2739{
2740 struct host_def_status_block *def_sb = bp->def_status_blk;
2741 u16 rc = 0;
2742
2743 barrier(); /* status block is written to by the chip */
2744 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2745 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2746 rc |= 1;
2747 }
2748 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2749 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2750 rc |= 2;
2751 }
2752 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2753 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2754 rc |= 4;
2755 }
2756 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2757 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2758 rc |= 8;
2759 }
2760 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2761 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2762 rc |= 16;
2763 }
2764 return rc;
2765}
2766
2767/*
2768 * slow path service functions
2769 */
2770
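/* Newly asserted attentions are masked in the AEU (under the per-port HW
 * lock), recorded in attn_state and acknowledged at the HC; for NIG
 * attentions the NIG mask is saved and restored around bnx2x_link_attn() */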
2771static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2772{
34f80b04 2773 int port = BP_PORT(bp);
2774 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2775 COMMAND_REG_ATTN_BITS_SET);
2776 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2777 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2778 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2779 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2780 u32 aeu_mask;
87942b46 2781 u32 nig_mask = 0;
a2fbb9ea 2782
2783 if (bp->attn_state & asserted)
2784 BNX2X_ERR("IGU ERROR\n");
2785
2786 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2787 aeu_mask = REG_RD(bp, aeu_addr);
2788
a2fbb9ea 2789 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2790 aeu_mask, asserted);
2791 aeu_mask &= ~(asserted & 0xff);
2792 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2793
2794 REG_WR(bp, aeu_addr, aeu_mask);
2795 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2796
3fcaf2e5 2797 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2798 bp->attn_state |= asserted;
3fcaf2e5 2799 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2800
2801 if (asserted & ATTN_HARD_WIRED_MASK) {
2802 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2803
2804 bnx2x_acquire_phy_lock(bp);
2805
877e9aa4 2806 /* save nig interrupt mask */
87942b46 2807 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2808 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2809
c18487ee 2810 bnx2x_link_attn(bp);
2811
2812 /* handle unicore attn? */
2813 }
2814 if (asserted & ATTN_SW_TIMER_4_FUNC)
2815 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2816
2817 if (asserted & GPIO_2_FUNC)
2818 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2819
2820 if (asserted & GPIO_3_FUNC)
2821 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2822
2823 if (asserted & GPIO_4_FUNC)
2824 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2825
2826 if (port == 0) {
2827 if (asserted & ATTN_GENERAL_ATTN_1) {
2828 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2829 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2830 }
2831 if (asserted & ATTN_GENERAL_ATTN_2) {
2832 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2833 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2834 }
2835 if (asserted & ATTN_GENERAL_ATTN_3) {
2836 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2837 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2838 }
2839 } else {
2840 if (asserted & ATTN_GENERAL_ATTN_4) {
2841 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2842 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2843 }
2844 if (asserted & ATTN_GENERAL_ATTN_5) {
2845 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2846 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2847 }
2848 if (asserted & ATTN_GENERAL_ATTN_6) {
2849 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2850 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2851 }
2852 }
2853
2854 } /* if hardwired */
2855
2856 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2857 asserted, hc_addr);
2858 REG_WR(bp, hc_addr, asserted);
2859
2860 /* now set back the mask */
a5e9a7cf 2861 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2862 REG_WR(bp, nig_int_mask_addr, nig_mask);
2863 bnx2x_release_phy_lock(bp);
2864 }
2865}
2866
2867static inline void bnx2x_fan_failure(struct bnx2x *bp)
2868{
2869 int port = BP_PORT(bp);
2870
2871 /* mark the failure */
2872 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2873 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2874 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2875 bp->link_params.ext_phy_config);
2876
2877 /* log the failure */
2878 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2879 " the driver to shut down the card to prevent permanent"
2880 " damage. Please contact Dell Support for assistance\n",
2881 bp->dev->name);
2882}
877e9aa4 2883static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2884{
34f80b04 2885 int port = BP_PORT(bp);
877e9aa4 2886 int reg_offset;
4d295db0 2887 u32 val, swap_val, swap_override;
877e9aa4 2888
2889 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2890 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2891
34f80b04 2892 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2893
2894 val = REG_RD(bp, reg_offset);
2895 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2896 REG_WR(bp, reg_offset, val);
2897
2898 BNX2X_ERR("SPIO5 hw attention\n");
2899
fd4ef40d 2900 /* Fan failure attention */
2901 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2902 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2903 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2904 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2905 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2906 /* The PHY reset is controlled by GPIO 1 */
2907 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2908 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2909 break;
2910
2911 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2912 /* The PHY reset is controlled by GPIO 1 */
2913 /* fake the port number to cancel the swap done in
2914 set_gpio() */
2915 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2916 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2917 port = (swap_val && swap_override) ^ 1;
2918 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2919 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2920 break;
2921
2922 default:
2923 break;
2924 }
fd4ef40d 2925 bnx2x_fan_failure(bp);
877e9aa4 2926 }
34f80b04 2927
2928 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2929 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2930 bnx2x_acquire_phy_lock(bp);
2931 bnx2x_handle_module_detect_int(&bp->link_params);
2932 bnx2x_release_phy_lock(bp);
2933 }
2934
2935 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2936
2937 val = REG_RD(bp, reg_offset);
2938 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2939 REG_WR(bp, reg_offset, val);
2940
2941 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 2942 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2943 bnx2x_panic();
2944 }
2945}
2946
2947static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2948{
2949 u32 val;
2950
0626b899 2951 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2952
2953 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2954 BNX2X_ERR("DB hw attention 0x%x\n", val);
2955 /* DORQ discard attention */
2956 if (val & 0x2)
2957 BNX2X_ERR("FATAL error from DORQ\n");
2958 }
2959
2960 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2961
2962 int port = BP_PORT(bp);
2963 int reg_offset;
2964
2965 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2966 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2967
2968 val = REG_RD(bp, reg_offset);
2969 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2970 REG_WR(bp, reg_offset, val);
2971
2972 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 2973 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2974 bnx2x_panic();
2975 }
2976}
2977
2978static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2979{
2980 u32 val;
2981
2982 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2983
2984 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2985 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2986 /* CFC error attention */
2987 if (val & 0x2)
2988 BNX2X_ERR("FATAL error from CFC\n");
2989 }
2990
2991 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2992
2993 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2994 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2995 /* RQ_USDMDP_FIFO_OVERFLOW */
2996 if (val & 0x18000)
2997 BNX2X_ERR("FATAL error from PXP\n");
2998 }
2999
3000 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3001
3002 int port = BP_PORT(bp);
3003 int reg_offset;
3004
3005 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3006 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3007
3008 val = REG_RD(bp, reg_offset);
3009 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3010 REG_WR(bp, reg_offset, val);
3011
3012 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3013 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
3014 bnx2x_panic();
3015 }
3016}
3017
3018static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3019{
3020 u32 val;
3021
3022 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3023
3024 if (attn & BNX2X_PMF_LINK_ASSERT) {
3025 int func = BP_FUNC(bp);
3026
3027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3028 val = SHMEM_RD(bp, func_mb[func].drv_status);
3029 if (val & DRV_STATUS_DCC_EVENT_MASK)
3030 bnx2x_dcc_event(bp,
3031 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3032 bnx2x__link_status_update(bp);
2691d51d 3033 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3034 bnx2x_pmf_update(bp);
3035
3036 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3037
3038 BNX2X_ERR("MC assert!\n");
3039 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3041 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3042 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3043 bnx2x_panic();
3044
3045 } else if (attn & BNX2X_MCP_ASSERT) {
3046
3047 BNX2X_ERR("MCP assert!\n");
3048 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3049 bnx2x_fw_dump(bp);
3050
3051 } else
3052 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3053 }
3054
3055 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
3056 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3057 if (attn & BNX2X_GRC_TIMEOUT) {
3058 val = CHIP_IS_E1H(bp) ?
3059 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3060 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3061 }
3062 if (attn & BNX2X_GRC_RSV) {
3063 val = CHIP_IS_E1H(bp) ?
3064 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3065 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3066 }
877e9aa4 3067 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
3068 }
3069}
3070
3071static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3072{
3073 struct attn_route attn;
3074 struct attn_route group_mask;
34f80b04 3075 int port = BP_PORT(bp);
877e9aa4 3076 int index;
3077 u32 reg_addr;
3078 u32 val;
3fcaf2e5 3079 u32 aeu_mask;
3080
3081 /* need to take HW lock because MCP or other port might also
3082 try to handle this event */
4a37fb66 3083 bnx2x_acquire_alr(bp);
3084
3085 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3086 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3087 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3088 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3089 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3090 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3091
3092 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3093 if (deasserted & (1 << index)) {
3094 group_mask = bp->attn_group[index];
3095
3096 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3097 index, group_mask.sig[0], group_mask.sig[1],
3098 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 3099
3100 bnx2x_attn_int_deasserted3(bp,
3101 attn.sig[3] & group_mask.sig[3]);
3102 bnx2x_attn_int_deasserted1(bp,
3103 attn.sig[1] & group_mask.sig[1]);
3104 bnx2x_attn_int_deasserted2(bp,
3105 attn.sig[2] & group_mask.sig[2]);
3106 bnx2x_attn_int_deasserted0(bp,
3107 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 3108
3109 if ((attn.sig[0] & group_mask.sig[0] &
3110 HW_PRTY_ASSERT_SET_0) ||
3111 (attn.sig[1] & group_mask.sig[1] &
3112 HW_PRTY_ASSERT_SET_1) ||
3113 (attn.sig[2] & group_mask.sig[2] &
3114 HW_PRTY_ASSERT_SET_2))
6378c025 3115 BNX2X_ERR("FATAL HW block parity attention\n");
3116 }
3117 }
3118
4a37fb66 3119 bnx2x_release_alr(bp);
a2fbb9ea 3120
5c862848 3121 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3122
3123 val = ~deasserted;
3124 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3125 val, reg_addr);
5c862848 3126 REG_WR(bp, reg_addr, val);
a2fbb9ea 3127
a2fbb9ea 3128 if (~bp->attn_state & deasserted)
3fcaf2e5 3129 BNX2X_ERR("IGU ERROR\n");
3130
3131 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3132 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3133
3134 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3135 aeu_mask = REG_RD(bp, reg_addr);
3136
3137 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3138 aeu_mask, deasserted);
3139 aeu_mask |= (deasserted & 0xff);
3140 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3141
3142 REG_WR(bp, reg_addr, aeu_mask);
3143 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3144
3145 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3146 bp->attn_state &= ~deasserted;
3147 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3148}
3149
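/* A bit is newly asserted when set in attn_bits but not yet in attn_ack
 * or attn_state; it is deasserted when cleared in attn_bits while still
 * set in both attn_ack and attn_state */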
3150static void bnx2x_attn_int(struct bnx2x *bp)
3151{
3152 /* read local copy of bits */
3153 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3154 attn_bits);
3155 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3156 attn_bits_ack);
3157 u32 attn_state = bp->attn_state;
3158
3159 /* look for changed bits */
3160 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3161 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3162
3163 DP(NETIF_MSG_HW,
3164 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3165 attn_bits, attn_ack, asserted, deasserted);
3166
3167 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3168 BNX2X_ERR("BAD attention state\n");
3169
3170 /* handle bits that were raised */
3171 if (asserted)
3172 bnx2x_attn_int_asserted(bp, asserted);
3173
3174 if (deasserted)
3175 bnx2x_attn_int_deasserted(bp, deasserted);
3176}
3177
3178static void bnx2x_sp_task(struct work_struct *work)
3179{
1cf167f2 3180 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3181 u16 status;
3182
34f80b04 3183
3184 /* Return here if interrupt is disabled */
3185 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3186 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3187 return;
3188 }
3189
3190 status = bnx2x_update_dsb_idx(bp);
3191/* if (status == 0) */
3192/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3193
3196a88a 3194 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3195
3196 /* HW attentions */
3197 if (status & 0x1)
a2fbb9ea 3198 bnx2x_attn_int(bp);
a2fbb9ea 3199
68d59484 3200 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3201 IGU_INT_NOP, 1);
3202 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3203 IGU_INT_NOP, 1);
3204 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3205 IGU_INT_NOP, 1);
3206 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3207 IGU_INT_NOP, 1);
3208 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3209 IGU_INT_ENABLE, 1);
877e9aa4 3210
3211}
3212
3213static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3214{
3215 struct net_device *dev = dev_instance;
3216 struct bnx2x *bp = netdev_priv(dev);
3217
3218 /* Return here if interrupt is disabled */
3219 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3220 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3221 return IRQ_HANDLED;
3222 }
3223
8d9c5f34 3224 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3225
3226#ifdef BNX2X_STOP_ON_ERROR
3227 if (unlikely(bp->panic))
3228 return IRQ_HANDLED;
3229#endif
3230
1cf167f2 3231 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3232
3233 return IRQ_HANDLED;
3234}
3235
3236/* end of slow path */
3237
3238/* Statistics */
3239
3240/****************************************************************************
3241* Macros
3242****************************************************************************/
3243
3244/* sum[hi:lo] += add[hi:lo] */
3245#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3246 do { \
3247 s_lo += a_lo; \
f5ba6772 3248 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3249 } while (0)
3250
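/* Worked example for ADD_64: with s = 0x1:0xffffffff and a = 0x0:0x1,
 * s_lo wraps to 0 (s_lo < a_lo), so the carry makes s_hi 0x2 - a 64-bit
 * add built from two u32 halves */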
3251/* difference = minuend - subtrahend */
3252#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3253 do { \
3254 if (m_lo < s_lo) { \
3255 /* underflow */ \
a2fbb9ea 3256 d_hi = m_hi - s_hi; \
bb2a0f7a 3257 if (d_hi > 0) { \
6378c025 3258 /* we can 'loan' 1 */ \
3259 d_hi--; \
3260 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3261 } else { \
6378c025 3262 /* m_hi <= s_hi */ \
3263 d_hi = 0; \
3264 d_lo = 0; \
3265 } \
3266 } else { \
3267 /* m_lo >= s_lo */ \
a2fbb9ea 3268 if (m_hi < s_hi) { \
3269 d_hi = 0; \
3270 d_lo = 0; \
3271 } else { \
6378c025 3272 /* m_hi >= s_hi */ \
3273 d_hi = m_hi - s_hi; \
3274 d_lo = m_lo - s_lo; \
3275 } \
3276 } \
3277 } while (0)
3278
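/* Worked example for DIFF_64: 0x2:0x0 - 0x1:0x1 borrows from the high
 * word and yields 0x0:0xffffffff; if the subtrahend is larger, the result
 * saturates to 0:0 instead of going negative */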
bb2a0f7a 3279#define UPDATE_STAT64(s, t) \
a2fbb9ea 3280 do { \
3281 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3282 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3283 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3284 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3285 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3286 pstats->mac_stx[1].t##_lo, diff.lo); \
3287 } while (0)
3288
bb2a0f7a 3289#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3290 do { \
3291 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3292 diff.lo, new->s##_lo, old->s##_lo); \
3293 ADD_64(estats->t##_hi, diff.hi, \
3294 estats->t##_lo, diff.lo); \
3295 } while (0)
3296
3297/* sum[hi:lo] += add */
3298#define ADD_EXTEND_64(s_hi, s_lo, a) \
3299 do { \
3300 s_lo += a; \
3301 s_hi += (s_lo < a) ? 1 : 0; \
3302 } while (0)
3303
bb2a0f7a 3304#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3305 do { \
3306 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3307 pstats->mac_stx[1].s##_lo, \
3308 new->s); \
3309 } while (0)
3310
bb2a0f7a 3311#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3312 do { \
3313 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3314 old_tclient->s = tclient->s; \
3315 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3316 } while (0)
3317
3318#define UPDATE_EXTEND_USTAT(s, t) \
3319 do { \
3320 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3321 old_uclient->s = uclient->s; \
3322 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3323 } while (0)
3324
3325#define UPDATE_EXTEND_XSTAT(s, t) \
3326 do { \
3327 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3328 old_xclient->s = xclient->s; \
3329 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3330 } while (0)
3331
3332/* minuend -= subtrahend */
3333#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3334 do { \
3335 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3336 } while (0)
3337
3338/* minuend[hi:lo] -= subtrahend */
3339#define SUB_EXTEND_64(m_hi, m_lo, s) \
3340 do { \
3341 SUB_64(m_hi, 0, m_lo, s); \
3342 } while (0)
3343
3344#define SUB_EXTEND_USTAT(s, t) \
3345 do { \
3346 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3347 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3348 } while (0)
3349
3350/*
3351 * General service functions
3352 */
3353
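/* On 32-bit builds bnx2x_hilo() returns only the low 32 bits of the
 * statistic, since a 32-bit long cannot hold the full 64-bit value */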
3354static inline long bnx2x_hilo(u32 *hiref)
3355{
3356 u32 lo = *(hiref + 1);
3357#if (BITS_PER_LONG == 64)
3358 u32 hi = *hiref;
3359
3360 return HILO_U64(hi, lo);
3361#else
3362 return lo;
3363#endif
3364}
3365
3366/*
3367 * Init service functions
3368 */
3369
3370static void bnx2x_storm_stats_post(struct bnx2x *bp)
3371{
3372 if (!bp->stats_pending) {
3373 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3374 int i, rc;
3375
3376 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3377 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3378 for_each_queue(bp, i)
3379 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3380
3381 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3382 ((u32 *)&ramrod_data)[1],
3383 ((u32 *)&ramrod_data)[0], 0);
3384 if (rc == 0) {
3385 /* the stats ramrod has its own slot on the spq */
3386 bp->spq_left++;
3387 bp->stats_pending = 1;
3388 }
3389 }
3390}
3391
3392static void bnx2x_hw_stats_post(struct bnx2x *bp)
3393{
3394 struct dmae_command *dmae = &bp->stats_dmae;
3395 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3396
3397 *stats_comp = DMAE_COMP_VAL;
3398 if (CHIP_REV_IS_SLOW(bp))
3399 return;
3400
3401 /* loader */
3402 if (bp->executer_idx) {
3403 int loader_idx = PMF_DMAE_C(bp);
3404
3405 memset(dmae, 0, sizeof(struct dmae_command));
3406
3407 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3408 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3409 DMAE_CMD_DST_RESET |
3410#ifdef __BIG_ENDIAN
3411 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3412#else
3413 DMAE_CMD_ENDIANITY_DW_SWAP |
3414#endif
3415 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3416 DMAE_CMD_PORT_0) |
3417 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3418 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3419 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3420 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3421 sizeof(struct dmae_command) *
3422 (loader_idx + 1)) >> 2;
3423 dmae->dst_addr_hi = 0;
3424 dmae->len = sizeof(struct dmae_command) >> 2;
3425 if (CHIP_IS_E1(bp))
3426 dmae->len--;
3427 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3428 dmae->comp_addr_hi = 0;
3429 dmae->comp_val = 1;
3430
3431 *stats_comp = 0;
3432 bnx2x_post_dmae(bp, dmae, loader_idx);
3433
3434 } else if (bp->func_stx) {
3435 *stats_comp = 0;
3436 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3437 }
3438}
3439
3440static int bnx2x_stats_comp(struct bnx2x *bp)
3441{
3442 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3443 int cnt = 10;
3444
3445 might_sleep();
3446 while (*stats_comp != DMAE_COMP_VAL) {
3447 if (!cnt) {
3448 BNX2X_ERR("timeout waiting for stats finished\n");
3449 break;
3450 }
3451 cnt--;
12469401 3452 msleep(1);
3453 }
3454 return 1;
3455}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}
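
/* Note: the read of the shared-memory port statistics is split into two
 * DMAE commands because a single GRC-to-PCI transfer is limited to
 * DMAE_LEN32_RD_MAX dwords; the second command resumes at that offset
 * (in dwords on the GRC side, * 4 for the byte offset on the host side).
 */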

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
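
/* Note on the program built above: every command except the last completes
 * into a DMAE GO register (comp_val = 1), while the final NIG read is the
 * only one that completes to host memory with DMAE_COMP_VAL; that write is
 * what bnx2x_stats_comp() polls for, marking the whole sequence done.
 */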

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}
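
/* Note: mac_stx[0] holds the previously seen hardware values and
 * mac_stx[1] the running totals, so UPDATE_STAT64() presumably extends
 * each freshly DMAE'd counter by its delta since the last pass; the pause
 * counters are then mirrored from mac_stx[1] into the driver-wide stats.
 */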

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct {
		u32 lo;
		u32 hi;
	} diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by DMAE but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
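
/* Note: bumping host_port_stats_end and copying it into ..._start appears
 * to act as a sequence marker for the MCP: the two words bracket the block
 * written to shared memory, so a reader that sees start == end can treat
 * the snapshot as consistent.
 */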

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memcpy(&(fstats->total_bytes_received_hi),
	       &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
			le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
		qstats->total_bytes_received_lo =
			le32_to_cpu(tclient->rcv_broadcast_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_multicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_multicast_bytes.lo));

		ADD_64(qstats->total_bytes_received_hi,
		       le32_to_cpu(tclient->rcv_unicast_bytes.hi),
		       qstats->total_bytes_received_lo,
		       le32_to_cpu(tclient->rcv_unicast_bytes.lo));

		qstats->valid_bytes_received_hi =
					qstats->total_bytes_received_hi;
		qstats->valid_bytes_received_lo =
					qstats->total_bytes_received_lo;

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->unicast_bytes_sent.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->unicast_bytes_sent.lo);

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->multicast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->multicast_bytes_sent.lo));

		ADD_64(qstats->total_bytes_transmitted_hi,
		       le32_to_cpu(xclient->broadcast_bytes_sent.hi),
		       qstats->total_bytes_transmitted_lo,
		       le32_to_cpu(xclient->broadcast_bytes_sent.lo));

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
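
/* Note on the validity check above: each storm stamps its per-client block
 * with the drv_counter it was handed in the STAT_QUERY ramrod.  Since
 * bnx2x_storm_stats_post() sends (stats_counter++), a fully updated block
 * must carry stats_counter - 1; e.g. after posting with drv_counter 7,
 * bp->stats_counter is 8 and only blocks stamped 7 are accepted.
 */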

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_rx_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_rx_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct bnx2x_fastpath *fp0_rx = bp->fp;
		struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(fp0_tx),
		       le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
			     fp0_rx->rx_comp_cons),
		       le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxoverflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       le32_to_cpu(old_tclient->checksum_discard),
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       le32_to_cpu(old_tclient->ttl0_discard));

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}
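
/* Note: if the storms have not yet stamped fresh numbers,
 * bnx2x_storm_stats_update() returns non-zero and stats_pending stays set
 * and is incremented; after three consecutive misses the firmware is
 * treated as wedged and bnx2x_panic() is called.  On success the next
 * DMAE and ramrod round is kicked off immediately at the bottom.
 */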

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	/* Make sure the state has been "changed" */
	smp_wmb();

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}
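
/* Example walk through the table: in STATS_STATE_DISABLED a LINK_UP event
 * runs bnx2x_stats_start() and moves to ENABLED; subsequent UPDATE events
 * (from bnx2x_timer) run bnx2x_stats_update() and stay in ENABLED, and a
 * STOP event runs bnx2x_stats_stop() and returns to DISABLED.
 */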

static void bnx2x_port_stats_base_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_lo = bp->port.port_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_port_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_func_stats_base_init(struct bnx2x *bp)
{
	int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
	int port = BP_PORT(bp);
	int func;
	u32 func_stx;

	/* sanity */
	if (!bp->port.pmf || !bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	/* save our func_stx */
	func_stx = bp->func_stx;

	for (vn = VN_0; vn < vn_max; vn++) {
		func = 2*vn + port;

		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		bnx2x_func_stats_init(bp);
		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}

	/* restore our func_stx */
	bp->func_stx = func_stx;
}
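
/* Note: as PMF this function initializes the shared-memory statistics of
 * every virtual function on the port, temporarily borrowing bp->func_stx;
 * the func = 2*vn + port mapping reflects the two-port, four-VN E1H
 * layout (port 0 serving functions 0/2/4/6 and port 1 serving 1/3/5/7).
 */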

static void bnx2x_func_stats_base_update(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = bp->func_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port and func stats for management */
	if (!BP_NOMCP(bp)) {
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);

	} else {
		bp->port.port_stx = 0;
		bp->func_stx = 0;
	}
	DP(BNX2X_MSG_STATS, "port_stx 0x%x  func_stx 0x%x\n",
	   bp->port.port_stx, bp->func_stx);

	/* port stats */
	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;

	if (bp->port.pmf) {
		if (bp->port.port_stx)
			bnx2x_port_stats_base_init(bp);

		if (bp->func_stx)
			bnx2x_func_stats_base_init(bp);

	} else if (bp->func_stx)
		bnx2x_func_stats_base_update(bp);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
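
/* Heartbeat example: if the driver writes drv_pulse = 0x12, the check
 * accepts mcp_pulse == 0x12 (MCP already echoed it) or 0x11 (echo still
 * pending); anything else means driver and management firmware have lost
 * step, and the mismatch is logged.  The masking keeps the comparison
 * valid across the sequence-number wrap-around.
 */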

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
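
/* Note: each fastpath status block has a USTORM and a CSTORM section; the
 * code above publishes the host DMA address of each section to CSTORM
 * fast memory, tags it with the owning function, starts with every HC
 * index disabled (the 1 written per index), and finally enables
 * interrupts for the block via bnx2x_ack_sb().
 */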

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
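
/* Note: the attention signature registers are laid out as four u32s per
 * dynamic attention group, hence the 0x10 stride and the 0x0/0x4/0x8/0xc
 * offsets read into sig[0..3] above; the default status block then covers
 * attentions plus one section per storm (U/C/T/X).
 */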

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/12) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/12) ? 0 : 1);
	}
}
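
/* Note: rx_ticks/tx_ticks are the user-visible coalescing values in
 * microseconds; the /12 suggests the HC timeout field counts in 12 us
 * hardware units (assumption based on this conversion).  A setting below
 * 12 us yields 0, in which case coalescing for that index is disabled
 * outright via the HC_DISABLE word.
 */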

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}
4905
a2fbb9ea
ET
4906static void bnx2x_init_rx_rings(struct bnx2x *bp)
4907{
7a9b2557 4908 int func = BP_FUNC(bp);
32626230
EG
4909 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4910 ETH_MAX_AGGREGATION_QUEUES_E1H;
4911 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4912 int i, j;
a2fbb9ea 4913
87942b46 4914 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
0f00846d
EG
4915 DP(NETIF_MSG_IFUP,
4916 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4917
7a9b2557 4918 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4919
555f6c78 4920 for_each_rx_queue(bp, j) {
32626230 4921 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4922
32626230 4923 for (i = 0; i < max_agg_queues; i++) {
7a9b2557
VZ
4924 fp->tpa_pool[i].skb =
4925 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4926 if (!fp->tpa_pool[i].skb) {
4927 BNX2X_ERR("Failed to allocate TPA "
4928 "skb pool for queue[%d] - "
4929 "disabling TPA on this "
4930 "queue!\n", j);
4931 bnx2x_free_tpa_pool(bp, fp, i);
4932 fp->disable_tpa = 1;
4933 break;
4934 }
4935 pci_unmap_addr_set((struct sw_rx_bd *)
4936 &bp->fp->tpa_pool[i],
4937 mapping, 0);
4938 fp->tpa_state[i] = BNX2X_TPA_STOP;
4939 }
4940 }
4941 }
4942
555f6c78 4943 for_each_rx_queue(bp, j) {
a2fbb9ea
ET
4944 struct bnx2x_fastpath *fp = &bp->fp[j];
4945
4946 fp->rx_bd_cons = 0;
4947 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
7a9b2557
VZ
4948 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4949
ca00392c
EG
4950 /* Mark queue as Rx */
4951 fp->is_rx_queue = 1;
4952
7a9b2557
VZ
4953 /* "next page" elements initialization */
4954 /* SGE ring */
4955 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4956 struct eth_rx_sge *sge;
4957
4958 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4959 sge->addr_hi =
4960 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4961 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4962 sge->addr_lo =
4963 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4964 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4965 }
4966
4967 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4968
7a9b2557 4969 /* RX BD ring */
a2fbb9ea
ET
4970 for (i = 1; i <= NUM_RX_RINGS; i++) {
4971 struct eth_rx_bd *rx_bd;
4972
4973 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4974 rx_bd->addr_hi =
4975 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4976 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4977 rx_bd->addr_lo =
4978 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4979 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
a2fbb9ea
ET
4980 }
4981
34f80b04 4982 /* CQ ring */
a2fbb9ea
ET
4983 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4984 struct eth_rx_cqe_next_page *nextpg;
4985
4986 nextpg = (struct eth_rx_cqe_next_page *)
4987 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4988 nextpg->addr_hi =
4989 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4990 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4991 nextpg->addr_lo =
4992 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4993 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
a2fbb9ea
ET
4994 }
4995
7a9b2557
VZ
4996 /* Allocate SGEs and initialize the ring elements */
4997 for (i = 0, ring_prod = 0;
4998 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4999
7a9b2557
VZ
5000 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
5001 BNX2X_ERR("was only able to allocate "
5002 "%d rx sges\n", i);
5003 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
5004 /* Cleanup already allocated elements */
5005 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 5006 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
7a9b2557
VZ
5007 fp->disable_tpa = 1;
5008 ring_prod = 0;
5009 break;
5010 }
5011 ring_prod = NEXT_SGE_IDX(ring_prod);
5012 }
5013 fp->rx_sge_prod = ring_prod;
5014
5015 /* Allocate BDs and initialize BD ring */
66e855f3 5016 fp->rx_comp_cons = 0;
7a9b2557 5017 cqe_ring_prod = ring_prod = 0;
a2fbb9ea
ET
5018 for (i = 0; i < bp->rx_ring_size; i++) {
5019 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
5020 BNX2X_ERR("was only able to allocate "
de832a55
EG
5021 "%d rx skbs on queue[%d]\n", i, j);
5022 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
5023 break;
5024 }
5025 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 5026 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 5027 WARN_ON(ring_prod <= i);
a2fbb9ea
ET
5028 }
5029
7a9b2557
VZ
5030 fp->rx_bd_prod = ring_prod;
5031 /* must not have more available CQEs than BDs */
5032 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
5033 cqe_ring_prod);
a2fbb9ea
ET
5034 fp->rx_pkt = fp->rx_calls = 0;
5035
7a9b2557
VZ
5036 /* Warning!
5037 * This will generate an interrupt (to the TSTORM);
5038 * it must only be done after the chip is initialized.
5039 */
5040 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
5041 fp->rx_sge_prod);
a2fbb9ea
ET
5042 if (j != 0)
5043 continue;
5044
5045 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5046 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
a2fbb9ea
ET
5047 U64_LO(fp->rx_comp_mapping));
5048 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 5049 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
a2fbb9ea
ET
5050 U64_HI(fp->rx_comp_mapping));
5051 }
5052}
5053
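/*
 * Editor's note (illustrative): in each of the three "next page"
 * loops above, iteration i aims the reserved last element(s) of the
 * i-th page at ring base + BCM_PAGE_SIZE * (i % number-of-pages),
 * so page 1 points at page 2, page 2 at page 3, and the final page
 * wraps back to offset 0 (page 1), closing each ring into a circle.
 */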
5054static void bnx2x_init_tx_ring(struct bnx2x *bp)
5055{
5056 int i, j;
5057
555f6c78 5058 for_each_tx_queue(bp, j) {
a2fbb9ea
ET
5059 struct bnx2x_fastpath *fp = &bp->fp[j];
5060
5061 for (i = 1; i <= NUM_TX_RINGS; i++) {
ca00392c
EG
5062 struct eth_tx_next_bd *tx_next_bd =
5063 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
a2fbb9ea 5064
ca00392c 5065 tx_next_bd->addr_hi =
a2fbb9ea 5066 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 5067 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
ca00392c 5068 tx_next_bd->addr_lo =
a2fbb9ea 5069 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 5070 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
a2fbb9ea
ET
5071 }
5072
ca00392c
EG
5073 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
5074 fp->tx_db.data.zero_fill1 = 0;
5075 fp->tx_db.data.prod = 0;
5076
a2fbb9ea
ET
5077 fp->tx_pkt_prod = 0;
5078 fp->tx_pkt_cons = 0;
5079 fp->tx_bd_prod = 0;
5080 fp->tx_bd_cons = 0;
5081 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
5082 fp->tx_pkt = 0;
5083 }
6fe49bb9
EG
5084
5085 /* clean tx statistics */
5086 for_each_rx_queue(bp, i)
5087 bnx2x_fp(bp, i, tx_pkt) = 0;
a2fbb9ea
ET
5088}
5089
5090static void bnx2x_init_sp_ring(struct bnx2x *bp)
5091{
34f80b04 5092 int func = BP_FUNC(bp);
a2fbb9ea
ET
5093
5094 spin_lock_init(&bp->spq_lock);
5095
5096 bp->spq_left = MAX_SPQ_PENDING;
5097 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5098 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5099 bp->spq_prod_bd = bp->spq;
5100 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
5101
34f80b04 5102 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 5103 U64_LO(bp->spq_mapping));
34f80b04
EG
5104 REG_WR(bp,
5105 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
a2fbb9ea
ET
5106 U64_HI(bp->spq_mapping));
5107
34f80b04 5108 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
5109 bp->spq_prod_idx);
5110}
5111
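/*
 * Editor's sketch (hypothetical helper, not driver code; assumes the
 * spq fields initialized above): spq_last_bd = spq + MAX_SP_DESC_CNT
 * exists so that advancing the slowpath producer can wrap from the
 * last BD of the page back to the first in O(1):
 */
static inline void spq_advance_prod_example(struct bnx2x *bp)
{
	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;	/* wrap to first BD */
		bp->spq_prod_idx = 0;
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
}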
5112static void bnx2x_init_context(struct bnx2x *bp)
5113{
5114 int i;
5115
ca00392c 5116 for_each_rx_queue(bp, i) {
a2fbb9ea
ET
5117 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
5118 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 5119 u8 cl_id = fp->cl_id;
a2fbb9ea 5120
34f80b04
EG
5121 context->ustorm_st_context.common.sb_index_numbers =
5122 BNX2X_RX_SB_INDEX_NUM;
0626b899 5123 context->ustorm_st_context.common.clientId = cl_id;
ca00392c 5124 context->ustorm_st_context.common.status_block_id = fp->sb_id;
34f80b04 5125 context->ustorm_st_context.common.flags =
de832a55
EG
5126 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
5127 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
5128 context->ustorm_st_context.common.statistics_counter_id =
5129 cl_id;
8d9c5f34 5130 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 5131 BNX2X_RX_ALIGN_SHIFT;
34f80b04 5132 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 5133 bp->rx_buf_size;
34f80b04 5134 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 5135 U64_HI(fp->rx_desc_mapping);
34f80b04 5136 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 5137 U64_LO(fp->rx_desc_mapping);
7a9b2557
VZ
5138 if (!fp->disable_tpa) {
5139 context->ustorm_st_context.common.flags |=
ca00392c 5140 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
7a9b2557 5141 context->ustorm_st_context.common.sge_buff_size =
8d9c5f34
EG
5142 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
5143 (u32)0xffff);
7a9b2557
VZ
5144 context->ustorm_st_context.common.sge_page_base_hi =
5145 U64_HI(fp->rx_sge_mapping);
5146 context->ustorm_st_context.common.sge_page_base_lo =
5147 U64_LO(fp->rx_sge_mapping);
ca00392c
EG
5148
5149 context->ustorm_st_context.common.max_sges_for_packet =
5150 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
5151 context->ustorm_st_context.common.max_sges_for_packet =
5152 ((context->ustorm_st_context.common.
5153 max_sges_for_packet + PAGES_PER_SGE - 1) &
5154 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
7a9b2557
VZ
5155 }
5156
8d9c5f34
EG
5157 context->ustorm_ag_context.cdu_usage =
5158 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5159 CDU_REGION_NUMBER_UCM_AG,
5160 ETH_CONNECTION_TYPE);
5161
ca00392c
EG
5162 context->xstorm_ag_context.cdu_reserved =
5163 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
5164 CDU_REGION_NUMBER_XCM_AG,
5165 ETH_CONNECTION_TYPE);
5166 }
5167
5168 for_each_tx_queue(bp, i) {
5169 struct bnx2x_fastpath *fp = &bp->fp[i];
5170 struct eth_context *context =
5171 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
5172
5173 context->cstorm_st_context.sb_index_number =
5174 C_SB_ETH_TX_CQ_INDEX;
5175 context->cstorm_st_context.status_block_id = fp->sb_id;
5176
8d9c5f34
EG
5177 context->xstorm_st_context.tx_bd_page_base_hi =
5178 U64_HI(fp->tx_desc_mapping);
5179 context->xstorm_st_context.tx_bd_page_base_lo =
5180 U64_LO(fp->tx_desc_mapping);
ca00392c 5181 context->xstorm_st_context.statistics_data = (fp->cl_id |
8d9c5f34 5182 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea
ET
5183 }
5184}
5185
5186static void bnx2x_init_ind_table(struct bnx2x *bp)
5187{
26c8fa4d 5188 int func = BP_FUNC(bp);
a2fbb9ea
ET
5189 int i;
5190
555f6c78 5191 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
5192 return;
5193
555f6c78
EG
5194 DP(NETIF_MSG_IFUP,
5195 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 5196 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 5197 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 5198 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
0626b899 5199 bp->fp->cl_id + (i % bp->num_rx_queues));
a2fbb9ea
ET
5200}
5201
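/*
 * Editor's sketch (illustrative, mirrors the fill loop above): with,
 * say, num_rx_queues = 4 and a leading client id of 0, the
 * indirection table is filled 0,1,2,3,0,1,2,3,... so RSS hash
 * results are spread round-robin across the Rx clients:
 */
static inline u8 ind_table_entry_example(u8 leading_cl_id,
					 int num_rx_queues, int idx)
{
	return leading_cl_id + (idx % num_rx_queues);
}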
49d66772
ET
5202static void bnx2x_set_client_config(struct bnx2x *bp)
5203{
49d66772 5204 struct tstorm_eth_client_config tstorm_client = {0};
34f80b04
EG
5205 int port = BP_PORT(bp);
5206 int i;
49d66772 5207
e7799c5f 5208 tstorm_client.mtu = bp->dev->mtu;
49d66772 5209 tstorm_client.config_flags =
de832a55
EG
5210 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
5211 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 5212#ifdef BCM_VLAN
0c6671b0 5213 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 5214 tstorm_client.config_flags |=
8d9c5f34 5215 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
49d66772
ET
5216 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
5217 }
5218#endif
49d66772
ET
5219
5220 for_each_queue(bp, i) {
de832a55
EG
5221 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
5222
49d66772 5223 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5224 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
49d66772
ET
5225 ((u32 *)&tstorm_client)[0]);
5226 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5227 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
49d66772
ET
5228 ((u32 *)&tstorm_client)[1]);
5229 }
5230
34f80b04
EG
5231 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
5232 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
49d66772
ET
5233}
5234
a2fbb9ea
ET
5235static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5236{
a2fbb9ea 5237 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
34f80b04
EG
5238 int mode = bp->rx_mode;
5239 int mask = (1 << BP_L_ID(bp));
5240 int func = BP_FUNC(bp);
581ce43d 5241 int port = BP_PORT(bp);
a2fbb9ea 5242 int i;
581ce43d
EG
5243 /* All but management unicast packets should pass to the host as well */
5244 u32 llh_mask =
5245 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
5246 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
5247 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
5248 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 5249
3196a88a 5250 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
a2fbb9ea
ET
5251
5252 switch (mode) {
5253 case BNX2X_RX_MODE_NONE: /* no Rx */
34f80b04
EG
5254 tstorm_mac_filter.ucast_drop_all = mask;
5255 tstorm_mac_filter.mcast_drop_all = mask;
5256 tstorm_mac_filter.bcast_drop_all = mask;
a2fbb9ea 5257 break;
356e2385 5258
a2fbb9ea 5259 case BNX2X_RX_MODE_NORMAL:
34f80b04 5260 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5261 break;
356e2385 5262
a2fbb9ea 5263 case BNX2X_RX_MODE_ALLMULTI:
34f80b04
EG
5264 tstorm_mac_filter.mcast_accept_all = mask;
5265 tstorm_mac_filter.bcast_accept_all = mask;
a2fbb9ea 5266 break;
356e2385 5267
a2fbb9ea 5268 case BNX2X_RX_MODE_PROMISC:
34f80b04
EG
5269 tstorm_mac_filter.ucast_accept_all = mask;
5270 tstorm_mac_filter.mcast_accept_all = mask;
5271 tstorm_mac_filter.bcast_accept_all = mask;
581ce43d
EG
5272 /* pass management unicast packets as well */
5273 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 5274 break;
356e2385 5275
a2fbb9ea 5276 default:
34f80b04
EG
5277 BNX2X_ERR("BAD rx mode (%d)\n", mode);
5278 break;
a2fbb9ea
ET
5279 }
5280
581ce43d
EG
5281 REG_WR(bp,
5282 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
5283 llh_mask);
5284
a2fbb9ea
ET
5285 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
5286 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5287 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
a2fbb9ea
ET
5288 ((u32 *)&tstorm_mac_filter)[i]);
5289
34f80b04 5290/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
a2fbb9ea
ET
5291 ((u32 *)&tstorm_mac_filter)[i]); */
5292 }
a2fbb9ea 5293
49d66772
ET
5294 if (mode != BNX2X_RX_MODE_NONE)
5295 bnx2x_set_client_config(bp);
a2fbb9ea
ET
5296}
5297
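/*
 * Editor's note (illustrative): the accept/drop fields above are
 * per-function bit masks -- mask = 1 << BP_L_ID(bp) touches only this
 * function's bit, so e.g. a function whose leading client id is 2
 * flips only bit 2 of ucast_accept_all and leaves the other
 * functions' filtering in the shared storm structure untouched.
 */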
471de716
EG
5298static void bnx2x_init_internal_common(struct bnx2x *bp)
5299{
5300 int i;
5301
5302 /* Zero this manually as its initialization is
5303 currently missing in the initTool */
5304 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
5305 REG_WR(bp, BAR_USTRORM_INTMEM +
5306 USTORM_AGG_DATA_OFFSET + i * 4, 0);
5307}
5308
5309static void bnx2x_init_internal_port(struct bnx2x *bp)
5310{
5311 int port = BP_PORT(bp);
5312
ca00392c
EG
5313 REG_WR(bp,
5314 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
5315 REG_WR(bp,
5316 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
471de716
EG
5317 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5318 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
5319}
5320
5321static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 5322{
a2fbb9ea
ET
5323 struct tstorm_eth_function_common_config tstorm_config = {0};
5324 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
5325 int port = BP_PORT(bp);
5326 int func = BP_FUNC(bp);
de832a55
EG
5327 int i, j;
5328 u32 offset;
471de716 5329 u16 max_agg_size;
a2fbb9ea
ET
5330
5331 if (is_multi(bp)) {
555f6c78 5332 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
5333 tstorm_config.rss_result_mask = MULTI_MASK;
5334 }
ca00392c
EG
5335
5336 /* Enable TPA if needed */
5337 if (bp->flags & TPA_ENABLE_FLAG)
5338 tstorm_config.config_flags |=
5339 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5340
8d9c5f34
EG
5341 if (IS_E1HMF(bp))
5342 tstorm_config.config_flags |=
5343 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 5344
34f80b04
EG
5345 tstorm_config.leading_client_id = BP_L_ID(bp);
5346
a2fbb9ea 5347 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 5348 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
5349 (*(u32 *)&tstorm_config));
5350
c14423fe 5351 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
5352 bnx2x_set_storm_rx_mode(bp);
5353
de832a55
EG
5354 for_each_queue(bp, i) {
5355 u8 cl_id = bp->fp[i].cl_id;
5356
5357 /* reset xstorm per client statistics */
5358 offset = BAR_XSTRORM_INTMEM +
5359 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5360 for (j = 0;
5361 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5362 REG_WR(bp, offset + j*4, 0);
5363
5364 /* reset tstorm per client statistics */
5365 offset = BAR_TSTRORM_INTMEM +
5366 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5367 for (j = 0;
5368 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5369 REG_WR(bp, offset + j*4, 0);
5370
5371 /* reset ustorm per client statistics */
5372 offset = BAR_USTRORM_INTMEM +
5373 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5374 for (j = 0;
5375 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5376 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
5377 }
5378
5379 /* Init statistics related context */
34f80b04 5380 stats_flags.collect_eth = 1;
a2fbb9ea 5381
66e855f3 5382 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5383 ((u32 *)&stats_flags)[0]);
66e855f3 5384 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5385 ((u32 *)&stats_flags)[1]);
5386
66e855f3 5387 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5388 ((u32 *)&stats_flags)[0]);
66e855f3 5389 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5390 ((u32 *)&stats_flags)[1]);
5391
de832a55
EG
5392 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5393 ((u32 *)&stats_flags)[0]);
5394 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5395 ((u32 *)&stats_flags)[1]);
5396
66e855f3 5397 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 5398 ((u32 *)&stats_flags)[0]);
66e855f3 5399 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
5400 ((u32 *)&stats_flags)[1]);
5401
66e855f3
YG
5402 REG_WR(bp, BAR_XSTRORM_INTMEM +
5403 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5404 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5405 REG_WR(bp, BAR_XSTRORM_INTMEM +
5406 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5407 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5408
5409 REG_WR(bp, BAR_TSTRORM_INTMEM +
5410 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5411 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5412 REG_WR(bp, BAR_TSTRORM_INTMEM +
5413 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5414 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 5415
de832a55
EG
5416 REG_WR(bp, BAR_USTRORM_INTMEM +
5417 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5418 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5419 REG_WR(bp, BAR_USTRORM_INTMEM +
5420 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5421 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5422
34f80b04
EG
5423 if (CHIP_IS_E1H(bp)) {
5424 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5425 IS_E1HMF(bp));
5426 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5427 IS_E1HMF(bp));
5428 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5429 IS_E1HMF(bp));
5430 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5431 IS_E1HMF(bp));
5432
7a9b2557
VZ
5433 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5434 bp->e1hov);
34f80b04
EG
5435 }
5436
4f40f2cb
EG
5437 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5438 max_agg_size =
5439 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5440 SGE_PAGE_SIZE * PAGES_PER_SGE),
5441 (u32)0xffff);
555f6c78 5442 for_each_rx_queue(bp, i) {
7a9b2557 5443 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
5444
5445 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5446 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5447 U64_LO(fp->rx_comp_mapping));
5448 REG_WR(bp, BAR_USTRORM_INTMEM +
0626b899 5449 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
7a9b2557
VZ
5450 U64_HI(fp->rx_comp_mapping));
5451
ca00392c
EG
5452 /* Next page */
5453 REG_WR(bp, BAR_USTRORM_INTMEM +
5454 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5455 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5456 REG_WR(bp, BAR_USTRORM_INTMEM +
5457 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5458 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5459
7a9b2557 5460 REG_WR16(bp, BAR_USTRORM_INTMEM +
0626b899 5461 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
7a9b2557
VZ
5462 max_agg_size);
5463 }
8a1c38d1 5464
1c06328c
EG
5465 /* dropless flow control */
5466 if (CHIP_IS_E1H(bp)) {
5467 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5468
5469 rx_pause.bd_thr_low = 250;
5470 rx_pause.cqe_thr_low = 250;
5471 rx_pause.cos = 1;
5472 rx_pause.sge_thr_low = 0;
5473 rx_pause.bd_thr_high = 350;
5474 rx_pause.cqe_thr_high = 350;
5475 rx_pause.sge_thr_high = 0;
5476
5477 for_each_rx_queue(bp, i) {
5478 struct bnx2x_fastpath *fp = &bp->fp[i];
5479
5480 if (!fp->disable_tpa) {
5481 rx_pause.sge_thr_low = 150;
5482 rx_pause.sge_thr_high = 250;
5483 }
5484
5486 offset = BAR_USTRORM_INTMEM +
5487 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5488 fp->cl_id);
5489 for (j = 0;
5490 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5491 j++)
5492 REG_WR(bp, offset + j*4,
5493 ((u32 *)&rx_pause)[j]);
5494 }
5495 }
5496
8a1c38d1
EG
5497 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5498
5499 /* Init rate shaping and fairness contexts */
5500 if (IS_E1HMF(bp)) {
5501 int vn;
5502
5503 /* During init there is no active link;
5504 until link is up, set the link rate to 10Gbps */
5505 bp->link_vars.line_speed = SPEED_10000;
5506 bnx2x_init_port_minmax(bp);
5507
5508 bnx2x_calc_vn_weight_sum(bp);
5509
5510 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5511 bnx2x_init_vn_minmax(bp, 2*vn + port);
5512
5513 /* Enable rate shaping and fairness */
5514 bp->cmng.flags.cmng_enables =
5515 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5516 if (bp->vn_weight_sum)
5517 bp->cmng.flags.cmng_enables |=
5518 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5519 else
5520 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5521 " fairness will be disabled\n");
5522 } else {
5523 /* rate shaping and fairness are disabled */
5524 DP(NETIF_MSG_IFUP,
5525 "single function mode minmax will be disabled\n");
5526 }
5527
5529 /* Store it to internal memory */
5530 if (bp->port.pmf)
5531 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5532 REG_WR(bp, BAR_XSTRORM_INTMEM +
5533 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5534 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea
ET
5535}
5536
471de716
EG
5537static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5538{
5539 switch (load_code) {
5540 case FW_MSG_CODE_DRV_LOAD_COMMON:
5541 bnx2x_init_internal_common(bp);
5542 /* no break */
5543
5544 case FW_MSG_CODE_DRV_LOAD_PORT:
5545 bnx2x_init_internal_port(bp);
5546 /* no break */
5547
5548 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5549 bnx2x_init_internal_func(bp);
5550 break;
5551
5552 default:
5553 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5554 break;
5555 }
5556}
5557
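/*
 * Editor's note (illustrative): the switch above relies on deliberate
 * fall-through -- FW_MSG_CODE_DRV_LOAD_COMMON also runs the PORT and
 * FUNCTION stages, and ..._LOAD_PORT also runs the FUNCTION stage, so
 * the MCP only names the widest scope this driver instance owns and
 * the narrower init stages cascade from it.
 */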
5558static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
5559{
5560 int i;
5561
5562 for_each_queue(bp, i) {
5563 struct bnx2x_fastpath *fp = &bp->fp[i];
5564
34f80b04 5565 fp->bp = bp;
a2fbb9ea 5566 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5567 fp->index = i;
34f80b04
EG
5568 fp->cl_id = BP_L_ID(bp) + i;
5569 fp->sb_id = fp->cl_id;
ca00392c
EG
5570 /* Suitable Rx and Tx SBs are served by the same client */
5571 if (i >= bp->num_rx_queues)
5572 fp->cl_id -= bp->num_rx_queues;
34f80b04 5573 DP(NETIF_MSG_IFUP,
f5372251
EG
5574 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5575 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5c862848 5576 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
0626b899 5577 fp->sb_id);
5c862848 5578 bnx2x_update_fpsb_idx(fp);
a2fbb9ea
ET
5579 }
5580
16119785
EG
5581 /* ensure status block indices were read */
5582 rmb();
5583
5584
5c862848
EG
5585 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5586 DEF_SB_ID);
5587 bnx2x_update_dsb_idx(bp);
a2fbb9ea
ET
5588 bnx2x_update_coalesce(bp);
5589 bnx2x_init_rx_rings(bp);
5590 bnx2x_init_tx_ring(bp);
5591 bnx2x_init_sp_ring(bp);
5592 bnx2x_init_context(bp);
471de716 5593 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5594 bnx2x_init_ind_table(bp);
0ef00459
EG
5595 bnx2x_stats_init(bp);
5596
5597 /* At this point, we are ready for interrupts */
5598 atomic_set(&bp->intr_sem, 0);
5599
5600 /* flush all before enabling interrupts */
5601 mb();
5602 mmiowb();
5603
615f8fd9 5604 bnx2x_int_enable(bp);
eb8da205
EG
5605
5606 /* Check for SPIO5 */
5607 bnx2x_attn_int_deasserted0(bp,
5608 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5609 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
5610}
5611
5612/* end of nic init */
5613
5614/*
5615 * gzip service functions
5616 */
5617
5618static int bnx2x_gunzip_init(struct bnx2x *bp)
5619{
5620 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5621 &bp->gunzip_mapping);
5622 if (bp->gunzip_buf == NULL)
5623 goto gunzip_nomem1;
5624
5625 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5626 if (bp->strm == NULL)
5627 goto gunzip_nomem2;
5628
5629 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5630 GFP_KERNEL);
5631 if (bp->strm->workspace == NULL)
5632 goto gunzip_nomem3;
5633
5634 return 0;
5635
5636gunzip_nomem3:
5637 kfree(bp->strm);
5638 bp->strm = NULL;
5639
5640gunzip_nomem2:
5641 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5642 bp->gunzip_mapping);
5643 bp->gunzip_buf = NULL;
5644
5645gunzip_nomem1:
5646 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5647 " decompression\n", bp->dev->name);
a2fbb9ea
ET
5648 return -ENOMEM;
5649}
5650
5651static void bnx2x_gunzip_end(struct bnx2x *bp)
5652{
5653 kfree(bp->strm->workspace);
5654
5655 kfree(bp->strm);
5656 bp->strm = NULL;
5657
5658 if (bp->gunzip_buf) {
5659 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5660 bp->gunzip_mapping);
5661 bp->gunzip_buf = NULL;
5662 }
5663}
5664
94a78b79 5665static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
5666{
5667 int n, rc;
5668
5669 /* check gzip header */
94a78b79
VZ
5670 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5671 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 5672 return -EINVAL;
94a78b79 5673 }
a2fbb9ea
ET
5674
5675 n = 10;
5676
34f80b04 5677#define FNAME 0x8
a2fbb9ea
ET
5678
5679 if (zbuf[3] & FNAME)
5680 while ((zbuf[n++] != 0) && (n < len));
5681
94a78b79 5682 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
5683 bp->strm->avail_in = len - n;
5684 bp->strm->next_out = bp->gunzip_buf;
5685 bp->strm->avail_out = FW_BUF_SIZE;
5686
5687 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5688 if (rc != Z_OK)
5689 return rc;
5690
5691 rc = zlib_inflate(bp->strm, Z_FINISH);
5692 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5693 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5694 bp->dev->name, bp->strm->msg);
5695
5696 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5697 if (bp->gunzip_outlen & 0x3)
5698 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5699 " gunzip_outlen (%d) not aligned\n",
5700 bp->dev->name, bp->gunzip_outlen);
5701 bp->gunzip_outlen >>= 2;
5702
5703 zlib_inflateEnd(bp->strm);
5704
5705 if (rc == Z_STREAM_END)
5706 return 0;
5707
5708 return rc;
5709}
5710
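/*
 * Editor's sketch (self-contained model of the header handling above,
 * not driver code; the 10-byte fixed header and the FNAME flag follow
 * RFC 1952): returns the offset of the deflate payload within the
 * gzip image, or -1 on a bad magic/method byte.
 */
static inline int gzip_payload_offset_example(const u8 *zbuf, int len)
{
	int n = 10;				/* fixed gzip header */

	if ((len < n) || (zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) ||
	    (zbuf[2] != Z_DEFLATED))
		return -1;

	if (zbuf[3] & FNAME)			/* skip the NUL-terminated
						   original file name */
		while ((n < len) && (zbuf[n++] != 0))
			;
	return n;
}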
5711/* nic load/unload */
5712
5713/*
34f80b04 5714 * General service functions
a2fbb9ea
ET
5715 */
5716
5717/* send a NIG loopback debug packet */
5718static void bnx2x_lb_pckt(struct bnx2x *bp)
5719{
a2fbb9ea 5720 u32 wb_write[3];
a2fbb9ea
ET
5721
5722 /* Ethernet source and destination addresses */
a2fbb9ea
ET
5723 wb_write[0] = 0x55555555;
5724 wb_write[1] = 0x55555555;
34f80b04 5725 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5726 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5727
5728 /* NON-IP protocol */
a2fbb9ea
ET
5729 wb_write[0] = 0x09000000;
5730 wb_write[1] = 0x55555555;
34f80b04 5731 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5732 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
5733}
5734
5735/* some of the internal memories
5736 * are not directly readable from the driver;
5737 * to test them we send debug packets
5738 */
5739static int bnx2x_int_mem_test(struct bnx2x *bp)
5740{
5741 int factor;
5742 int count, i;
5743 u32 val = 0;
5744
ad8d3948 5745 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5746 factor = 120;
ad8d3948
EG
5747 else if (CHIP_REV_IS_EMUL(bp))
5748 factor = 200;
5749 else
a2fbb9ea 5750 factor = 1;
a2fbb9ea
ET
5751
5752 DP(NETIF_MSG_HW, "start part1\n");
5753
5754 /* Disable inputs of parser neighbor blocks */
5755 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5756 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5757 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5758 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5759
5760 /* Write 0 to parser credits for CFC search request */
5761 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5762
5763 /* send Ethernet packet */
5764 bnx2x_lb_pckt(bp);
5765
5766 /* TODO: do I need to reset the NIG statistics? */
5767 /* Wait until NIG register shows 1 packet of size 0x10 */
5768 count = 1000 * factor;
5769 while (count) {
34f80b04 5770
a2fbb9ea
ET
5771 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5772 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5773 if (val == 0x10)
5774 break;
5775
5776 msleep(10);
5777 count--;
5778 }
5779 if (val != 0x10) {
5780 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5781 return -1;
5782 }
5783
5784 /* Wait until PRS register shows 1 packet */
5785 count = 1000 * factor;
5786 while (count) {
5787 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
5788 if (val == 1)
5789 break;
5790
5791 msleep(10);
5792 count--;
5793 }
5794 if (val != 0x1) {
5795 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5796 return -2;
5797 }
5798
5799 /* Reset and init BRB, PRS */
34f80b04 5800 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5801 msleep(50);
34f80b04 5802 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5803 msleep(50);
94a78b79
VZ
5804 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5805 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5806
5807 DP(NETIF_MSG_HW, "part2\n");
5808
5809 /* Disable inputs of parser neighbor blocks */
5810 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5811 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5812 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5813 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
5814
5815 /* Write 0 to parser credits for CFC search request */
5816 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5817
5818 /* send 10 Ethernet packets */
5819 for (i = 0; i < 10; i++)
5820 bnx2x_lb_pckt(bp);
5821
5822 /* Wait until NIG register shows 10 + 1
5823 packets of size 11*0x10 = 0xb0 */
5824 count = 1000 * factor;
5825 while (count) {
34f80b04 5826
a2fbb9ea
ET
5827 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5828 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
5829 if (val == 0xb0)
5830 break;
5831
5832 msleep(10);
5833 count--;
5834 }
5835 if (val != 0xb0) {
5836 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5837 return -3;
5838 }
5839
5840 /* Wait until PRS register shows 2 packets */
5841 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5842 if (val != 2)
5843 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5844
5845 /* Write 1 to parser credits for CFC search request */
5846 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5847
5848 /* Wait until PRS register shows 3 packets */
5849 msleep(10 * factor);
5850 /* Wait until NIG register shows 1 packet of size 0x10 */
5851 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5852 if (val != 3)
5853 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5854
5855 /* clear NIG EOP FIFO */
5856 for (i = 0; i < 11; i++)
5857 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5858 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5859 if (val != 1) {
5860 BNX2X_ERR("clear of NIG failed\n");
5861 return -4;
5862 }
5863
5864 /* Reset and init BRB, PRS, NIG */
5865 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5866 msleep(50);
5867 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5868 msleep(50);
94a78b79
VZ
5869 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5870 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
5871#ifndef BCM_ISCSI
5872 /* set NIC mode */
5873 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5874#endif
5875
5876 /* Enable inputs of parser neighbor blocks */
5877 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5878 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5879 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5880 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
5881
5882 DP(NETIF_MSG_HW, "done\n");
5883
5884 return 0; /* OK */
5885}
5886
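/*
 * Editor's note (illustrative): since the tested memories are not
 * CPU-readable, the self-test above probes them indirectly -- it
 * injects NIG loopback packets while the parser's CFC search credit
 * is held at 0, then infers memory health from the counters: one
 * 0x10-byte packet in part 1 and ten more in part 2 for a running
 * total of 11 * 0x10 = 0xb0 octets in the NIG statistics.
 */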
5887static void enable_blocks_attention(struct bnx2x *bp)
5888{
5889 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5890 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5891 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5892 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5893 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5894 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5895 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5896 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5897 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
5898/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5899/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5900 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5901 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5902 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
5903/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5904/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5905 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5906 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5907 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5908 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
5909/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5910/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5911 if (CHIP_REV_IS_FPGA(bp))
5912 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5913 else
5914 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5915 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5916 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5917 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
5918/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5919/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
5920 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5921 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04
EG
5922/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5923 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
a2fbb9ea
ET
5924}
5925
34f80b04 5926
81f75bbf
EG
5927static void bnx2x_reset_common(struct bnx2x *bp)
5928{
5929 /* reset_common */
5930 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5931 0xd3ffff7f);
5932 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5933}
5934
573f2035
EG
5935static void bnx2x_init_pxp(struct bnx2x *bp)
5936{
5937 u16 devctl;
5938 int r_order, w_order;
5939
5940 pci_read_config_word(bp->pdev,
5941 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
5942 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
5943 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
5944 if (bp->mrrs == -1)
5945 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
5946 else {
5947 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
5948 r_order = bp->mrrs;
5949 }
5950
5951 bnx2x_init_pxp_arb(bp, r_order, w_order);
5952}
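/*
 * Editor's note (worked example, devctl value assumed): PCI_EXP_DEVCTL
 * packs Max_Payload_Size in bits 7:5 and Max_Read_Request_Size in
 * bits 14:12, both encoded as (128 << field) bytes.  A devctl of
 * 0x2030 therefore gives w_order = (0x2030 & 0x00e0) >> 5 = 1
 * (256-byte payload) and r_order = (0x2030 & 0x7000) >> 12 = 2
 * (512-byte read request), unless mrrs overrides r_order as above.
 */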
fd4ef40d
EG
5953
5954static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5955{
5956 u32 val;
5957 u8 port;
5958 u8 is_required = 0;
5959
5960 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5961 SHARED_HW_CFG_FAN_FAILURE_MASK;
5962
5963 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5964 is_required = 1;
5965
5966 /*
5967 * The fan failure mechanism is usually related to the PHY type since
5968 * the power consumption of the board is affected by the PHY. Currently,
5969 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5970 */
5971 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5972 for (port = PORT_0; port < PORT_MAX; port++) {
5973 u32 phy_type =
5974 SHMEM_RD(bp, dev_info.port_hw_config[port].
5975 external_phy_config) &
5976 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5977 is_required |=
5978 ((phy_type ==
5979 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
4d295db0
EG
5980 (phy_type ==
5981 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
fd4ef40d
EG
5982 (phy_type ==
5983 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5984 }
5985
5986 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5987
5988 if (is_required == 0)
5989 return;
5990
5991 /* Fan failure is indicated by SPIO 5 */
5992 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5993 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5994
5995 /* set to active low mode */
5996 val = REG_RD(bp, MISC_REG_SPIO_INT);
5997 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5998 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5999 REG_WR(bp, MISC_REG_SPIO_INT, val);
6000
6001 /* enable interrupt to signal the IGU */
6002 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
6003 val |= (1 << MISC_REGISTERS_SPIO_5);
6004 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6005}
6006
34f80b04 6007static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6008{
a2fbb9ea 6009 u32 val, i;
a2fbb9ea 6010
34f80b04 6011 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6012
81f75bbf 6013 bnx2x_reset_common(bp);
34f80b04
EG
6014 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6015 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6016
94a78b79 6017 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6018 if (CHIP_IS_E1H(bp))
6019 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6020
34f80b04
EG
6021 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6022 msleep(30);
6023 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6024
94a78b79 6025 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6026 if (CHIP_IS_E1(bp)) {
6027 /* enable HW interrupt from PXP on USDM overflow
6028 bit 16 on INT_MASK_0 */
6029 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6030 }
a2fbb9ea 6031
94a78b79 6032 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6033 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6034
6035#ifdef __BIG_ENDIAN
34f80b04
EG
6036 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6037 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6038 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6039 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6040 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6041 /* make sure this value is 0 */
6042 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6043
6044/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6045 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6046 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6047 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6048 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6049#endif
6050
34f80b04 6051 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 6052#ifdef BCM_ISCSI
34f80b04
EG
6053 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6054 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6055 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6056#endif
6057
34f80b04
EG
6058 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6059 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6060
34f80b04
EG
6061 /* let the HW do its magic ... */
6062 msleep(100);
6063 /* finish PXP init */
6064 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6065 if (val != 1) {
6066 BNX2X_ERR("PXP2 CFG failed\n");
6067 return -EBUSY;
6068 }
6069 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6070 if (val != 1) {
6071 BNX2X_ERR("PXP2 RD_INIT failed\n");
6072 return -EBUSY;
6073 }
a2fbb9ea 6074
34f80b04
EG
6075 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6076 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6077
94a78b79 6078 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6079
34f80b04
EG
6080 /* clean the DMAE memory */
6081 bp->dmae_ready = 1;
6082 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6083
94a78b79
VZ
6084 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6085 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6086 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6087 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6088
34f80b04
EG
6089 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6090 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6091 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6092 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6093
94a78b79 6094 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
34f80b04
EG
6095 /* soft reset pulse */
6096 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6097 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea
ET
6098
6099#ifdef BCM_ISCSI
94a78b79 6100 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6101#endif
a2fbb9ea 6102
94a78b79 6103 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6104 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6105 if (!CHIP_REV_IS_SLOW(bp)) {
6106 /* enable hw interrupt from doorbell Q */
6107 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6108 }
a2fbb9ea 6109
94a78b79
VZ
6110 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6111 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6112 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a
EG
6113 /* set NIC mode */
6114 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04
EG
6115 if (CHIP_IS_E1H(bp))
6116 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6117
94a78b79
VZ
6118 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6119 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6120 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6121 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6122
ca00392c
EG
6123 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6124 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6125 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6126 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6127
94a78b79
VZ
6128 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6129 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6130 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6131 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6132
34f80b04
EG
6133 /* sync semi rtc */
6134 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6135 0x80000000);
6136 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6137 0x80000000);
a2fbb9ea 6138
94a78b79
VZ
6139 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6140 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6141 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6142
34f80b04
EG
6143 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6144 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6145 REG_WR(bp, i, 0xc0cac01a);
6146 /* TODO: replace with something meaningful */
6147 }
94a78b79 6148 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
34f80b04 6149 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6150
34f80b04
EG
6151 if (sizeof(union cdu_context) != 1024)
6152 /* we currently assume that a context is 1024 bytes */
6153 printk(KERN_ALERT PFX "please adjust the size of"
6154 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 6155
94a78b79 6156 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6157 val = (4 << 24) + (0 << 12) + 1024;
6158 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6159
94a78b79 6160 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
34f80b04 6161 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6162 /* enable context validation interrupt from CFC */
6163 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6164
6165 /* set the thresholds to prevent CFC/CDU race */
6166 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6167
94a78b79
VZ
6168 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
6169 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
a2fbb9ea 6170
94a78b79 6171 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
34f80b04
EG
6172 /* Reset PCIE errors for debug */
6173 REG_WR(bp, 0x2814, 0xffffffff);
6174 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6175
94a78b79 6176 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
94a78b79 6177 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
94a78b79 6178 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
94a78b79 6179 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
34f80b04 6180
94a78b79 6181 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
34f80b04
EG
6182 if (CHIP_IS_E1H(bp)) {
6183 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
6184 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
6185 }
6186
6187 if (CHIP_REV_IS_SLOW(bp))
6188 msleep(200);
6189
6190 /* finish CFC init */
6191 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6192 if (val != 1) {
6193 BNX2X_ERR("CFC LL_INIT failed\n");
6194 return -EBUSY;
6195 }
6196 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6197 if (val != 1) {
6198 BNX2X_ERR("CFC AC_INIT failed\n");
6199 return -EBUSY;
6200 }
6201 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6202 if (val != 1) {
6203 BNX2X_ERR("CFC CAM_INIT failed\n");
6204 return -EBUSY;
6205 }
6206 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6207
34f80b04
EG
6208 /* read the NIG statistics
6209 to see if this is our first bring-up since power-up */
6210 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6211 val = *bnx2x_sp(bp, wb_data[0]);
6212
6213 /* do internal memory self test */
6214 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
6215 BNX2X_ERR("internal mem self test failed\n");
6216 return -EBUSY;
6217 }
6218
35b19ba5 6219 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
46c6a674
EG
6220 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
6221 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
6222 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
4d295db0 6223 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
46c6a674
EG
6224 bp->port.need_hw_lock = 1;
6225 break;
6226
34f80b04
EG
6227 default:
6228 break;
6229 }
f1410647 6230
fd4ef40d
EG
6231 bnx2x_setup_fan_failure_detection(bp);
6232
34f80b04
EG
6233 /* clear PXP2 attentions */
6234 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6235
34f80b04 6236 enable_blocks_attention(bp);
a2fbb9ea 6237
6bbca910
YR
6238 if (!BP_NOMCP(bp)) {
6239 bnx2x_acquire_phy_lock(bp);
6240 bnx2x_common_init_phy(bp, bp->common.shmem_base);
6241 bnx2x_release_phy_lock(bp);
6242 } else
6243 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6244
34f80b04
EG
6245 return 0;
6246}
a2fbb9ea 6247
34f80b04
EG
6248static int bnx2x_init_port(struct bnx2x *bp)
6249{
6250 int port = BP_PORT(bp);
94a78b79 6251 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
1c06328c 6252 u32 low, high;
34f80b04 6253 u32 val;
a2fbb9ea 6254
34f80b04
EG
6255 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
6256
6257 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 6258
94a78b79 6259 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
94a78b79 6260 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
ca00392c
EG
6261
6262 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6263 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6264 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
a2fbb9ea
ET
6265#ifdef BCM_ISCSI
6266 /* Port0 1
6267 * Port1 385 */
6268 i++;
6269 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6270 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6271 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6272 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6273
6274 /* Port0 2
6275 * Port1 386 */
6276 i++;
6277 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6278 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6279 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6280 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6281
6282 /* Port0 3
6283 * Port1 387 */
6284 i++;
6285 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6286 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6287 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6288 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6289#endif
94a78b79 6290 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
a2fbb9ea 6291
a2fbb9ea
ET
6292#ifdef BCM_ISCSI
6293 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
6294 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6295
94a78b79 6296 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
a2fbb9ea 6297#endif
94a78b79 6298 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
1c06328c 6299
94a78b79 6300 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
1c06328c
EG
6301 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
6302 /* no pause for emulation and FPGA */
6303 low = 0;
6304 high = 513;
6305 } else {
6306 if (IS_E1HMF(bp))
6307 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
6308 else if (bp->dev->mtu > 4096) {
6309 if (bp->flags & ONE_PORT_FLAG)
6310 low = 160;
6311 else {
6312 val = bp->dev->mtu;
6313 /* (24*1024 + val*4)/256 */
6314 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
6315 }
6316 } else
6317 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
6318 high = low + 56; /* 14*1024/256 */
6319 }
6320 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
6321 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
6322
6323
94a78b79 6324 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
ca00392c 6325
94a78b79 6326 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
94a78b79 6327 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
94a78b79 6328 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
94a78b79 6329 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
356e2385 6330
94a78b79
VZ
6331 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6332 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6333 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6334 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
356e2385 6335
94a78b79 6336 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
94a78b79 6337 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
34f80b04 6338
94a78b79 6339 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
a2fbb9ea
ET
6340
6341 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 6342 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea
ET
6343
6344 /* update threshold */
34f80b04 6345 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 6346 /* update init credit */
34f80b04 6347 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea
ET
6348
6349 /* probe changes */
34f80b04 6350 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 6351 msleep(5);
34f80b04 6352 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea
ET
6353
6354#ifdef BCM_ISCSI
6355 /* tell the searcher where the T2 table is */
6356 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6357
6358 wb_write[0] = U64_LO(bp->t2_mapping);
6359 wb_write[1] = U64_HI(bp->t2_mapping);
6360 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6361 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6362 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6363 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6364
6365 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
a2fbb9ea 6366#endif
94a78b79 6367 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
94a78b79 6368 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
34f80b04
EG
6369
6370 if (CHIP_IS_E1(bp)) {
6371 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6372 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6373 }
94a78b79 6374 bnx2x_init_block(bp, HC_BLOCK, init_stage);
34f80b04 6375
94a78b79 6376 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
34f80b04
EG
6377 /* init aeu_mask_attn_func_0/1:
6378 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6379 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6380 * bits 4-7 are used for "per vn group attention" */
6381 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6382 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6383
94a78b79 6384 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
94a78b79 6385 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
94a78b79 6386 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
94a78b79 6387 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
94a78b79 6388 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
356e2385 6389
94a78b79 6390 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
34f80b04
EG
6391
6392 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6393
6394 if (CHIP_IS_E1H(bp)) {
34f80b04
EG
6395 /* 0x2 disable e1hov, 0x1 enable */
6396 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6397 (IS_E1HMF(bp) ? 0x1 : 0x2));
6398
1c06328c
EG
6399 {
6400 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6401 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6402 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6403 }
34f80b04
EG
6404 }
6405
94a78b79 6406 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
94a78b79 6407 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
a2fbb9ea 6408
35b19ba5 6409 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
589abe3a
EG
6410 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6411 {
6412 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6413
6414 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6415 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6416
6417 /* The GPIO should be swapped if the swap register is
6418 set and active */
6419 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6420 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6421
6422 /* Select function upon port-swap configuration */
6423 if (port == 0) {
6424 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6425 aeu_gpio_mask = (swap_val && swap_override) ?
6426 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6427 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6428 } else {
6429 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6430 aeu_gpio_mask = (swap_val && swap_override) ?
6431 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6432 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6433 }
6434 val = REG_RD(bp, offset);
6435 /* add GPIO3 to group */
6436 val |= aeu_gpio_mask;
6437 REG_WR(bp, offset, val);
6438 }
6439 break;
6440
35b19ba5 6441 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
4d295db0 6442 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
f1410647 6443 /* add SPIO 5 to group 0 */
4d295db0
EG
6444 {
6445 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6446 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6447 val = REG_RD(bp, reg_addr);
f1410647 6448 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0
EG
6449 REG_WR(bp, reg_addr, val);
6450 }
f1410647
ET
6451 break;
6452
6453 default:
6454 break;
6455 }
6456
c18487ee 6457 bnx2x__link_reset(bp);
a2fbb9ea 6458
34f80b04
EG
6459 return 0;
6460}
6461
6462#define ILT_PER_FUNC (768/2)
6463#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6464/* the phys address is shifted right 12 bits and has a
6465 1 (valid bit) added at the 53rd bit;
6466 since this is a wide register(TM)
6467 we split it into two 32-bit writes
6468 */
6469#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6470#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6471#define PXP_ONE_ILT(x) (((x) << 10) | x)
6472#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6473
6474#define CNIC_ILT_LINES 0
6475
6476static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6477{
6478 int reg;
6479
6480 if (CHIP_IS_E1H(bp))
6481 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6482 else /* E1 */
6483 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6484
6485 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6486}
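/*
 * Editor's note (worked example, address assumed): for a physical
 * address of 0x0000001234567000, ONCHIP_ADDR1() yields
 * (addr >> 12) & 0xffffffff = 0x01234567 for the low 32-bit write,
 * and ONCHIP_ADDR2() yields (1 << 20) | (addr >> 44) = 0x00100000
 * for the high write -- the 4K page number plus the valid bit in
 * the 53rd bit position, as the comment above describes.
 */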
6487
6488static int bnx2x_init_func(struct bnx2x *bp)
6489{
6490 int port = BP_PORT(bp);
6491 int func = BP_FUNC(bp);
8badd27a 6492 u32 addr, val;
34f80b04
EG
6493 int i;
6494
6495 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6496
8badd27a
EG
6497 /* set MSI reconfigure capability */
6498 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6499 val = REG_RD(bp, addr);
6500 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6501 REG_WR(bp, addr, val);
6502
34f80b04
EG
6503 i = FUNC_ILT_BASE(func);
6504
6505 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6506 if (CHIP_IS_E1H(bp)) {
6507 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6508 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6509 } else /* E1 */
6510 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6511 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6512
6513
6514 if (CHIP_IS_E1H(bp)) {
573f2035
EG
6515 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
6516 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
6517 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
6518 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
6519 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
6520 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
6521 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
6522 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
6523 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
34f80b04
EG
6524
6525 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6526 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6527 }
6528
6529 /* HC init per function */
6530 if (CHIP_IS_E1H(bp)) {
6531 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6532
6533 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6534 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6535 }
94a78b79 6536 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
34f80b04 6537
c14423fe 6538 /* Reset PCIE errors for debug */
a2fbb9ea
ET
6539 REG_WR(bp, 0x2114, 0xffffffff);
6540 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 6541
34f80b04
EG
6542 return 0;
6543}
6544
6545static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6546{
6547 int i, rc = 0;
a2fbb9ea 6548
34f80b04
EG
6549 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6550 BP_FUNC(bp), load_code);
a2fbb9ea 6551
34f80b04
EG
6552 bp->dmae_ready = 0;
6553 mutex_init(&bp->dmae_mutex);
54016b26
EG
6554 rc = bnx2x_gunzip_init(bp);
6555 if (rc)
6556 return rc;
a2fbb9ea 6557
34f80b04
EG
6558 switch (load_code) {
6559 case FW_MSG_CODE_DRV_LOAD_COMMON:
6560 rc = bnx2x_init_common(bp);
6561 if (rc)
6562 goto init_hw_err;
6563 /* no break */
6564
6565 case FW_MSG_CODE_DRV_LOAD_PORT:
6566 bp->dmae_ready = 1;
6567 rc = bnx2x_init_port(bp);
6568 if (rc)
6569 goto init_hw_err;
6570 /* no break */
6571
6572 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6573 bp->dmae_ready = 1;
6574 rc = bnx2x_init_func(bp);
6575 if (rc)
6576 goto init_hw_err;
6577 break;
6578
6579 default:
6580 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6581 break;
6582 }
6583
6584 if (!BP_NOMCP(bp)) {
6585 int func = BP_FUNC(bp);
a2fbb9ea
ET
6586
6587 bp->fw_drv_pulse_wr_seq =
34f80b04 6588 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6589 DRV_PULSE_SEQ_MASK);
6fe49bb9
EG
6590 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
6591 }
a2fbb9ea 6592
34f80b04
EG
6593 /* this needs to be done before gunzip end */
6594 bnx2x_zero_def_sb(bp);
6595 for_each_queue(bp, i)
6596 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6597
6598init_hw_err:
6599 bnx2x_gunzip_end(bp);
6600
6601 return rc;
a2fbb9ea
ET
6602}
6603
a2fbb9ea
ET
6604static void bnx2x_free_mem(struct bnx2x *bp)
6605{
6606
6607#define BNX2X_PCI_FREE(x, y, size) \
6608 do { \
6609 if (x) { \
6610 pci_free_consistent(bp->pdev, size, x, y); \
6611 x = NULL; \
6612 y = 0; \
6613 } \
6614 } while (0)
6615
6616#define BNX2X_FREE(x) \
6617 do { \
6618 if (x) { \
6619 vfree(x); \
6620 x = NULL; \
6621 } \
6622 } while (0)
6623
6624 int i;
6625
6626 /* fastpath */
555f6c78 6627 /* Common */
a2fbb9ea
ET
6628 for_each_queue(bp, i) {
6629
555f6c78 6630 /* status blocks */
a2fbb9ea
ET
6631 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6632 bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6633 sizeof(struct host_status_block));
555f6c78
EG
6634 }
6635 /* Rx */
6636 for_each_rx_queue(bp, i) {
a2fbb9ea 6637
555f6c78 6638 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6639 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6640 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6641 bnx2x_fp(bp, i, rx_desc_mapping),
6642 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6643
6644 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6645 bnx2x_fp(bp, i, rx_comp_mapping),
6646 sizeof(struct eth_fast_path_rx_cqe) *
6647 NUM_RCQ_BD);
a2fbb9ea 6648
7a9b2557 6649 /* SGE ring */
32626230 6650 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6651 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6652 bnx2x_fp(bp, i, rx_sge_mapping),
6653 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6654 }
6655 /* Tx */
6656 for_each_tx_queue(bp, i) {
6657
6658 /* fastpath tx rings: tx_buf tx_desc */
6659 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6660 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6661 bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6662 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6663 }
6664 /* end of fastpath */
6665
6666 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6667 sizeof(struct host_def_status_block));
6668
6669 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6670 sizeof(struct bnx2x_slowpath));
6671
6672#ifdef BCM_ISCSI
6673 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6674 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6675 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6676 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6677#endif
7a9b2557 6678 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6679
6680#undef BNX2X_PCI_FREE
6682 #undef BNX2X_FREE
6682}
6683
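/*
 * Allocation mirrors bnx2x_free_mem(). An illustrative use of the two
 * helper macros (argument names here are placeholders only):
 *   BNX2X_PCI_ALLOC(ring, &mapping, size)  - zeroed DMA-coherent memory
 *   BNX2X_ALLOC(ring, size)                - zeroed vmalloc() memory
 * Both jump to alloc_mem_err on failure, where the single
 * bnx2x_free_mem() call unwinds whatever was already allocated.
 */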
6684static int bnx2x_alloc_mem(struct bnx2x *bp)
6685{
6686
6687#define BNX2X_PCI_ALLOC(x, y, size) \
6688 do { \
6689 x = pci_alloc_consistent(bp->pdev, size, y); \
6690 if (x == NULL) \
6691 goto alloc_mem_err; \
6692 memset(x, 0, size); \
6693 } while (0)
6694
6695#define BNX2X_ALLOC(x, size) \
6696 do { \
6697 x = vmalloc(size); \
6698 if (x == NULL) \
6699 goto alloc_mem_err; \
6700 memset(x, 0, size); \
6701 } while (0)
6702
6703 int i;
6704
6705 /* fastpath */
555f6c78 6706 /* Common */
6707 for_each_queue(bp, i) {
6708 bnx2x_fp(bp, i, bp) = bp;
6709
555f6c78 6710 /* status blocks */
6711 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6712 &bnx2x_fp(bp, i, status_blk_mapping),
ca00392c 6713 sizeof(struct host_status_block));
6714 }
6715 /* Rx */
6716 for_each_rx_queue(bp, i) {
a2fbb9ea 6717
555f6c78 6718 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6719 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6720 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6721 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6722 &bnx2x_fp(bp, i, rx_desc_mapping),
6723 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6724
6725 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6726 &bnx2x_fp(bp, i, rx_comp_mapping),
6727 sizeof(struct eth_fast_path_rx_cqe) *
6728 NUM_RCQ_BD);
6729
6730 /* SGE ring */
6731 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6732 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6733 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6734 &bnx2x_fp(bp, i, rx_sge_mapping),
6735 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6736 }
6737 /* Tx */
6738 for_each_tx_queue(bp, i) {
6739
6740 /* fastpath tx rings: tx_buf tx_desc */
6741 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6742 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6743 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6744 &bnx2x_fp(bp, i, tx_desc_mapping),
ca00392c 6745 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
555f6c78 6746 }
6747 /* end of fastpath */
6748
6749 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6750 sizeof(struct host_def_status_block));
6751
6752 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6753 sizeof(struct bnx2x_slowpath));
6754
6755#ifdef BCM_ISCSI
6756 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6757
6758 /* Initialize T1 */
6759 for (i = 0; i < 64*1024; i += 64) {
6760 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6761 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6762 }
6763
6764 	/* allocate the searcher T2 table;
6765 	   we allocate 1/4 of the allocation size for T2
6766 	   (which is not entered into the ILT) */
6767 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6768
6769 /* Initialize T2 */
6770 for (i = 0; i < 16*1024; i += 64)
6771 		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6772
c14423fe 6773 /* now fixup the last line in the block to point to the next block */
6774 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6775
6776 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6777 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6778
6779 /* QM queues (128*MAX_CONN) */
6780 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6781#endif
6782
6783 /* Slow path ring */
6784 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6785
6786 return 0;
6787
6788alloc_mem_err:
6789 bnx2x_free_mem(bp);
6790 return -ENOMEM;
6791
6792#undef BNX2X_PCI_ALLOC
6793#undef BNX2X_ALLOC
6794}
6795
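/*
 * Any Tx SKBs still on a ring are reclaimed by walking the packet
 * producer/consumer indices: each bnx2x_free_tx_pkt() call releases
 * one pending packet and returns the new BD consumer.
 */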
6796static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6797{
6798 int i;
6799
555f6c78 6800 for_each_tx_queue(bp, i) {
6801 struct bnx2x_fastpath *fp = &bp->fp[i];
6802
6803 u16 bd_cons = fp->tx_bd_cons;
6804 u16 sw_prod = fp->tx_pkt_prod;
6805 u16 sw_cons = fp->tx_pkt_cons;
6806
6807 while (sw_cons != sw_prod) {
6808 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6809 sw_cons++;
6810 }
6811 }
6812}
6813
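/*
 * Rx buffers are unmapped with the same rx_buf_size they were mapped
 * with; TPA aggregation buffers are returned separately because the
 * number of aggregation queues differs between E1 and E1H.
 */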
6814static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6815{
6816 int i, j;
6817
555f6c78 6818 for_each_rx_queue(bp, j) {
6819 struct bnx2x_fastpath *fp = &bp->fp[j];
6820
6821 for (i = 0; i < NUM_RX_BD; i++) {
6822 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6823 struct sk_buff *skb = rx_buf->skb;
6824
6825 if (skb == NULL)
6826 continue;
6827
6828 pci_unmap_single(bp->pdev,
6829 pci_unmap_addr(rx_buf, mapping),
356e2385 6830 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6831
6832 rx_buf->skb = NULL;
6833 dev_kfree_skb(skb);
6834 }
7a9b2557 6835 if (!fp->disable_tpa)
6836 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6837 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6838 ETH_MAX_AGGREGATION_QUEUES_E1H);
6839 }
6840}
6841
6842static void bnx2x_free_skbs(struct bnx2x *bp)
6843{
6844 bnx2x_free_tx_skbs(bp);
6845 bnx2x_free_rx_skbs(bp);
6846}
6847
6848static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6849{
34f80b04 6850 int i, offset = 1;
6851
6852 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6853 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6854 bp->msix_table[0].vector);
6855
6856 for_each_queue(bp, i) {
c14423fe 6857 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6858 "state %x\n", i, bp->msix_table[i + offset].vector,
6859 bnx2x_fp(bp, i, state));
6860
34f80b04 6861 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6862 }
6863}
6864
6865static void bnx2x_free_irq(struct bnx2x *bp)
6866{
a2fbb9ea 6867 if (bp->flags & USING_MSIX_FLAG) {
6868 bnx2x_free_msix_irqs(bp);
6869 pci_disable_msix(bp->pdev);
6870 bp->flags &= ~USING_MSIX_FLAG;
6871
6872 } else if (bp->flags & USING_MSI_FLAG) {
6873 free_irq(bp->pdev->irq, bp->dev);
6874 pci_disable_msi(bp->pdev);
6875 bp->flags &= ~USING_MSI_FLAG;
6876
6877 } else
6878 free_irq(bp->pdev->irq, bp->dev);
6879}
6880
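/*
 * MSI-X vector layout: entry 0 is the slowpath (default status block)
 * vector and entries 1..BNX2X_NUM_QUEUES() are the fastpath vectors,
 * whose IGU vectors are derived from BP_L_ID().
 */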
6881static int bnx2x_enable_msix(struct bnx2x *bp)
6882{
6883 int i, rc, offset = 1;
6884 int igu_vec = 0;
a2fbb9ea 6885
6886 bp->msix_table[0].entry = igu_vec;
6887 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6888
34f80b04 6889 for_each_queue(bp, i) {
8badd27a 6890 igu_vec = BP_L_ID(bp) + offset + i;
6891 bp->msix_table[i + offset].entry = igu_vec;
6892 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6893 "(fastpath #%u)\n", i + offset, igu_vec, i);
6894 }
6895
34f80b04 6896 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6897 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6898 if (rc) {
6899 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6900 return rc;
34f80b04 6901 }
8badd27a 6902
6903 bp->flags |= USING_MSIX_FLAG;
6904
6905 return 0;
6906}
6907
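/*
 * Fastpath IRQs are named "<ifname>-rx-<N>" / "<ifname>-tx-<N>" so that
 * the Rx/Tx split of the queues is visible in /proc/interrupts.
 */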
6908static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6909{
34f80b04 6910 int i, rc, offset = 1;
a2fbb9ea 6911
6912 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6913 bp->dev->name, bp->dev);
6914 if (rc) {
6915 BNX2X_ERR("request sp irq failed\n");
6916 return -EBUSY;
6917 }
6918
6919 for_each_queue(bp, i) {
6920 struct bnx2x_fastpath *fp = &bp->fp[i];
6921
6922 if (i < bp->num_rx_queues)
6923 sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
6924 else
6925 sprintf(fp->name, "%s-tx-%d",
6926 bp->dev->name, i - bp->num_rx_queues);
6927
34f80b04 6928 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6929 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6930 if (rc) {
555f6c78 6931 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6932 bnx2x_free_msix_irqs(bp);
6933 return -EBUSY;
6934 }
6935
555f6c78 6936 fp->state = BNX2X_FP_STATE_IRQ;
6937 }
6938
555f6c78 6939 i = BNX2X_NUM_QUEUES(bp);
6940 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp[%d] %d"
6941 " ... fp[%d] %d\n",
6942 bp->dev->name, bp->msix_table[0].vector,
6943 0, bp->msix_table[offset].vector,
6944 i - 1, bp->msix_table[offset + i - 1].vector);
555f6c78 6945
a2fbb9ea 6946 return 0;
6947}
6948
6949static int bnx2x_enable_msi(struct bnx2x *bp)
6950{
6951 int rc;
6952
6953 rc = pci_enable_msi(bp->pdev);
6954 if (rc) {
6955 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6956 return -1;
6957 }
6958 bp->flags |= USING_MSI_FLAG;
6959
6960 return 0;
6961}
6962
6963static int bnx2x_req_irq(struct bnx2x *bp)
6964{
8badd27a 6965 unsigned long flags;
34f80b04 6966 int rc;
a2fbb9ea 6967
6968 if (bp->flags & USING_MSI_FLAG)
6969 flags = 0;
6970 else
6971 flags = IRQF_SHARED;
6972
6973 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6974 bp->dev->name, bp->dev);
6975 if (!rc)
6976 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6977
6978 return rc;
6979}
6980
6981static void bnx2x_napi_enable(struct bnx2x *bp)
6982{
6983 int i;
6984
555f6c78 6985 for_each_rx_queue(bp, i)
6986 napi_enable(&bnx2x_fp(bp, i, napi));
6987}
6988
6989static void bnx2x_napi_disable(struct bnx2x *bp)
6990{
6991 int i;
6992
555f6c78 6993 for_each_rx_queue(bp, i)
6994 napi_disable(&bnx2x_fp(bp, i, napi));
6995}
6996
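/*
 * bnx2x_netif_start() is gated on bp->intr_sem dropping to zero: NAPI,
 * interrupts and the Tx queues are (re)enabled only once no other path
 * still holds the interrupt semaphore.
 */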
6997static void bnx2x_netif_start(struct bnx2x *bp)
6998{
6999 int intr_sem;
7000
7001 intr_sem = atomic_dec_and_test(&bp->intr_sem);
7002 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
7003
7004 if (intr_sem) {
65abd74d 7005 if (netif_running(bp->dev)) {
7006 bnx2x_napi_enable(bp);
7007 bnx2x_int_enable(bp);
7008 if (bp->state == BNX2X_STATE_OPEN)
7009 netif_tx_wake_all_queues(bp->dev);
7010 }
7011 }
7012}
7013
f8ef6e44 7014static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 7015{
f8ef6e44 7016 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 7017 bnx2x_napi_disable(bp);
7018 netif_tx_disable(bp->dev);
7019 bp->dev->trans_start = jiffies; /* prevent tx timeout */
7020}
7021
7022/*
7023 * Init service functions
7024 */
7025
3101c2bc 7026static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7027{
7028 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 7029 int port = BP_PORT(bp);
7030
7031 /* CAM allocation
7032 * unicasts 0-31:port0 32-63:port1
7033 * multicast 64-127:port0 128-191:port1
7034 */
8d9c5f34 7035 config->hdr.length = 2;
af246401 7036 config->hdr.offset = port ? 32 : 0;
0626b899 7037 config->hdr.client_id = bp->fp->cl_id;
7038 config->hdr.reserved1 = 0;
7039
7040 /* primary MAC */
7041 config->config_table[0].cam_entry.msb_mac_addr =
7042 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7043 config->config_table[0].cam_entry.middle_mac_addr =
7044 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7045 config->config_table[0].cam_entry.lsb_mac_addr =
7046 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 7047 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7048 if (set)
7049 config->config_table[0].target_table_entry.flags = 0;
7050 else
7051 CAM_INVALIDATE(config->config_table[0]);
7052 config->config_table[0].target_table_entry.clients_bit_vector =
7053 cpu_to_le32(1 << BP_L_ID(bp));
7054 config->config_table[0].target_table_entry.vlan_id = 0;
7055
7056 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
7057 (set ? "setting" : "clearing"),
7058 config->config_table[0].cam_entry.msb_mac_addr,
7059 config->config_table[0].cam_entry.middle_mac_addr,
7060 config->config_table[0].cam_entry.lsb_mac_addr);
7061
7062 /* broadcast */
7063 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
7064 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
7065 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
34f80b04 7066 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7067 if (set)
7068 config->config_table[1].target_table_entry.flags =
a2fbb9ea 7069 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7070 else
7071 CAM_INVALIDATE(config->config_table[1]);
7072 config->config_table[1].target_table_entry.clients_bit_vector =
7073 cpu_to_le32(1 << BP_L_ID(bp));
7074 config->config_table[1].target_table_entry.vlan_id = 0;
7075
7076 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7077 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7078 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7079}
7080
3101c2bc 7081static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7082{
7083 struct mac_configuration_cmd_e1h *config =
7084 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7085
7086 /* CAM allocation for E1H
7087 * unicasts: by func number
7088 * multicast: 20+FUNC*20, 20 each
7089 */
8d9c5f34 7090 config->hdr.length = 1;
34f80b04 7091 config->hdr.offset = BP_FUNC(bp);
0626b899 7092 config->hdr.client_id = bp->fp->cl_id;
7093 config->hdr.reserved1 = 0;
7094
7095 /* primary MAC */
7096 config->config_table[0].msb_mac_addr =
7097 swab16(*(u16 *)&bp->dev->dev_addr[0]);
7098 config->config_table[0].middle_mac_addr =
7099 swab16(*(u16 *)&bp->dev->dev_addr[2]);
7100 config->config_table[0].lsb_mac_addr =
7101 swab16(*(u16 *)&bp->dev->dev_addr[4]);
7102 config->config_table[0].clients_bit_vector =
7103 cpu_to_le32(1 << BP_L_ID(bp));
7104 config->config_table[0].vlan_id = 0;
7105 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7106 if (set)
7107 config->config_table[0].flags = BP_PORT(bp);
7108 else
7109 config->config_table[0].flags =
7110 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 7111
7112 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
7113 (set ? "setting" : "clearing"),
7114 config->config_table[0].msb_mac_addr,
7115 config->config_table[0].middle_mac_addr,
7116 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
7117
7118 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7119 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7120 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7121}
7122
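/*
 * Ramrod completions are normally delivered via interrupts; with
 * poll != 0 the Rx rings are polled directly instead (used while
 * interrupts cannot be serviced). *state_p is updated from
 * bnx2x_sp_event(), hence the mb() before each test.
 */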
7123static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7124 int *state_p, int poll)
7125{
7126 /* can take a while if any port is running */
8b3a0f0b 7127 int cnt = 5000;
a2fbb9ea 7128
7129 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
7130 poll ? "polling" : "waiting", state, idx);
7131
7132 might_sleep();
34f80b04 7133 while (cnt--) {
7134 if (poll) {
7135 bnx2x_rx_int(bp->fp, 10);
7136 /* if index is different from 0
7137 * the reply for some commands will
3101c2bc 7138 			 * be on the non-default queue
7139 */
7140 if (idx)
7141 bnx2x_rx_int(&bp->fp[idx], 10);
7142 }
a2fbb9ea 7143
3101c2bc 7144 mb(); /* state is changed by bnx2x_sp_event() */
7145 if (*state_p == state) {
7146#ifdef BNX2X_STOP_ON_ERROR
7147 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
7148#endif
a2fbb9ea 7149 return 0;
8b3a0f0b 7150 }
a2fbb9ea 7151
a2fbb9ea 7152 msleep(1);
7153
7154 if (bp->panic)
7155 return -EIO;
7156 }
7157
a2fbb9ea 7158 /* timeout! */
7159 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
7160 poll ? "polling" : "waiting", state, idx);
7161#ifdef BNX2X_STOP_ON_ERROR
7162 bnx2x_panic();
7163#endif
a2fbb9ea 7164
49d66772 7165 return -EBUSY;
7166}
7167
7168static int bnx2x_setup_leading(struct bnx2x *bp)
7169{
34f80b04 7170 int rc;
a2fbb9ea 7171
c14423fe 7172 /* reset IGU state */
34f80b04 7173 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
7174
7175 /* SETUP ramrod */
7176 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
7177
7178 /* Wait for completion */
7179 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 7180
34f80b04 7181 return rc;
7182}
7183
7184static int bnx2x_setup_multi(struct bnx2x *bp, int index)
7185{
7186 struct bnx2x_fastpath *fp = &bp->fp[index];
7187
a2fbb9ea 7188 /* reset IGU state */
555f6c78 7189 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 7190
228241eb 7191 /* SETUP ramrod */
7192 fp->state = BNX2X_FP_STATE_OPENING;
7193 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
7194 fp->cl_id, 0);
7195
7196 /* Wait for completion */
7197 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 7198 &(fp->state), 0);
7199}
7200
a2fbb9ea 7201static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 7202
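/*
 * For ETH_RSS_MODE_REGULAR the Rx/Tx queue counts come from the
 * num_rx_queues/num_tx_queues module parameters when set, otherwise
 * from num_online_cpus(); both are clamped to BNX2X_MAX_QUEUES() and
 * the Tx count is additionally clamped to the Rx count.
 */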
7203static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
7204 int *num_tx_queues_out)
7205{
7206 int _num_rx_queues = 0, _num_tx_queues = 0;
7207
7208 switch (bp->multi_mode) {
7209 case ETH_RSS_MODE_DISABLED:
7210 _num_rx_queues = 1;
7211 _num_tx_queues = 1;
7212 break;
7213
7214 case ETH_RSS_MODE_REGULAR:
7215 if (num_rx_queues)
7216 _num_rx_queues = min_t(u32, num_rx_queues,
7217 BNX2X_MAX_QUEUES(bp));
7218 else
7219 _num_rx_queues = min_t(u32, num_online_cpus(),
7220 BNX2X_MAX_QUEUES(bp));
7221
7222 if (num_tx_queues)
7223 _num_tx_queues = min_t(u32, num_tx_queues,
7224 BNX2X_MAX_QUEUES(bp));
7225 else
7226 _num_tx_queues = min_t(u32, num_online_cpus(),
7227 BNX2X_MAX_QUEUES(bp));
7228
7229 		/* There must not be more Tx queues than Rx queues */
7230 if (_num_tx_queues > _num_rx_queues) {
7231 BNX2X_ERR("number of tx queues (%d) > "
7232 "number of rx queues (%d)"
7233 " defaulting to %d\n",
7234 _num_tx_queues, _num_rx_queues,
7235 _num_rx_queues);
7236 _num_tx_queues = _num_rx_queues;
7237 }
7238 break;
7239
7240
7241 default:
7242 _num_rx_queues = 1;
7243 _num_tx_queues = 1;
7244 break;
7245 }
7246
7247 *num_rx_queues_out = _num_rx_queues;
7248 *num_tx_queues_out = _num_tx_queues;
7249}
7250
7251static int bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 7252{
ca00392c 7253 int rc = 0;
a2fbb9ea 7254
7255 switch (int_mode) {
7256 case INT_MODE_INTx:
7257 case INT_MODE_MSI:
7258 bp->num_rx_queues = 1;
7259 bp->num_tx_queues = 1;
7260 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
7261 break;
7262
7263 case INT_MODE_MSIX:
7264 default:
7265 /* Set interrupt mode according to bp->multi_mode value */
7266 bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
7267 &bp->num_tx_queues);
7268
7269 DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
555f6c78 7270 bp->num_rx_queues, bp->num_tx_queues);
ca00392c 7271
7272 /* if we can't use MSI-X we only need one fp,
7273 * so try to enable MSI-X with the requested number of fp's
7274 * and fallback to MSI or legacy INTx with one fp
7275 */
7276 rc = bnx2x_enable_msix(bp);
7277 if (rc) {
34f80b04 7278 /* failed to enable MSI-X */
7279 if (bp->multi_mode)
7280 BNX2X_ERR("Multi requested but failed to "
7281 "enable MSI-X (rx %d tx %d), "
7282 "set number of queues to 1\n",
7283 bp->num_rx_queues, bp->num_tx_queues);
7284 bp->num_rx_queues = 1;
7285 bp->num_tx_queues = 1;
a2fbb9ea 7286 }
8badd27a 7287 break;
a2fbb9ea 7288 }
555f6c78 7289 bp->dev->real_num_tx_queues = bp->num_tx_queues;
ca00392c 7290 return rc;
7291}
7292
7293
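/*
 * bnx2x_nic_load() brings the device up in stages: interrupt mode and
 * memory first, then IRQs, then a LOAD_REQ handshake with the MCP (or
 * the driver-local load counts when no MCP is present) to decide which
 * of the COMMON/PORT/FUNCTION hardware stages must be initialized.
 */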
7294/* must be called with rtnl_lock */
7295static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7296{
7297 u32 load_code;
7298 int i, rc;
7299
8badd27a 7300#ifdef BNX2X_STOP_ON_ERROR
7301 if (unlikely(bp->panic))
7302 return -EPERM;
7303#endif
7304
7305 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
7306
ca00392c 7307 rc = bnx2x_set_int_mode(bp);
c14423fe 7308
7309 if (bnx2x_alloc_mem(bp))
7310 return -ENOMEM;
7311
555f6c78 7312 for_each_rx_queue(bp, i)
7313 bnx2x_fp(bp, i, disable_tpa) =
7314 ((bp->flags & TPA_ENABLE_FLAG) == 0);
7315
555f6c78 7316 for_each_rx_queue(bp, i)
7317 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
7318 bnx2x_poll, 128);
7319
7320 bnx2x_napi_enable(bp);
7321
7322 if (bp->flags & USING_MSIX_FLAG) {
7323 rc = bnx2x_req_msix_irqs(bp);
7324 if (rc) {
7325 pci_disable_msix(bp->pdev);
2dfe0e1f 7326 goto load_error1;
7327 }
7328 } else {
7329 /* Fall to INTx if failed to enable MSI-X due to lack of
7330 memory (in bnx2x_set_int_mode()) */
7331 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
7332 bnx2x_enable_msi(bp);
7333 bnx2x_ack_int(bp);
7334 rc = bnx2x_req_irq(bp);
7335 if (rc) {
2dfe0e1f 7336 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
7337 if (bp->flags & USING_MSI_FLAG)
7338 pci_disable_msi(bp->pdev);
2dfe0e1f 7339 goto load_error1;
a2fbb9ea 7340 }
7341 if (bp->flags & USING_MSI_FLAG) {
7342 bp->dev->irq = bp->pdev->irq;
7343 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
7344 bp->dev->name, bp->pdev->irq);
7345 }
7346 }
7347
7348 	/* Send LOAD_REQUEST command to the MCP.
7349 	   The MCP replies with the type of LOAD command: if this is the
7350 	   first port to be initialized, the common blocks must be
7351 	   initialized as well; otherwise they must not
7352 	*/
7353 if (!BP_NOMCP(bp)) {
7354 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7355 if (!load_code) {
7356 BNX2X_ERR("MCP response failure, aborting\n");
7357 rc = -EBUSY;
7358 goto load_error2;
7359 }
7360 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7361 rc = -EBUSY; /* other port in diagnostic mode */
7362 goto load_error2;
7363 }
7364
7365 } else {
7366 int port = BP_PORT(bp);
7367
f5372251 7368 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7369 load_count[0], load_count[1], load_count[2]);
7370 load_count[0]++;
7371 load_count[1 + port]++;
f5372251 7372 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7373 load_count[0], load_count[1], load_count[2]);
7374 if (load_count[0] == 1)
7375 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7376 else if (load_count[1 + port] == 1)
7377 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7378 else
7379 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
7380 }
7381
7382 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7383 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
7384 bp->port.pmf = 1;
7385 else
7386 bp->port.pmf = 0;
7387 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 7388
a2fbb9ea 7389 /* Initialize HW */
7390 rc = bnx2x_init_hw(bp, load_code);
7391 if (rc) {
a2fbb9ea 7392 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 7393 goto load_error2;
7394 }
7395
a2fbb9ea 7396 /* Setup NIC internals and enable interrupts */
471de716 7397 bnx2x_nic_init(bp, load_code);
a2fbb9ea 7398
7399 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
7400 (bp->common.shmem2_base))
7401 SHMEM2_WR(bp, dcc_support,
7402 (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
7403 SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));
7404
a2fbb9ea 7405 /* Send LOAD_DONE command to MCP */
34f80b04 7406 if (!BP_NOMCP(bp)) {
7407 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7408 if (!load_code) {
da5a662a 7409 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 7410 rc = -EBUSY;
2dfe0e1f 7411 goto load_error3;
7412 }
7413 }
7414
7415 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7416
7417 rc = bnx2x_setup_leading(bp);
7418 if (rc) {
da5a662a 7419 BNX2X_ERR("Setup leading failed!\n");
e3553b29 7420#ifndef BNX2X_STOP_ON_ERROR
2dfe0e1f 7421 goto load_error3;
7422#else
7423 bp->panic = 1;
7424 return -EBUSY;
7425#endif
34f80b04 7426 }
a2fbb9ea 7427
7428 if (CHIP_IS_E1H(bp))
7429 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
f5372251 7430 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7431 bp->state = BNX2X_STATE_DISABLED;
7432 }
a2fbb9ea 7433
ca00392c 7434 if (bp->state == BNX2X_STATE_OPEN) {
7435 for_each_nondefault_queue(bp, i) {
7436 rc = bnx2x_setup_multi(bp, i);
7437 if (rc)
2dfe0e1f 7438 goto load_error3;
34f80b04 7439 }
a2fbb9ea 7440
7441 if (CHIP_IS_E1(bp))
7442 bnx2x_set_mac_addr_e1(bp, 1);
7443 else
7444 bnx2x_set_mac_addr_e1h(bp, 1);
7445 }
7446
7447 if (bp->port.pmf)
b5bf9068 7448 bnx2x_initial_phy_init(bp, load_mode);
7449
7450 /* Start fast path */
7451 switch (load_mode) {
7452 case LOAD_NORMAL:
7453 if (bp->state == BNX2X_STATE_OPEN) {
7454 /* Tx queue should be only reenabled */
7455 netif_tx_wake_all_queues(bp->dev);
7456 }
2dfe0e1f 7457 /* Initialize the receive filter. */
7458 bnx2x_set_rx_mode(bp->dev);
7459 break;
7460
7461 case LOAD_OPEN:
555f6c78 7462 netif_tx_start_all_queues(bp->dev);
7463 if (bp->state != BNX2X_STATE_OPEN)
7464 netif_tx_disable(bp->dev);
2dfe0e1f 7465 /* Initialize the receive filter. */
34f80b04 7466 bnx2x_set_rx_mode(bp->dev);
34f80b04 7467 break;
a2fbb9ea 7468
34f80b04 7469 case LOAD_DIAG:
2dfe0e1f 7470 /* Initialize the receive filter. */
a2fbb9ea 7471 bnx2x_set_rx_mode(bp->dev);
7472 bp->state = BNX2X_STATE_DIAG;
7473 break;
7474
7475 default:
7476 break;
7477 }
7478
7479 if (!bp->port.pmf)
7480 bnx2x__link_status_update(bp);
7481
7482 /* start the timer */
7483 mod_timer(&bp->timer, jiffies + bp->current_interval);
7484
34f80b04 7485
a2fbb9ea
ET
7486 return 0;
7487
7488load_error3:
7489 bnx2x_int_disable_sync(bp, 1);
7490 if (!BP_NOMCP(bp)) {
7491 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7492 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7493 }
7494 bp->port.pmf = 0;
7495 /* Free SKBs, SGEs, TPA pool and driver internals */
7496 bnx2x_free_skbs(bp);
555f6c78 7497 for_each_rx_queue(bp, i)
3196a88a 7498 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 7499load_error2:
7500 /* Release IRQs */
7501 bnx2x_free_irq(bp);
7502load_error1:
7503 bnx2x_napi_disable(bp);
555f6c78 7504 for_each_rx_queue(bp, i)
7cde1c8b 7505 netif_napi_del(&bnx2x_fp(bp, i, napi));
7506 bnx2x_free_mem(bp);
7507
34f80b04 7508 return rc;
7509}
7510
7511static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7512{
555f6c78 7513 struct bnx2x_fastpath *fp = &bp->fp[index];
7514 int rc;
7515
c14423fe 7516 /* halt the connection */
7517 fp->state = BNX2X_FP_STATE_HALTING;
7518 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 7519
34f80b04 7520 /* Wait for completion */
a2fbb9ea 7521 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 7522 &(fp->state), 1);
c14423fe 7523 if (rc) /* timeout */
7524 return rc;
7525
7526 /* delete cfc entry */
7527 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7528
7529 /* Wait for completion */
7530 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 7531 &(fp->state), 1);
34f80b04 7532 return rc;
7533}
7534
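/*
 * Stopping the leading connection is a two-step ramrod sequence: HALT
 * (completion tracked through the fastpath state) followed by
 * PORT_DELETE, whose completion is detected by watching the default
 * status block producer advance.
 */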
da5a662a 7535static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 7536{
4781bfad 7537 __le16 dsb_sp_prod_idx;
c14423fe 7538 /* if the other port is handling traffic,
a2fbb9ea 7539 this can take a lot of time */
7540 int cnt = 500;
7541 int rc;
7542
7543 might_sleep();
7544
7545 /* Send HALT ramrod */
7546 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
0626b899 7547 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
a2fbb9ea 7548
7549 /* Wait for completion */
7550 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7551 &(bp->fp[0].state), 1);
7552 if (rc) /* timeout */
da5a662a 7553 return rc;
a2fbb9ea 7554
49d66772 7555 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 7556
228241eb 7557 /* Send PORT_DELETE ramrod */
7558 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7559
49d66772 7560 /* Wait for completion to arrive on default status block
7561 we are going to reset the chip anyway
7562 so there is not much to do if this times out
7563 */
34f80b04 7564 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7565 if (!cnt) {
7566 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7567 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7568 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7569#ifdef BNX2X_STOP_ON_ERROR
7570 bnx2x_panic();
7571#endif
36e552ab 7572 rc = -EBUSY;
7573 break;
7574 }
7575 cnt--;
da5a662a 7576 msleep(1);
5650d9d4 7577 rmb(); /* Refresh the dsb_sp_prod */
7578 }
7579 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7580 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7581
7582 return rc;
7583}
7584
7585static void bnx2x_reset_func(struct bnx2x *bp)
7586{
7587 int port = BP_PORT(bp);
7588 int func = BP_FUNC(bp);
7589 int base, i;
7590
7591 /* Configure IGU */
7592 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7593 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7594
7595 /* Clear ILT */
7596 base = FUNC_ILT_BASE(func);
7597 for (i = base; i < base + ILT_PER_FUNC; i++)
7598 bnx2x_ilt_wr(bp, i, 0);
7599}
7600
7601static void bnx2x_reset_port(struct bnx2x *bp)
7602{
7603 int port = BP_PORT(bp);
7604 u32 val;
7605
7606 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7607
7608 /* Do not rcv packets to BRB */
7609 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7610 /* Do not direct rcv packets that are not for MCP to the BRB */
7611 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7612 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7613
7614 /* Configure AEU */
7615 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7616
7617 msleep(100);
7618 /* Check for BRB port occupancy */
7619 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7620 if (val)
7621 DP(NETIF_MSG_IFDOWN,
33471629 7622 "BRB1 is not empty %d blocks are occupied\n", val);
7623
7624 /* TODO: Close Doorbell port? */
7625}
7626
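/*
 * The reset scope follows the unload response from the MCP: a COMMON
 * unload resets the port, function and common blocks; a PORT unload
 * resets the port and function; a FUNCTION unload resets only the
 * function.
 */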
7627static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7628{
7629 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7630 BP_FUNC(bp), reset_code);
7631
7632 switch (reset_code) {
7633 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7634 bnx2x_reset_port(bp);
7635 bnx2x_reset_func(bp);
7636 bnx2x_reset_common(bp);
7637 break;
7638
7639 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7640 bnx2x_reset_port(bp);
7641 bnx2x_reset_func(bp);
7642 break;
7643
7644 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7645 bnx2x_reset_func(bp);
7646 break;
49d66772 7647
7648 default:
7649 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7650 break;
7651 }
7652}
7653
33471629 7654/* must be called with rtnl_lock */
34f80b04 7655static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7656{
da5a662a 7657 int port = BP_PORT(bp);
a2fbb9ea 7658 u32 reset_code = 0;
da5a662a 7659 int i, cnt, rc;
7660
7661 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7662
7663 bp->rx_mode = BNX2X_RX_MODE_NONE;
7664 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7665
f8ef6e44 7666 bnx2x_netif_stop(bp, 1);
e94d8af3 7667
7668 del_timer_sync(&bp->timer);
7669 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7670 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7671 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7672
7673 /* Release IRQs */
7674 bnx2x_free_irq(bp);
7675
7676 /* Wait until tx fastpath tasks complete */
7677 for_each_tx_queue(bp, i) {
7678 struct bnx2x_fastpath *fp = &bp->fp[i];
7679
34f80b04 7680 cnt = 1000;
e8b5fc51 7681 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7682
7961f791 7683 bnx2x_tx_int(fp);
7684 if (!cnt) {
7685 BNX2X_ERR("timeout waiting for queue[%d]\n",
7686 i);
7687#ifdef BNX2X_STOP_ON_ERROR
7688 bnx2x_panic();
7689 return -EBUSY;
7690#else
7691 break;
7692#endif
7693 }
7694 cnt--;
da5a662a 7695 msleep(1);
34f80b04 7696 }
228241eb 7697 }
7698 /* Give HW time to discard old tx messages */
7699 msleep(1);
a2fbb9ea 7700
7701 if (CHIP_IS_E1(bp)) {
7702 struct mac_configuration_cmd *config =
7703 bnx2x_sp(bp, mcast_config);
7704
7705 bnx2x_set_mac_addr_e1(bp, 0);
7706
8d9c5f34 7707 for (i = 0; i < config->hdr.length; i++)
7708 CAM_INVALIDATE(config->config_table[i]);
7709
8d9c5f34 7710 config->hdr.length = i;
7711 if (CHIP_REV_IS_SLOW(bp))
7712 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7713 else
7714 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
0626b899 7715 config->hdr.client_id = bp->fp->cl_id;
7716 config->hdr.reserved1 = 0;
7717
7718 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7719 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7720 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7721
7722 } else { /* E1H */
7723 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7724
7725 bnx2x_set_mac_addr_e1h(bp, 0);
7726
7727 for (i = 0; i < MC_HASH_SIZE; i++)
7728 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7729
7730 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7731 }
7732
7733 if (unload_mode == UNLOAD_NORMAL)
7734 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7735
7d0446c2 7736 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7737 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7738
7d0446c2 7739 else if (bp->wol) {
7740 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7741 u8 *mac_addr = bp->dev->dev_addr;
7742 u32 val;
7743 /* The mac address is written to entries 1-4 to
7744 preserve entry 0 which is used by the PMF */
7745 u8 entry = (BP_E1HVN(bp) + 1)*8;
7746
7747 val = (mac_addr[0] << 8) | mac_addr[1];
7748 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7749
7750 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7751 (mac_addr[4] << 8) | mac_addr[5];
7752 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7753
7754 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7755
7756 } else
7757 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7758
7759 /* Close multi and leading connections
7760 Completions for ramrods are collected in a synchronous way */
7761 for_each_nondefault_queue(bp, i)
7762 if (bnx2x_stop_multi(bp, i))
228241eb 7763 goto unload_error;
a2fbb9ea 7764
7765 rc = bnx2x_stop_leading(bp);
7766 if (rc) {
34f80b04 7767 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7768#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7769 return -EBUSY;
7770#else
7771 goto unload_error;
34f80b04 7772#endif
7773 }
7774
7775unload_error:
34f80b04 7776 if (!BP_NOMCP(bp))
228241eb 7777 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04 7778 else {
f5372251 7779 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7780 load_count[0], load_count[1], load_count[2]);
7781 load_count[0]--;
da5a662a 7782 load_count[1 + port]--;
f5372251 7783 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7784 load_count[0], load_count[1], load_count[2]);
7785 if (load_count[0] == 0)
7786 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7787 else if (load_count[1 + port] == 0)
7788 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7789 else
7790 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7791 }
a2fbb9ea 7792
7793 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7794 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7795 bnx2x__link_reset(bp);
7796
7797 /* Reset the chip */
228241eb 7798 bnx2x_reset_chip(bp, reset_code);
7799
7800 /* Report UNLOAD_DONE to MCP */
34f80b04 7801 if (!BP_NOMCP(bp))
a2fbb9ea 7802 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
356e2385 7803
9a035440 7804 bp->port.pmf = 0;
a2fbb9ea 7805
7a9b2557 7806 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7807 bnx2x_free_skbs(bp);
555f6c78 7808 for_each_rx_queue(bp, i)
3196a88a 7809 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7810 for_each_rx_queue(bp, i)
7cde1c8b 7811 netif_napi_del(&bnx2x_fp(bp, i, napi));
7812 bnx2x_free_mem(bp);
7813
7814 bp->state = BNX2X_STATE_CLOSED;
228241eb 7815
7816 netif_carrier_off(bp->dev);
7817
7818 return 0;
7819}
7820
7821static void bnx2x_reset_task(struct work_struct *work)
7822{
7823 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7824
7825#ifdef BNX2X_STOP_ON_ERROR
7826 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7827 " so reset not done to allow debug dump,\n"
ad361c98 7828 " you will need to reboot when done\n");
7829 return;
7830#endif
7831
7832 rtnl_lock();
7833
7834 if (!netif_running(bp->dev))
7835 goto reset_task_exit;
7836
7837 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7838 bnx2x_nic_load(bp, LOAD_NORMAL);
7839
7840reset_task_exit:
7841 rtnl_unlock();
7842}
7843
7844/* end of nic load/unload */
7845
7846/* ethtool_ops */
7847
7848/*
7849 * Init service functions
7850 */
7851
7852static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7853{
7854 switch (func) {
7855 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7856 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7857 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7858 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7859 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7860 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7861 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7862 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7863 default:
7864 BNX2X_ERR("Unsupported function index: %d\n", func);
7865 return (u32)(-1);
7866 }
7867}
7868
7869static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7870{
7871 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7872
7873 /* Flush all outstanding writes */
7874 mmiowb();
7875
7876 /* Pretend to be function 0 */
7877 REG_WR(bp, reg, 0);
7878 /* Flush the GRC transaction (in the chip) */
7879 new_val = REG_RD(bp, reg);
7880 if (new_val != 0) {
7881 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7882 new_val);
7883 BUG();
7884 }
7885
7886 /* From now we are in the "like-E1" mode */
7887 bnx2x_int_disable(bp);
7888
7889 /* Flush all outstanding writes */
7890 mmiowb();
7891
7892 	/* Restore the original function settings */
7893 REG_WR(bp, reg, orig_func);
7894 new_val = REG_RD(bp, reg);
7895 if (new_val != orig_func) {
7896 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7897 orig_func, new_val);
7898 BUG();
7899 }
7900}
7901
7902static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7903{
7904 if (CHIP_IS_E1H(bp))
7905 bnx2x_undi_int_disable_e1h(bp, func);
7906 else
7907 bnx2x_int_disable(bp);
7908}
7909
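/*
 * Boot code such as UNDI may leave the device initialized. It is
 * recognized by the CID offset it programs for the normal doorbell
 * (0x7); if found, the old state is unloaded and the chip reset
 * before this driver takes over.
 */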
7910static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7911{
7912 u32 val;
7913
7914 /* Check if there is any driver already loaded */
7915 val = REG_RD(bp, MISC_REG_UNPREPARED);
7916 if (val == 0x1) {
7917 /* Check if it is the UNDI driver
7918 * UNDI driver initializes CID offset for normal bell to 0x7
7919 */
4a37fb66 7920 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7921 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7922 if (val == 0x7) {
7923 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7924 /* save our func */
34f80b04 7925 int func = BP_FUNC(bp);
7926 u32 swap_en;
7927 u32 swap_val;
34f80b04 7928
7929 /* clear the UNDI indication */
7930 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7931
7932 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7933
7934 /* try unload UNDI on port 0 */
7935 bp->func = 0;
7936 bp->fw_seq =
7937 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7938 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7939 reset_code = bnx2x_fw_command(bp, reset_code);
7940
7941 /* if UNDI is loaded on the other port */
7942 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7943
7944 /* send "DONE" for previous unload */
7945 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7946
7947 /* unload UNDI on port 1 */
34f80b04 7948 bp->func = 1;
7949 bp->fw_seq =
7950 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7951 DRV_MSG_SEQ_NUMBER_MASK);
7952 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7953
7954 bnx2x_fw_command(bp, reset_code);
7955 }
7956
7957 /* now it's safe to release the lock */
7958 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7959
f1ef27ef 7960 bnx2x_undi_int_disable(bp, func);
7961
7962 /* close input traffic and wait for it */
7963 /* Do not rcv packets to BRB */
7964 REG_WR(bp,
7965 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7966 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7967 /* Do not direct rcv packets that are not for MCP to
7968 * the BRB */
7969 REG_WR(bp,
7970 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7971 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7972 /* clear AEU */
7973 REG_WR(bp,
7974 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7975 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7976 msleep(10);
7977
7978 /* save NIG port swap info */
7979 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7980 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7981 /* reset device */
7982 REG_WR(bp,
7983 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7984 0xd3ffffff);
7985 REG_WR(bp,
7986 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7987 0x1403);
7988 /* take the NIG out of reset and restore swap values */
7989 REG_WR(bp,
7990 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7991 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7992 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7993 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7994
7995 /* send unload done to the MCP */
7996 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7997
7998 /* restore our func and fw_seq */
7999 bp->func = func;
8000 bp->fw_seq =
8001 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
8002 DRV_MSG_SEQ_NUMBER_MASK);
8003
8004 } else
8005 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
8006 }
8007}
8008
8009static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
8010{
8011 u32 val, val2, val3, val4, id;
72ce58c3 8012 u16 pmc;
8013
8014 /* Get the chip revision id and number. */
8015 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
8016 val = REG_RD(bp, MISC_REG_CHIP_NUM);
8017 id = ((val & 0xffff) << 16);
8018 val = REG_RD(bp, MISC_REG_CHIP_REV);
8019 id |= ((val & 0xf) << 12);
8020 val = REG_RD(bp, MISC_REG_CHIP_METAL);
8021 id |= ((val & 0xff) << 4);
5a40e08e 8022 val = REG_RD(bp, MISC_REG_BOND_ID);
8023 id |= (val & 0xf);
8024 bp->common.chip_id = id;
8025 bp->link_params.chip_id = bp->common.chip_id;
8026 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
8027
8028 val = (REG_RD(bp, 0x2874) & 0x55);
8029 if ((bp->common.chip_id & 0x1) ||
8030 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
8031 bp->flags |= ONE_PORT_FLAG;
8032 BNX2X_DEV_INFO("single port device\n");
8033 }
8034
8035 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8036 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8037 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8038 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8039 bp->common.flash_size, bp->common.flash_size);
8040
8041 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
2691d51d 8042 bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
34f80b04 8043 bp->link_params.shmem_base = bp->common.shmem_base;
8044 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8045 bp->common.shmem_base, bp->common.shmem2_base);
8046
8047 if (!bp->common.shmem_base ||
8048 (bp->common.shmem_base < 0xA0000) ||
8049 (bp->common.shmem_base >= 0xC0000)) {
8050 BNX2X_DEV_INFO("MCP not active\n");
8051 bp->flags |= NO_MCP_FLAG;
8052 return;
8053 }
8054
8055 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8056 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8057 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8058 BNX2X_ERR("BAD MCP validity signature\n");
8059
8060 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8061 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
8062
8063 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8064 SHARED_HW_CFG_LED_MODE_MASK) >>
8065 SHARED_HW_CFG_LED_MODE_SHIFT);
8066
8067 bp->link_params.feature_config_flags = 0;
8068 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8069 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8070 bp->link_params.feature_config_flags |=
8071 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8072 else
8073 bp->link_params.feature_config_flags &=
8074 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8075
8076 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8077 bp->common.bc_ver = val;
8078 BNX2X_DEV_INFO("bc_ver %X\n", val);
8079 if (val < BNX2X_BC_VER) {
8080 /* for now only warn
8081 * later we might need to enforce this */
8082 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
8083 " please upgrade BC\n", BNX2X_BC_VER, val);
8084 }
8085 bp->link_params.feature_config_flags |=
8086 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
8087 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8088
8089 if (BP_E1HVN(bp) == 0) {
8090 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8091 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8092 } else {
8093 /* no WOL capability for E1HVN != 0 */
8094 bp->flags |= NO_WOL_FLAG;
8095 }
8096 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8097 (bp->flags & NO_WOL_FLAG) ? "not " : "");
8098
8099 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8100 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8101 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8102 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8103
8104 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
8105 val, val2, val3, val4);
8106}
8107
8108static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8109 u32 switch_cfg)
a2fbb9ea 8110{
34f80b04 8111 int port = BP_PORT(bp);
8112 u32 ext_phy_type;
8113
8114 switch (switch_cfg) {
8115 case SWITCH_CFG_1G:
8116 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
8117
8118 ext_phy_type =
8119 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8120 switch (ext_phy_type) {
8121 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
8122 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8123 ext_phy_type);
8124
8125 bp->port.supported |= (SUPPORTED_10baseT_Half |
8126 SUPPORTED_10baseT_Full |
8127 SUPPORTED_100baseT_Half |
8128 SUPPORTED_100baseT_Full |
8129 SUPPORTED_1000baseT_Full |
8130 SUPPORTED_2500baseX_Full |
8131 SUPPORTED_TP |
8132 SUPPORTED_FIBRE |
8133 SUPPORTED_Autoneg |
8134 SUPPORTED_Pause |
8135 SUPPORTED_Asym_Pause);
8136 break;
8137
8138 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
8139 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
8140 ext_phy_type);
8141
8142 bp->port.supported |= (SUPPORTED_10baseT_Half |
8143 SUPPORTED_10baseT_Full |
8144 SUPPORTED_100baseT_Half |
8145 SUPPORTED_100baseT_Full |
8146 SUPPORTED_1000baseT_Full |
8147 SUPPORTED_TP |
8148 SUPPORTED_FIBRE |
8149 SUPPORTED_Autoneg |
8150 SUPPORTED_Pause |
8151 SUPPORTED_Asym_Pause);
8152 break;
8153
8154 default:
8155 BNX2X_ERR("NVRAM config error. "
8156 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 8157 bp->link_params.ext_phy_config);
8158 return;
8159 }
8160
8161 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8162 port*0x10);
8163 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
8164 break;
8165
8166 case SWITCH_CFG_10G:
8167 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
8168
8169 ext_phy_type =
8170 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8171 switch (ext_phy_type) {
8172 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8173 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
8174 ext_phy_type);
8175
8176 bp->port.supported |= (SUPPORTED_10baseT_Half |
8177 SUPPORTED_10baseT_Full |
8178 SUPPORTED_100baseT_Half |
8179 SUPPORTED_100baseT_Full |
8180 SUPPORTED_1000baseT_Full |
8181 SUPPORTED_2500baseX_Full |
8182 SUPPORTED_10000baseT_Full |
8183 SUPPORTED_TP |
8184 SUPPORTED_FIBRE |
8185 SUPPORTED_Autoneg |
8186 SUPPORTED_Pause |
8187 SUPPORTED_Asym_Pause);
8188 break;
8189
8190 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8191 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
34f80b04 8192 ext_phy_type);
f1410647 8193
34f80b04 8194 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8195 SUPPORTED_1000baseT_Full |
34f80b04 8196 SUPPORTED_FIBRE |
589abe3a 8197 SUPPORTED_Autoneg |
8198 SUPPORTED_Pause |
8199 SUPPORTED_Asym_Pause);
8200 break;
8201
8202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8203 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
8204 ext_phy_type);
8205
34f80b04 8206 bp->port.supported |= (SUPPORTED_10000baseT_Full |
589abe3a 8207 SUPPORTED_2500baseX_Full |
34f80b04 8208 SUPPORTED_1000baseT_Full |
8209 SUPPORTED_FIBRE |
8210 SUPPORTED_Autoneg |
8211 SUPPORTED_Pause |
8212 SUPPORTED_Asym_Pause);
8213 break;
8214
8215 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8216 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
8217 ext_phy_type);
8218
8219 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8220 SUPPORTED_FIBRE |
8221 SUPPORTED_Pause |
8222 SUPPORTED_Asym_Pause);
8223 break;
8224
8225 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8226 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
8227 ext_phy_type);
8228
8229 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8230 SUPPORTED_1000baseT_Full |
8231 SUPPORTED_FIBRE |
8232 SUPPORTED_Pause |
8233 SUPPORTED_Asym_Pause);
8234 break;
8235
8236 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8237 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
8238 ext_phy_type);
8239
34f80b04 8240 bp->port.supported |= (SUPPORTED_10000baseT_Full |
34f80b04 8241 SUPPORTED_1000baseT_Full |
34f80b04 8242 SUPPORTED_Autoneg |
589abe3a 8243 SUPPORTED_FIBRE |
8244 SUPPORTED_Pause |
8245 SUPPORTED_Asym_Pause);
8246 break;
8247
8248 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8249 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
8250 ext_phy_type);
8251
8252 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8253 SUPPORTED_1000baseT_Full |
8254 SUPPORTED_Autoneg |
8255 SUPPORTED_FIBRE |
8256 SUPPORTED_Pause |
8257 SUPPORTED_Asym_Pause);
8258 break;
8259
8260 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8261 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
8262 ext_phy_type);
8263
8264 bp->port.supported |= (SUPPORTED_10000baseT_Full |
8265 SUPPORTED_TP |
8266 SUPPORTED_Autoneg |
8267 SUPPORTED_Pause |
8268 SUPPORTED_Asym_Pause);
8269 break;
8270
8271 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8272 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
8273 ext_phy_type);
8274
8275 bp->port.supported |= (SUPPORTED_10baseT_Half |
8276 SUPPORTED_10baseT_Full |
8277 SUPPORTED_100baseT_Half |
8278 SUPPORTED_100baseT_Full |
8279 SUPPORTED_1000baseT_Full |
8280 SUPPORTED_10000baseT_Full |
8281 SUPPORTED_TP |
8282 SUPPORTED_Autoneg |
8283 SUPPORTED_Pause |
8284 SUPPORTED_Asym_Pause);
8285 break;
8286
8287 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8288 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8289 bp->link_params.ext_phy_config);
8290 break;
8291
8292 default:
8293 BNX2X_ERR("NVRAM config error. "
8294 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 8295 bp->link_params.ext_phy_config);
8296 return;
8297 }
8298
8299 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8300 port*0x18);
8301 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 8302
8303 break;
8304
8305 default:
8306 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 8307 bp->port.link_config);
8308 return;
8309 }
34f80b04 8310 bp->link_params.phy_addr = bp->port.phy_addr;
8311
8312 /* mask what we support according to speed_cap_mask */
8313 if (!(bp->link_params.speed_cap_mask &
8314 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 8315 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8316
8317 if (!(bp->link_params.speed_cap_mask &
8318 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 8319 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8320
8321 if (!(bp->link_params.speed_cap_mask &
8322 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 8323 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8324
8325 if (!(bp->link_params.speed_cap_mask &
8326 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 8327 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8328
8329 if (!(bp->link_params.speed_cap_mask &
8330 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
8331 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
8332 SUPPORTED_1000baseT_Full);
a2fbb9ea 8333
8334 if (!(bp->link_params.speed_cap_mask &
8335 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 8336 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8337
8338 if (!(bp->link_params.speed_cap_mask &
8339 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 8340 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 8341
34f80b04 8342 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
8343}
8344
34f80b04 8345static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8346{
c18487ee 8347 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 8348
34f80b04 8349 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 8350 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 8351 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 8352 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8353 bp->port.advertising = bp->port.supported;
a2fbb9ea 8354 } else {
8355 u32 ext_phy_type =
8356 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8357
8358 if ((ext_phy_type ==
8359 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
8360 (ext_phy_type ==
8361 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 8362 /* force 10G, no AN */
c18487ee 8363 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 8364 bp->port.advertising =
8365 (ADVERTISED_10000baseT_Full |
8366 ADVERTISED_FIBRE);
8367 break;
8368 }
8369 BNX2X_ERR("NVRAM config error. "
8370 "Invalid link_config 0x%x"
8371 " Autoneg not supported\n",
34f80b04 8372 bp->port.link_config);
8373 return;
8374 }
8375 break;
8376
8377 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 8378 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 8379 bp->link_params.req_line_speed = SPEED_10;
8380 bp->port.advertising = (ADVERTISED_10baseT_Full |
8381 ADVERTISED_TP);
8382 } else {
8383 BNX2X_ERR("NVRAM config error. "
8384 "Invalid link_config 0x%x"
8385 " speed_cap_mask 0x%x\n",
34f80b04 8386 bp->port.link_config,
c18487ee 8387 bp->link_params.speed_cap_mask);
8388 return;
8389 }
8390 break;
8391
8392 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 8393 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
8394 bp->link_params.req_line_speed = SPEED_10;
8395 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8396 bp->port.advertising = (ADVERTISED_10baseT_Half |
8397 ADVERTISED_TP);
a2fbb9ea
ET
8398 } else {
8399 BNX2X_ERR("NVRAM config error. "
8400 "Invalid link_config 0x%x"
8401 " speed_cap_mask 0x%x\n",
34f80b04 8402 bp->port.link_config,
c18487ee 8403 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8404 return;
8405 }
8406 break;
8407
8408 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 8409 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 8410 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
8411 bp->port.advertising = (ADVERTISED_100baseT_Full |
8412 ADVERTISED_TP);
a2fbb9ea
ET
8413 } else {
8414 BNX2X_ERR("NVRAM config error. "
8415 "Invalid link_config 0x%x"
8416 " speed_cap_mask 0x%x\n",
34f80b04 8417 bp->port.link_config,
c18487ee 8418 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8419 return;
8420 }
8421 break;
8422
8423 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 8424 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
8425 bp->link_params.req_line_speed = SPEED_100;
8426 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
8427 bp->port.advertising = (ADVERTISED_100baseT_Half |
8428 ADVERTISED_TP);
a2fbb9ea
ET
8429 } else {
8430 BNX2X_ERR("NVRAM config error. "
8431 "Invalid link_config 0x%x"
8432 " speed_cap_mask 0x%x\n",
34f80b04 8433 bp->port.link_config,
c18487ee 8434 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8435 return;
8436 }
8437 break;
8438
8439 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 8440 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 8441 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
8442 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8443 ADVERTISED_TP);
a2fbb9ea
ET
8444 } else {
8445 BNX2X_ERR("NVRAM config error. "
8446 "Invalid link_config 0x%x"
8447 " speed_cap_mask 0x%x\n",
34f80b04 8448 bp->port.link_config,
c18487ee 8449 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8450 return;
8451 }
8452 break;
8453
8454 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 8455 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 8456 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
8457 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8458 ADVERTISED_TP);
a2fbb9ea
ET
8459 } else {
8460 BNX2X_ERR("NVRAM config error. "
8461 "Invalid link_config 0x%x"
8462 " speed_cap_mask 0x%x\n",
34f80b04 8463 bp->port.link_config,
c18487ee 8464 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8465 return;
8466 }
8467 break;
8468
8469 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8470 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8471 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 8472 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 8473 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
8474 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8475 ADVERTISED_FIBRE);
a2fbb9ea
ET
8476 } else {
8477 BNX2X_ERR("NVRAM config error. "
8478 "Invalid link_config 0x%x"
8479 " speed_cap_mask 0x%x\n",
34f80b04 8480 bp->port.link_config,
c18487ee 8481 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
8482 return;
8483 }
8484 break;
8485
8486 default:
8487 BNX2X_ERR("NVRAM config error. "
8488 "BAD link speed link_config 0x%x\n",
34f80b04 8489 bp->port.link_config);
c18487ee 8490 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 8491 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
8492 break;
8493 }
a2fbb9ea 8494
34f80b04
EG
8495 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8496 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 8497 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 8498 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 8499 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8500
c18487ee 8501 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 8502 " advertising 0x%x\n",
c18487ee
YR
8503 bp->link_params.req_line_speed,
8504 bp->link_params.req_duplex,
34f80b04 8505 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
8506}
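
/* Illustrative note: the flow-control fallback above means a port whose
 * NVRAM asks for BNX2X_FLOW_CTRL_AUTO but which cannot autonegotiate
 * (no SUPPORTED_Autoneg bit) runs with flow control disabled rather
 * than carrying an unsatisfiable "auto" request into the link code.
 */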

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
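
/* Worked example of the SHMEM MAC unpacking above: for the address
 * 00:10:18:aa:bb:cc the NVRAM holds mac_upper = 0x0010 and
 * mac_lower = 0x18aabbcc; the shifts recover dev_addr[0..5] in
 * network (big-endian) byte order.
 */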

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  VN %d in single function mode,"
					  "  aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		       "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
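
/* Illustrative note: in E1H multi-function mode the MAX_BW field is in
 * units of 100 Mbps, so e.g. a (hypothetical) mf_config value of 25
 * caps the speed reported to ethtool at 2500 even on a 10G link.
 */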

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
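
/* Example usage (standard ethtool invocations, shown for reference):
 *   ethtool -s eth0 autoneg on                          - re-enable AN
 *   ethtool -s eth0 speed 10000 duplex full autoneg off - force 10G
 * Forced half duplex is rejected above for 1G and faster speeds.
 */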

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
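
/* Arithmetic note: a (hypothetical) wreg entry with size 2 and
 * read_regs_count 3 contributes 2 * (1 + 3) = 8 dwords; the final
 * "*= 4" converts the dword count to bytes before the dump header
 * size is added.
 */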

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}
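
/* Example usage: "ethtool -s eth0 wol g" requests magic-packet wake
 * and "ethtool -s eth0 wol d" disables it; only WAKE_MAGIC is
 * accepted by the handler above.
 */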

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}
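
/* Example usage: "ethtool -s eth0 msglvl 0x0004" enables the
 * NETIF_MSG_LINK class of messages used throughout this file.
 */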

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order,
			 * but ethtool sees it as an array of bytes;
			 * converting to big-endian does the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
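
/* Sequencing note: a 12-byte read at a dword-aligned offset issues
 * three dword commands with cmd_flags FIRST, 0 and LAST respectively;
 * the FIRST/LAST bits bracket the burst for the NVRAM controller.
 */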

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes;
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
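
/* Worked example for the read-modify-write above: writing one byte at
 * offset 0x103 uses align_offset 0x100 and BYTE_OFFSET(0x103) = 24,
 * so only bits 31:24 of the dword (as read, i.e. in big-endian byte
 * array order) are replaced before it is written back.
 */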

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  "  buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED)) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
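
/* Example usage: "ethtool -C eth0 rx-usecs 25 tx-usecs 50" restores
 * the driver defaults set in bnx2x_init_bp(); requested values are
 * clamped to BNX2X_MAX_COALES_TOUT = 0xf0 * 12 = 2880 us.
 */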

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
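
/* Example usage: "ethtool -G eth0 rx <n> tx <n>"; note that changing
 * the ring sizes on a running interface triggers a full unload and
 * reload of the NIC.
 */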

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
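
/* Example usage: "ethtool -A eth0 autoneg on rx on tx on"; with
 * autoneg off the rx/tx bits are applied as a forced setting, and
 * requesting neither direction degenerates to BNX2X_FLOW_CTRL_NONE.
 */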

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled; otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
9808
f3c87cdd 9809static const struct {
a2fbb9ea
ET
9810 char string[ETH_GSTRING_LEN];
9811} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
f3c87cdd
YG
9812 { "register_test (offline)" },
9813 { "memory_test (offline)" },
9814 { "loopback_test (offline)" },
9815 { "nvram_test (online)" },
9816 { "interrupt_test (online)" },
9817 { "link_test (online)" },
d3d4f495 9818 { "idle check (online)" }
a2fbb9ea
ET
9819};
9820
9821static int bnx2x_self_test_count(struct net_device *dev)
9822{
9823 return BNX2X_NUM_TESTS;
9824}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   first by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
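
/* Addressing note: offset1 is the per-port stride, so for port 1 the
 * test exercises e.g. BRB1_REG_PAUSE_LOW_THRESHOLD_0 + 4 and
 * NIG_REG_LLH0_DEST_MAC_0_0 + 160, i.e. the port-1 copies of the
 * port-0 registers listed above.
 */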

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
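
/* Frame layout used by the loopback test above (ETH_HLEN = 14):
 *   bytes  0..5   destination MAC = our own dev_addr
 *   bytes  6..11  source MAC = all zeros
 *   bytes 12..13  filler 0x77 (remainder of the Ethernet header)
 *   bytes 14..    payload pattern i & 0xff, verified byte-by-byte
 *                 against the received buffer
 */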

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  PHY loopback failed  (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, "  MAC loopback failed  (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}
10150
10151#define CRC32_RESIDUAL 0xdebb20e3
10152
10153static int bnx2x_test_nvram(struct bnx2x *bp)
10154{
10155 static const struct {
10156 int offset;
10157 int size;
10158 } nvram_tbl[] = {
10159 { 0, 0x14 }, /* bootstrap */
10160 { 0x14, 0xec }, /* dir */
10161 { 0x100, 0x350 }, /* manuf_info */
10162 { 0x450, 0xf0 }, /* feature_info */
10163 { 0x640, 0x64 }, /* upgrade_key_info */
10164 { 0x6a4, 0x64 },
10165 { 0x708, 0x70 }, /* manuf_key_info */
10166 { 0x778, 0x70 },
10167 { 0, 0 }
10168 };
4781bfad 10169 __be32 buf[0x350 / 4];
f3c87cdd 10170 u8 *data = (u8 *)buf;
10171 int i, rc;
10172 u32 magic, csum;
10173
10174 rc = bnx2x_nvram_read(bp, 0, data, 4);
10175 if (rc) {
f5372251 10176 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
f3c87cdd 10177 goto test_nvram_exit;
10178 }
10179
10180 magic = be32_to_cpu(buf[0]);
10181 if (magic != 0x669955aa) {
10182 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
10183 rc = -ENODEV;
10184 goto test_nvram_exit;
10185 }
10186
10187 for (i = 0; nvram_tbl[i].size; i++) {
10188
10189 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
10190 nvram_tbl[i].size);
10191 if (rc) {
10192 DP(NETIF_MSG_PROBE,
f5372251 10193 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
f3c87cdd 10194 goto test_nvram_exit;
10195 }
10196
10197 csum = ether_crc_le(nvram_tbl[i].size, data);
10198 if (csum != CRC32_RESIDUAL) {
10199 DP(NETIF_MSG_PROBE,
10200 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
10201 rc = -ENODEV;
10202 goto test_nvram_exit;
10203 }
10204 }
10205
10206test_nvram_exit:
10207 return rc;
10208}
10209
10210static int bnx2x_test_intr(struct bnx2x *bp)
10211{
10212 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
10213 int i, rc;
10214
10215 if (!netif_running(bp->dev))
10216 return -ENODEV;
10217
8d9c5f34 10218 config->hdr.length = 0;
af246401 10219 if (CHIP_IS_E1(bp))
10220 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
10221 else
10222 config->hdr.offset = BP_FUNC(bp);
0626b899 10223 config->hdr.client_id = bp->fp->cl_id;
f3c87cdd 10224 config->hdr.reserved1 = 0;
10225
10226 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10227 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10228 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10229 if (rc == 0) {
10230 bp->set_mac_pending++;
10231 for (i = 0; i < 10; i++) {
10232 if (!bp->set_mac_pending)
10233 break;
10234 msleep_interruptible(10);
10235 }
10236 if (i == 10)
10237 rc = -ENODEV;
10238 }
10239
10240 return rc;
10241}
10242
a2fbb9ea 10243static void bnx2x_self_test(struct net_device *dev,
10244 struct ethtool_test *etest, u64 *buf)
10245{
10246 struct bnx2x *bp = netdev_priv(dev);
a2fbb9ea 10247
10248 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
10249
f3c87cdd 10250 if (!netif_running(dev))
a2fbb9ea 10251 return;
a2fbb9ea 10252
33471629 10253 /* offline tests are not supported in MF mode */
f3c87cdd 10254 if (IS_E1HMF(bp))
10255 etest->flags &= ~ETH_TEST_FL_OFFLINE;
10256
10257 if (etest->flags & ETH_TEST_FL_OFFLINE) {
279abdf5 10258 int port = BP_PORT(bp);
10259 u32 val;
f3c87cdd 10260 u8 link_up;
10261
279abdf5 10262 /* save current value of input enable for TX port IF */
10263 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
10264 /* disable input for TX port IF */
10265 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10266
f3c87cdd 10267 link_up = bp->link_vars.link_up;
10268 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10269 bnx2x_nic_load(bp, LOAD_DIAG);
10270 /* wait until link state is restored */
10271 bnx2x_wait_for_link(bp, link_up);
10272
10273 if (bnx2x_test_registers(bp) != 0) {
10274 buf[0] = 1;
10275 etest->flags |= ETH_TEST_FL_FAILED;
10276 }
10277 if (bnx2x_test_memory(bp) != 0) {
10278 buf[1] = 1;
10279 etest->flags |= ETH_TEST_FL_FAILED;
10280 }
10281 buf[2] = bnx2x_test_loopback(bp, link_up);
10282 if (buf[2] != 0)
10283 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 10284
f3c87cdd 10285 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
279abdf5 10286
10287 /* restore input for TX port IF */
10288 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
10289
f3c87cdd 10290 bnx2x_nic_load(bp, LOAD_NORMAL);
10291 /* wait until link state is restored */
10292 bnx2x_wait_for_link(bp, link_up);
10293 }
10294 if (bnx2x_test_nvram(bp) != 0) {
10295 buf[3] = 1;
a2fbb9ea 10296 etest->flags |= ETH_TEST_FL_FAILED;
10297 }
f3c87cdd 10298 if (bnx2x_test_intr(bp) != 0) {
10299 buf[4] = 1;
10300 etest->flags |= ETH_TEST_FL_FAILED;
10301 }
10302 if (bp->port.pmf)
10303 if (bnx2x_link_test(bp) != 0) {
10304 buf[5] = 1;
10305 etest->flags |= ETH_TEST_FL_FAILED;
10306 }
f3c87cdd 10307
10308#ifdef BNX2X_EXTRA_DEBUG
10309 bnx2x_panic_dump(bp);
10310#endif
a2fbb9ea 10311}
10312
de832a55 10313static const struct {
10314 long offset;
10315 int size;
10316 u8 string[ETH_GSTRING_LEN];
10317} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
10318/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
10319 { Q_STATS_OFFSET32(error_bytes_received_hi),
10320 8, "[%d]: rx_error_bytes" },
10321 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
10322 8, "[%d]: rx_ucast_packets" },
10323 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
10324 8, "[%d]: rx_mcast_packets" },
10325 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
10326 8, "[%d]: rx_bcast_packets" },
10327 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
10328 { Q_STATS_OFFSET32(rx_err_discard_pkt),
10329 4, "[%d]: rx_phy_ip_err_discards"},
10330 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
10331 4, "[%d]: rx_skb_alloc_discard" },
10332 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
10333
10334/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
10335 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10336 8, "[%d]: tx_packets" }
10337};
10338
bb2a0f7a 10339static const struct {
10340 long offset;
10341 int size;
10342 u32 flags;
66e855f3 10343#define STATS_FLAGS_PORT 1
10344#define STATS_FLAGS_FUNC 2
de832a55 10345#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 10346 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 10347} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
de832a55 10348/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
10349 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 10350 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 10351 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 10352 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 10353 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 10354 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 10355 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 10356 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 10357 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 10358 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 10359 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 10360 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 10361 8, STATS_FLAGS_PORT, "rx_align_errors" },
de832a55 10362 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
10363 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
10364 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
10365 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
10366/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10367 8, STATS_FLAGS_PORT, "rx_fragments" },
10368 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10369 8, STATS_FLAGS_PORT, "rx_jabbers" },
10370 { STATS_OFFSET32(no_buff_discard_hi),
10371 8, STATS_FLAGS_BOTH, "rx_discards" },
10372 { STATS_OFFSET32(mac_filter_discard),
10373 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10374 { STATS_OFFSET32(xxoverflow_discard),
10375 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10376 { STATS_OFFSET32(brb_drop_hi),
10377 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10378 { STATS_OFFSET32(brb_truncate_hi),
10379 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10380 { STATS_OFFSET32(pause_frames_received_hi),
10381 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10382 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10383 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10384 { STATS_OFFSET32(nig_timer_max),
10385 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10386/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10387 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10388 { STATS_OFFSET32(rx_skb_alloc_failed),
10389 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10390 { STATS_OFFSET32(hw_csum_err),
10391 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10392
10393 { STATS_OFFSET32(total_bytes_transmitted_hi),
10394 8, STATS_FLAGS_BOTH, "tx_bytes" },
10395 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10396 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10397 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10398 8, STATS_FLAGS_BOTH, "tx_packets" },
10399 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10400 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10401 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10402 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 10403 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 10404 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 10405 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 10406 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 10407/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 10408 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 10409 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 10410 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 10411 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 10412 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 10413 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 10414 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 10415 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 10416 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 10417 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 10418 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 10419 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 10420 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 10421 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 10422 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 10423 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 10424 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 10425 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 10426 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 10427/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 10428 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
de832a55 10429 { STATS_OFFSET32(pause_frames_sent_hi),
10430 8, STATS_FLAGS_PORT, "tx_pause_frames" }
a2fbb9ea 10431};
10432
de832a55 10433#define IS_PORT_STAT(i) \
10434 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10435#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10436#define IS_E1HMF_MODE_STAT(bp) \
10437 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 10438
a2fbb9ea 10439static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10440{
bb2a0f7a 10441 struct bnx2x *bp = netdev_priv(dev);
de832a55 10442 int i, j, k;
bb2a0f7a 10443
a2fbb9ea 10444 switch (stringset) {
10445 case ETH_SS_STATS:
de832a55 10446 if (is_multi(bp)) {
10447 k = 0;
ca00392c 10448 for_each_rx_queue(bp, i) {
de832a55 10449 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10450 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10451 bnx2x_q_stats_arr[j].string, i);
10452 k += BNX2X_NUM_Q_STATS;
10453 }
10454 if (IS_E1HMF_MODE_STAT(bp))
10455 break;
10456 for (j = 0; j < BNX2X_NUM_STATS; j++)
10457 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10458 bnx2x_stats_arr[j].string);
10459 } else {
10460 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10461 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10462 continue;
10463 strcpy(buf + j*ETH_GSTRING_LEN,
10464 bnx2x_stats_arr[i].string);
10465 j++;
10466 }
bb2a0f7a 10467 }
a2fbb9ea 10468 break;
10469
10470 case ETH_SS_TEST:
10471 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10472 break;
10473 }
10474}
10475
10476static int bnx2x_get_stats_count(struct net_device *dev)
10477{
bb2a0f7a 10478 struct bnx2x *bp = netdev_priv(dev);
de832a55 10479 int i, num_stats;
bb2a0f7a 10480
de832a55 10481 if (is_multi(bp)) {
ca00392c 10482 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
de832a55 10483 if (!IS_E1HMF_MODE_STAT(bp))
10484 num_stats += BNX2X_NUM_STATS;
10485 } else {
10486 if (IS_E1HMF_MODE_STAT(bp)) {
10487 num_stats = 0;
10488 for (i = 0; i < BNX2X_NUM_STATS; i++)
10489 if (IS_FUNC_STAT(i))
10490 num_stats++;
10491 } else
10492 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 10493 }
de832a55 10494
bb2a0f7a 10495 return num_stats;
a2fbb9ea 10496}
10497
10498static void bnx2x_get_ethtool_stats(struct net_device *dev,
10499 struct ethtool_stats *stats, u64 *buf)
10500{
10501 struct bnx2x *bp = netdev_priv(dev);
de832a55 10502 u32 *hw_stats, *offset;
10503 int i, j, k;
bb2a0f7a 10504
de832a55 10505 if (is_multi(bp)) {
10506 k = 0;
ca00392c 10507 for_each_rx_queue(bp, i) {
de832a55 10508 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10509 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10510 if (bnx2x_q_stats_arr[j].size == 0) {
10511 /* skip this counter */
10512 buf[k + j] = 0;
10513 continue;
10514 }
10515 offset = (hw_stats +
10516 bnx2x_q_stats_arr[j].offset);
10517 if (bnx2x_q_stats_arr[j].size == 4) {
10518 /* 4-byte counter */
10519 buf[k + j] = (u64) *offset;
10520 continue;
10521 }
10522 /* 8-byte counter */
10523 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10524 }
10525 k += BNX2X_NUM_Q_STATS;
10526 }
10527 if (IS_E1HMF_MODE_STAT(bp))
10528 return;
10529 hw_stats = (u32 *)&bp->eth_stats;
10530 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10531 if (bnx2x_stats_arr[j].size == 0) {
10532 /* skip this counter */
10533 buf[k + j] = 0;
10534 continue;
10535 }
10536 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10537 if (bnx2x_stats_arr[j].size == 4) {
10538 /* 4-byte counter */
10539 buf[k + j] = (u64) *offset;
10540 continue;
10541 }
10542 /* 8-byte counter */
10543 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 10544 }
de832a55 10545 } else {
10546 hw_stats = (u32 *)&bp->eth_stats;
10547 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10548 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10549 continue;
10550 if (bnx2x_stats_arr[i].size == 0) {
10551 /* skip this counter */
10552 buf[j] = 0;
10553 j++;
10554 continue;
10555 }
10556 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10557 if (bnx2x_stats_arr[i].size == 4) {
10558 /* 4-byte counter */
10559 buf[j] = (u64) *offset;
10560 j++;
10561 continue;
10562 }
10563 /* 8-byte counter */
10564 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 10565 j++;
a2fbb9ea 10566 }
a2fbb9ea 10567 }
10568}
10569
10570static int bnx2x_phys_id(struct net_device *dev, u32 data)
10571{
10572 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10573 int port = BP_PORT(bp);
a2fbb9ea 10574 int i;
10575
34f80b04 10576 if (!netif_running(dev))
10577 return 0;
10578
10579 if (!bp->port.pmf)
10580 return 0;
10581
a2fbb9ea 10582 if (data == 0)
10583 data = 2;
10584
10585 for (i = 0; i < (data * 2); i++) {
c18487ee 10586 if ((i % 2) == 0)
34f80b04 10587 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
c18487ee 10588 bp->link_params.hw_led_mode,
10589 bp->link_params.chip_id);
10590 else
34f80b04 10591 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
c18487ee 10592 bp->link_params.hw_led_mode,
10593 bp->link_params.chip_id);
10594
a2fbb9ea 10595 msleep_interruptible(500);
10596 if (signal_pending(current))
10597 break;
10598 }
10599
c18487ee 10600 if (bp->link_vars.link_up)
34f80b04 10601 bnx2x_set_led(bp, port, LED_MODE_OPER,
c18487ee 10602 bp->link_vars.line_speed,
10603 bp->link_params.hw_led_mode,
10604 bp->link_params.chip_id);
a2fbb9ea 10605
10606 return 0;
10607}
10608
10609static struct ethtool_ops bnx2x_ethtool_ops = {
7a9b2557 10610 .get_settings = bnx2x_get_settings,
10611 .set_settings = bnx2x_set_settings,
10612 .get_drvinfo = bnx2x_get_drvinfo,
0a64ea57 10613 .get_regs_len = bnx2x_get_regs_len,
10614 .get_regs = bnx2x_get_regs,
a2fbb9ea 10615 .get_wol = bnx2x_get_wol,
10616 .set_wol = bnx2x_set_wol,
7a9b2557 10617 .get_msglevel = bnx2x_get_msglevel,
10618 .set_msglevel = bnx2x_set_msglevel,
10619 .nway_reset = bnx2x_nway_reset,
01e53298 10620 .get_link = bnx2x_get_link,
7a9b2557 10621 .get_eeprom_len = bnx2x_get_eeprom_len,
10622 .get_eeprom = bnx2x_get_eeprom,
10623 .set_eeprom = bnx2x_set_eeprom,
10624 .get_coalesce = bnx2x_get_coalesce,
10625 .set_coalesce = bnx2x_set_coalesce,
10626 .get_ringparam = bnx2x_get_ringparam,
10627 .set_ringparam = bnx2x_set_ringparam,
10628 .get_pauseparam = bnx2x_get_pauseparam,
10629 .set_pauseparam = bnx2x_set_pauseparam,
10630 .get_rx_csum = bnx2x_get_rx_csum,
10631 .set_rx_csum = bnx2x_set_rx_csum,
10632 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 10633 .set_tx_csum = ethtool_op_set_tx_hw_csum,
7a9b2557 10634 .set_flags = bnx2x_set_flags,
10635 .get_flags = ethtool_op_get_flags,
10636 .get_sg = ethtool_op_get_sg,
10637 .set_sg = ethtool_op_set_sg,
a2fbb9ea 10638 .get_tso = ethtool_op_get_tso,
10639 .set_tso = bnx2x_set_tso,
10640 .self_test_count = bnx2x_self_test_count,
7a9b2557 10641 .self_test = bnx2x_self_test,
10642 .get_strings = bnx2x_get_strings,
a2fbb9ea 10643 .phys_id = bnx2x_phys_id,
10644 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 10645 .get_ethtool_stats = bnx2x_get_ethtool_stats,
a2fbb9ea 10646};
10647
10648/* end of ethtool_ops */
10649
10650/****************************************************************************
10651* General service functions
10652****************************************************************************/
10653
10654static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10655{
10656 u16 pmcsr;
10657
10658 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10659
10660 switch (state) {
10661 case PCI_D0:
34f80b04 10662 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea 10663 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10664 PCI_PM_CTRL_PME_STATUS));
10665
10666 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 10667 /* delay required during transition out of D3hot */
a2fbb9ea 10668 msleep(20);
34f80b04 10669 break;
a2fbb9ea 10670
34f80b04 10671 case PCI_D3hot:
10672 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10673 pmcsr |= 3;
a2fbb9ea 10674
34f80b04 10675 if (bp->wol)
10676 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 10677
34f80b04 10678 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10679 pmcsr);
a2fbb9ea 10680
34f80b04 10681 /* No more memory access after this point until
10682 * device is brought back to D0.
10683 */
10684 break;
10685
10686 default:
10687 return -EINVAL;
10688 }
10689 return 0;
a2fbb9ea 10690}
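/* Note on the PMCSR writes above (illustrative comment, not driver
 * code): PCI_PM_CTRL_STATE_MASK covers the low two bits of the PM
 * control/status register, so "pmcsr |= 3" selects power state D3hot,
 * while the D0 branch clears those bits and writes PME_STATUS back
 * (that bit is write-one-to-clear, so this acknowledges a pending PME).
 */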
10691
237907c1 10692static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10693{
10694 u16 rx_cons_sb;
10695
10696 /* Tell compiler that status block fields can change */
10697 barrier();
10698 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10699 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10700 rx_cons_sb++;
10701 return (fp->rx_comp_cons != rx_cons_sb);
10702}
10703
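/* Note on the index skip in bnx2x_has_rx_work() (illustrative comment,
 * not driver code): the last entry of each RCQ page is reserved for a
 * "next page" pointer, so valid consumer values never land on it. When
 * the status block index sits on that reserved slot (its low bits equal
 * MAX_RCQ_DESC_CNT), the comparison index is bumped past it. With a
 * hypothetical page of 8 entries (MAX_RCQ_DESC_CNT == 7):
 *
 *	sb index 5 -> compare rx_comp_cons against 5
 *	sb index 7 -> reserved slot, compare against 8 instead
 */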
34f80b04 10704/*
10705 * net_device service functions
10706 */
10707
a2fbb9ea 10708static int bnx2x_poll(struct napi_struct *napi, int budget)
10709{
10710 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10711 napi);
10712 struct bnx2x *bp = fp->bp;
10713 int work_done = 0;
10714
10715#ifdef BNX2X_STOP_ON_ERROR
10716 if (unlikely(bp->panic))
34f80b04 10717 goto poll_panic;
a2fbb9ea 10718#endif
10719
a2fbb9ea 10720 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10721 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10722
10723 bnx2x_update_fpsb_idx(fp);
10724
8534f32c 10725 if (bnx2x_has_rx_work(fp)) {
a2fbb9ea 10726 work_done = bnx2x_rx_int(fp, budget);
356e2385 10727
8534f32c 10728 /* must not complete if we consumed full budget */
10729 if (work_done >= budget)
10730 goto poll_again;
10731 }
a2fbb9ea 10732
ca00392c 10733 /* bnx2x_has_rx_work() reads the status block, thus we need to
8534f32c 10734 * ensure that status block indices have been actually read
ca00392c 10735 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
8534f32c 10736 * so that we won't write the "newer" value of the status block to IGU
ca00392c 10737 * (if there was a DMA right after bnx2x_has_rx_work and
8534f32c 10738 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10739 * may be postponed to right before bnx2x_ack_sb). In this case
10740 * there will never be another interrupt until there is another update
10741 * of the status block, while there is still unhandled work.
10742 */
10743 rmb();
a2fbb9ea 10744
ca00392c 10745 if (!bnx2x_has_rx_work(fp)) {
a2fbb9ea 10746#ifdef BNX2X_STOP_ON_ERROR
34f80b04 10747poll_panic:
a2fbb9ea 10748#endif
288379f0 10749 napi_complete(napi);
a2fbb9ea 10750
0626b899 10751 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
a2fbb9ea 10752 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
0626b899 10753 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
a2fbb9ea 10754 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10755 }
356e2385 10756
8534f32c 10757poll_again:
a2fbb9ea 10758 return work_done;
10759}
10760
755735eb 10761
10762/* we split the first BD into headers and data BDs
33471629 10763 * to ease the pain of our fellow microcode engineers
755735eb 10764 * we use one mapping for both BDs
10765 * So far this has only been observed to happen
10766 * in Other Operating Systems(TM)
10767 */
10768static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10769 struct bnx2x_fastpath *fp,
ca00392c 10770 struct sw_tx_bd *tx_buf,
10771 struct eth_tx_start_bd **tx_bd, u16 hlen,
755735eb 10772 u16 bd_prod, int nbd)
10773{
ca00392c 10774 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
755735eb 10775 struct eth_tx_bd *d_tx_bd;
10776 dma_addr_t mapping;
10777 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10778
10779 /* first fix first BD */
10780 h_tx_bd->nbd = cpu_to_le16(nbd);
10781 h_tx_bd->nbytes = cpu_to_le16(hlen);
10782
10783 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10784 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10785 h_tx_bd->addr_lo, h_tx_bd->nbd);
10786
10787 /* now get a new data BD
10788 * (after the pbd) and fill it */
10789 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 10790 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
755735eb 10791
10792 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10793 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10794
10795 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10796 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10797 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
ca00392c 10798
10799 /* this marks the BD as one that has no individual mapping */
10800 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10801
755735eb 10802 DP(NETIF_MSG_TX_QUEUED,
10803 "TSO split data size is %d (%x:%x)\n",
10804 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10805
ca00392c 10806 /* update tx_bd */
10807 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
755735eb 10808
10809 return bd_prod;
10810}
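/* Worked example for bnx2x_tx_split() (illustrative numbers, not from
 * the driver): suppose the linear part of an LSO skb is 1000 bytes with
 * hlen = 100 bytes of headers, all under one DMA mapping at address M.
 * After the split:
 *
 *	header BD: addr = M,       nbytes = 100
 *	data BD:   addr = M + 100, nbytes = 900
 *
 * Only the original mapping is ever unmapped; BNX2X_TSO_SPLIT_BD tells
 * the completion path that the extra BD has no mapping of its own.
 */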
10811
10812static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10813{
10814 if (fix > 0)
10815 csum = (u16) ~csum_fold(csum_sub(csum,
10816 csum_partial(t_header - fix, fix, 0)));
10817
10818 else if (fix < 0)
10819 csum = (u16) ~csum_fold(csum_add(csum,
10820 csum_partial(t_header, -fix, 0)));
10821
10822 return swab16(csum);
10823}
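/* Note on bnx2x_csum_fix() (illustrative comment, not driver code):
 * the stack's partial checksum starts at skb->csum_start, while the
 * parsing BD wants a pseudo checksum that starts exactly at the
 * transport header. "fix" is the signed byte distance between the two,
 * so the helper folds the extra bytes out (fix > 0) or back in
 * (fix < 0) via csum_partial(), then byte-swaps the result into the
 * order the hardware expects.
 */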
10824
10825static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10826{
10827 u32 rc;
10828
10829 if (skb->ip_summed != CHECKSUM_PARTIAL)
10830 rc = XMIT_PLAIN;
10831
10832 else {
4781bfad 10833 if (skb->protocol == htons(ETH_P_IPV6)) {
755735eb 10834 rc = XMIT_CSUM_V6;
10835 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10836 rc |= XMIT_CSUM_TCP;
10837
10838 } else {
10839 rc = XMIT_CSUM_V4;
10840 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10841 rc |= XMIT_CSUM_TCP;
10842 }
10843 }
10844
10845 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10846 rc |= XMIT_GSO_V4;
10847
10848 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10849 rc |= XMIT_GSO_V6;
10850
10851 return rc;
10852}
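/* Example of the flags returned above (illustrative, not driver code):
 * a TSO'd IPv4 TCP skb with CHECKSUM_PARTIAL yields
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4, which later selects the
 * IP/L4 checksum BD flags and the LSO path in bnx2x_start_xmit().
 */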
10853
632da4d6 10854#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251 10855/* check if packet requires linearization (packet is too fragmented)
10856 no need to check fragmentation if page size > 8K (there will be no
10857 violation to FW restrictions) */
755735eb 10858static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10859 u32 xmit_type)
10860{
10861 int to_copy = 0;
10862 int hlen = 0;
10863 int first_bd_sz = 0;
10864
10865 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10866 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10867
10868 if (xmit_type & XMIT_GSO) {
10869 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10870 /* Check if LSO packet needs to be copied:
10871 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10872 int wnd_size = MAX_FETCH_BD - 3;
33471629 10873 /* Number of windows to check */
755735eb 10874 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10875 int wnd_idx = 0;
10876 int frag_idx = 0;
10877 u32 wnd_sum = 0;
10878
10879 /* Headers length */
10880 hlen = (int)(skb_transport_header(skb) - skb->data) +
10881 tcp_hdrlen(skb);
10882
10883 /* Amount of data (w/o headers) on linear part of SKB*/
10884 first_bd_sz = skb_headlen(skb) - hlen;
10885
10886 wnd_sum = first_bd_sz;
10887
10888 /* Calculate the first sum - it's special */
10889 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10890 wnd_sum +=
10891 skb_shinfo(skb)->frags[frag_idx].size;
10892
10893 /* If there was data on linear skb data - check it */
10894 if (first_bd_sz > 0) {
10895 if (unlikely(wnd_sum < lso_mss)) {
10896 to_copy = 1;
10897 goto exit_lbl;
10898 }
10899
10900 wnd_sum -= first_bd_sz;
10901 }
10902
10903 /* Others are easier: run through the frag list and
10904 check all windows */
10905 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10906 wnd_sum +=
10907 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10908
10909 if (unlikely(wnd_sum < lso_mss)) {
10910 to_copy = 1;
10911 break;
10912 }
10913 wnd_sum -=
10914 skb_shinfo(skb)->frags[wnd_idx].size;
10915 }
755735eb 10916 } else {
10917 /* in non-LSO too fragmented packet should always
10918 be linearized */
10919 to_copy = 1;
10920 }
10921 }
10922
10923exit_lbl:
10924 if (unlikely(to_copy))
10925 DP(NETIF_MSG_TX_QUEUED,
10926 "Linearization IS REQUIRED for %s packet. "
10927 "num_frags %d hlen %d first_bd_sz %d\n",
10928 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10929 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10930
10931 return to_copy;
10932}
632da4d6 10933#endif
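/* Worked example for bnx2x_pkt_req_lin() (illustrative numbers, not
 * from the driver): the FW requires every window of MAX_FETCH_BD - 3
 * consecutive data BDs to carry at least gso_size (MSS) bytes. With a
 * window of 10 BDs, frags of 100 bytes each and an MSS of 1460, each
 * window sums to about 1000 < 1460 bytes, so the sliding-window scan
 * flags the skb and bnx2x_start_xmit() falls back to skb_linearize().
 */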
755735eb 10934
10935/* called with netif_tx_lock
a2fbb9ea 10936 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10937 * netif_wake_queue()
a2fbb9ea
ET
10938 */
10939static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10940{
10941 struct bnx2x *bp = netdev_priv(dev);
ca00392c 10942 struct bnx2x_fastpath *fp, *fp_stat;
555f6c78 10943 struct netdev_queue *txq;
a2fbb9ea 10944 struct sw_tx_bd *tx_buf;
ca00392c 10945 struct eth_tx_start_bd *tx_start_bd;
10946 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
a2fbb9ea 10947 struct eth_tx_parse_bd *pbd = NULL;
10948 u16 pkt_prod, bd_prod;
755735eb 10949 int nbd, fp_index;
a2fbb9ea 10950 dma_addr_t mapping;
755735eb 10951 u32 xmit_type = bnx2x_xmit_type(bp, skb);
755735eb 10952 int i;
10953 u8 hlen = 0;
ca00392c 10954 __le16 pkt_size = 0;
a2fbb9ea 10955
10956#ifdef BNX2X_STOP_ON_ERROR
10957 if (unlikely(bp->panic))
10958 return NETDEV_TX_BUSY;
10959#endif
10960
555f6c78 10961 fp_index = skb_get_queue_mapping(skb);
10962 txq = netdev_get_tx_queue(dev, fp_index);
10963
ca00392c 10964 fp = &bp->fp[fp_index + bp->num_rx_queues];
10965 fp_stat = &bp->fp[fp_index];
755735eb 10966
231fd58a 10967 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
ca00392c 10968 fp_stat->eth_q_stats.driver_xoff++;
555f6c78 10969 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10970 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10971 return NETDEV_TX_BUSY;
10972 }
10973
755735eb 10974 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10975 " gso type %x xmit_type %x\n",
10976 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10977 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10978
632da4d6 10979#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
f5372251 10980 /* First, check if we need to linearize the skb (due to FW
10981 restrictions). No need to check fragmentation if page size > 8K
10982 (there will be no violation to FW restrictions) */
755735eb 10983 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10984 /* Statistics of linearization */
10985 bp->lin_cnt++;
10986 if (skb_linearize(skb) != 0) {
10987 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10988 "silently dropping this SKB\n");
10989 dev_kfree_skb_any(skb);
da5a662a 10990 return NETDEV_TX_OK;
755735eb 10991 }
10992 }
632da4d6 10993#endif
755735eb 10994
a2fbb9ea 10995 /*
755735eb 10996 Please read carefully. First we use one BD which we mark as start,
ca00392c 10997 then we have a parsing info BD (used for TSO or xsum),
755735eb 10998 and only then we have the rest of the TSO BDs.
a2fbb9ea 10999 (don't forget to mark the last one as last,
11000 and to unmap only AFTER you write to the BD ...)
755735eb 11001 And above all, all pbd sizes are in words - NOT DWORDS!
a2fbb9ea 11002 */
11003
11004 pkt_prod = fp->tx_pkt_prod++;
755735eb 11005 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 11006
755735eb 11007 /* get a tx_buf and first BD */
a2fbb9ea 11008 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
ca00392c 11009 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
a2fbb9ea 11010
ca00392c
EG
11011 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
11012 tx_start_bd->general_data = (UNICAST_ADDRESS <<
11013 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a 11014 /* header nbd */
ca00392c 11015 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
a2fbb9ea 11016
755735eb 11017 /* remember the first BD of the packet */
11018 tx_buf->first_bd = fp->tx_bd_prod;
11019 tx_buf->skb = skb;
ca00392c 11020 tx_buf->flags = 0;
a2fbb9ea 11021
11022 DP(NETIF_MSG_TX_QUEUED,
11023 "sending pkt %u @%p next_idx %u bd %u @%p\n",
ca00392c 11024 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
a2fbb9ea 11025
0c6671b0 11026#ifdef BCM_VLAN
11027 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
11028 (bp->flags & HW_VLAN_TX_FLAG)) {
ca00392c 11029 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
11030 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
755735eb 11031 } else
0c6671b0 11032#endif
ca00392c 11033 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 11034
ca00392c 11035 /* turn on parsing and get a BD */
11036 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11037 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
755735eb 11038
ca00392c 11039 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
755735eb 11040
11041 if (xmit_type & XMIT_CSUM) {
ca00392c 11042 hlen = (skb_network_header(skb) - skb->data) / 2;
a2fbb9ea
ET
11043
11044 /* for now NS flag is not used in Linux */
4781bfad 11045 pbd->global_data =
11046 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
11047 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 11048
755735eb 11049 pbd->ip_hlen = (skb_transport_header(skb) -
11050 skb_network_header(skb)) / 2;
11051
11052 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 11053
755735eb 11054 pbd->total_hlen = cpu_to_le16(hlen);
ca00392c 11055 hlen = hlen*2;
a2fbb9ea 11056
ca00392c 11057 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
755735eb 11058
11059 if (xmit_type & XMIT_CSUM_V4)
ca00392c 11060 tx_start_bd->bd_flags.as_bitfield |=
755735eb 11061 ETH_TX_BD_FLAGS_IP_CSUM;
11062 else
ca00392c 11063 tx_start_bd->bd_flags.as_bitfield |=
11064 ETH_TX_BD_FLAGS_IPV6;
755735eb 11065
11066 if (xmit_type & XMIT_CSUM_TCP) {
11067 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
11068
11069 } else {
11070 s8 fix = SKB_CS_OFF(skb); /* signed! */
11071
ca00392c 11072 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
a2fbb9ea 11073
755735eb 11074 DP(NETIF_MSG_TX_QUEUED,
ca00392c 11075 "hlen %d fix %d csum before fix %x\n",
11076 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
755735eb 11077
11078 /* HW bug: fixup the CSUM */
11079 pbd->tcp_pseudo_csum =
11080 bnx2x_csum_fix(skb_transport_header(skb),
11081 SKB_CS(skb), fix);
11082
11083 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
11084 pbd->tcp_pseudo_csum);
11085 }
a2fbb9ea 11086 }
11087
11088 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 11089 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea 11090
ca00392c 11091 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11092 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11093 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
11094 tx_start_bd->nbd = cpu_to_le16(nbd);
11095 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
11096 pkt_size = tx_start_bd->nbytes;
a2fbb9ea 11097
11098 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb 11099 " nbytes %d flags %x vlan %x\n",
ca00392c 11100 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
11101 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
11102 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
a2fbb9ea 11103
755735eb 11104 if (xmit_type & XMIT_GSO) {
a2fbb9ea 11105
11106 DP(NETIF_MSG_TX_QUEUED,
11107 "TSO packet len %d hlen %d total len %d tso size %d\n",
11108 skb->len, hlen, skb_headlen(skb),
11109 skb_shinfo(skb)->gso_size);
11110
ca00392c 11111 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
a2fbb9ea 11112
755735eb 11113 if (unlikely(skb_headlen(skb) > hlen))
ca00392c 11114 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
11115 hlen, bd_prod, ++nbd);
a2fbb9ea 11116
11117 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
11118 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb 11119 pbd->tcp_flags = pbd_tcp_flags(skb);
11120
11121 if (xmit_type & XMIT_GSO_V4) {
11122 pbd->ip_id = swab16(ip_hdr(skb)->id);
11123 pbd->tcp_pseudo_csum =
a2fbb9ea 11124 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
11125 ip_hdr(skb)->daddr,
11126 0, IPPROTO_TCP, 0));
755735eb 11127
11128 } else
11129 pbd->tcp_pseudo_csum =
11130 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
11131 &ipv6_hdr(skb)->daddr,
11132 0, IPPROTO_TCP, 0));
11133
a2fbb9ea 11134 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
11135 }
ca00392c 11136 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
a2fbb9ea 11137
755735eb 11138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
11139 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 11140
755735eb 11141 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
ca00392c 11142 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
11143 if (total_pkt_bd == NULL)
11144 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
a2fbb9ea 11145
755735eb 11146 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
11147 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 11148
ca00392c 11149 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
11150 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
11151 tx_data_bd->nbytes = cpu_to_le16(frag->size);
11152 le16_add_cpu(&pkt_size, frag->size);
a2fbb9ea 11153
755735eb 11154 DP(NETIF_MSG_TX_QUEUED,
ca00392c 11155 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
11156 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
11157 le16_to_cpu(tx_data_bd->nbytes));
a2fbb9ea 11158 }
11159
ca00392c 11160 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
a2fbb9ea 11161
a2fbb9ea 11162 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
11163
755735eb 11164 /* now send a tx doorbell, counting the next BD
a2fbb9ea 11165 * if the packet contains or ends with it
11166 */
11167 if (TX_BD_POFF(bd_prod) < nbd)
11168 nbd++;
11169
ca00392c 11170 if (total_pkt_bd != NULL)
11171 total_pkt_bd->total_pkt_bytes = pkt_size;
11172
a2fbb9ea 11173 if (pbd)
11174 DP(NETIF_MSG_TX_QUEUED,
11175 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
11176 " tcp_flags %x xsum %x seq %u hlen %u\n",
11177 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
11178 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 11179 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 11180
755735eb 11181 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 11182
58f4c4cf 11183 /*
11184 * Make sure that the BD data is updated before updating the producer
11185 * since FW might read the BD right after the producer is updated.
11186 * This is only applicable for weak-ordered memory model archs such
11187 * as IA-64. The following barrier is also mandatory since the FW
11188 * assumes packets must have BDs.
11189 */
11190 wmb();
11191
ca00392c 11192 fp->tx_db.data.prod += nbd;
11193 barrier();
11194 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
a2fbb9ea 11195
11196 mmiowb();
11197
755735eb 11198 fp->tx_bd_prod += nbd;
a2fbb9ea 11199
11200 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
ca00392c 11201 netif_tx_stop_queue(txq);
58f4c4cf 11202 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
11203 if we put Tx into XOFF state. */
11204 smp_mb();
ca00392c 11205 fp_stat->eth_q_stats.driver_xoff++;
a2fbb9ea 11206 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 11207 netif_tx_wake_queue(txq);
a2fbb9ea 11208 }
ca00392c 11209 fp_stat->tx_pkt++;
a2fbb9ea 11210
11211 return NETDEV_TX_OK;
11212}
11213
bb2a0f7a 11214/* called with rtnl_lock */
a2fbb9ea 11215static int bnx2x_open(struct net_device *dev)
11216{
11217 struct bnx2x *bp = netdev_priv(dev);
11218
6eccabb3 11219 netif_carrier_off(dev);
11220
a2fbb9ea 11221 bnx2x_set_power_state(bp, PCI_D0);
11222
bb2a0f7a 11223 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 11224}
11225
bb2a0f7a 11226/* called with rtnl_lock */
a2fbb9ea 11227static int bnx2x_close(struct net_device *dev)
11228{
a2fbb9ea 11229 struct bnx2x *bp = netdev_priv(dev);
11230
11231 /* Unload the driver, release IRQs */
bb2a0f7a 11232 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
11233 if (atomic_read(&bp->pdev->enable_cnt) == 1)
11234 if (!CHIP_REV_IS_SLOW(bp))
11235 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea 11236
11237 return 0;
11238}
11239
f5372251 11240/* called with netif_tx_lock from dev_mcast.c */
34f80b04 11241static void bnx2x_set_rx_mode(struct net_device *dev)
11242{
11243 struct bnx2x *bp = netdev_priv(dev);
11244 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
11245 int port = BP_PORT(bp);
11246
11247 if (bp->state != BNX2X_STATE_OPEN) {
11248 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11249 return;
11250 }
11251
11252 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
11253
11254 if (dev->flags & IFF_PROMISC)
11255 rx_mode = BNX2X_RX_MODE_PROMISC;
11256
11257 else if ((dev->flags & IFF_ALLMULTI) ||
11258 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
11259 rx_mode = BNX2X_RX_MODE_ALLMULTI;
11260
11261 else { /* some multicasts */
11262 if (CHIP_IS_E1(bp)) {
11263 int i, old, offset;
11264 struct dev_mc_list *mclist;
11265 struct mac_configuration_cmd *config =
11266 bnx2x_sp(bp, mcast_config);
11267
11268 for (i = 0, mclist = dev->mc_list;
11269 mclist && (i < dev->mc_count);
11270 i++, mclist = mclist->next) {
11271
11272 config->config_table[i].
11273 cam_entry.msb_mac_addr =
11274 swab16(*(u16 *)&mclist->dmi_addr[0]);
11275 config->config_table[i].
11276 cam_entry.middle_mac_addr =
11277 swab16(*(u16 *)&mclist->dmi_addr[2]);
11278 config->config_table[i].
11279 cam_entry.lsb_mac_addr =
11280 swab16(*(u16 *)&mclist->dmi_addr[4]);
11281 config->config_table[i].cam_entry.flags =
11282 cpu_to_le16(port);
11283 config->config_table[i].
11284 target_table_entry.flags = 0;
ca00392c 11285 config->config_table[i].target_table_entry.
11286 clients_bit_vector =
11287 cpu_to_le32(1 << BP_L_ID(bp));
34f80b04 11288 config->config_table[i].
11289 target_table_entry.vlan_id = 0;
11290
11291 DP(NETIF_MSG_IFUP,
11292 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
11293 config->config_table[i].
11294 cam_entry.msb_mac_addr,
11295 config->config_table[i].
11296 cam_entry.middle_mac_addr,
11297 config->config_table[i].
11298 cam_entry.lsb_mac_addr);
11299 }
8d9c5f34 11300 old = config->hdr.length;
34f80b04 11301 if (old > i) {
11302 for (; i < old; i++) {
11303 if (CAM_IS_INVALID(config->
11304 config_table[i])) {
af246401 11305 /* already invalidated */
34f80b04 11306 break;
11307 }
11308 /* invalidate */
11309 CAM_INVALIDATE(config->
11310 config_table[i]);
11311 }
11312 }
11313
11314 if (CHIP_REV_IS_SLOW(bp))
11315 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
11316 else
11317 offset = BNX2X_MAX_MULTICAST*(1 + port);
11318
8d9c5f34 11319 config->hdr.length = i;
34f80b04 11320 config->hdr.offset = offset;
8d9c5f34 11321 config->hdr.client_id = bp->fp->cl_id;
34f80b04 11322 config->hdr.reserved1 = 0;
11323
11324 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11325 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11326 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
11327 0);
11328 } else { /* E1H */
11329 /* Accept one or more multicasts */
11330 struct dev_mc_list *mclist;
11331 u32 mc_filter[MC_HASH_SIZE];
11332 u32 crc, bit, regidx;
11333 int i;
11334
11335 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
11336
11337 for (i = 0, mclist = dev->mc_list;
11338 mclist && (i < dev->mc_count);
11339 i++, mclist = mclist->next) {
11340
7c510e4b 11341 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
11342 mclist->dmi_addr);
34f80b04 11343
11344 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
11345 bit = (crc >> 24) & 0xff;
11346 regidx = bit >> 5;
11347 bit &= 0x1f;
11348 mc_filter[regidx] |= (1 << bit);
11349 }
11350
11351 for (i = 0; i < MC_HASH_SIZE; i++)
11352 REG_WR(bp, MC_HASH_OFFSET(bp, i),
11353 mc_filter[i]);
11354 }
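/* Worked example for the E1H multicast hash above (illustrative, not
 * driver code): the filter is MC_HASH_SIZE (8) registers of 32 bits
 * each, i.e. 256 bits, and the top byte of the crc32c of the MAC picks
 * one bit. For a hypothetical MAC whose crc32c top byte is 0xa7:
 *
 *	bit = 0xa7 = 167, regidx = 167 >> 5 = 5, 167 & 0x1f = 7
 *
 * so bit 7 of mc_filter[5] is set. Any MAC hashing to a set bit is
 * accepted, so false positives are possible but misses are not.
 */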
11355 }
11356
11357 bp->rx_mode = rx_mode;
11358 bnx2x_set_storm_rx_mode(bp);
11359}
11360
11361/* called with rtnl_lock */
a2fbb9ea 11362static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11363{
11364 struct sockaddr *addr = p;
11365 struct bnx2x *bp = netdev_priv(dev);
11366
34f80b04 11367 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea 11368 return -EINVAL;
11369
11370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04 11371 if (netif_running(dev)) {
11372 if (CHIP_IS_E1(bp))
3101c2bc 11373 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 11374 else
3101c2bc 11375 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 11376 }
a2fbb9ea 11377
11378 return 0;
11379}
11380
c18487ee 11381/* called with rtnl_lock */
01cd4528 11382static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11383 int devad, u16 addr)
a2fbb9ea 11384{
01cd4528 11385 struct bnx2x *bp = netdev_priv(netdev);
11386 u16 value;
11387 int rc;
11388 u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 11389
01cd4528 11390 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11391 prtad, devad, addr);
a2fbb9ea 11392
01cd4528 11393 if (prtad != bp->mdio.prtad) {
11394 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11395 prtad, bp->mdio.prtad);
11396 return -EINVAL;
11397 }
11398
11399 /* The HW expects different devad if CL22 is used */
11400 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11401
01cd4528 11402 bnx2x_acquire_phy_lock(bp);
11403 rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
11404 devad, addr, &value);
11405 bnx2x_release_phy_lock(bp);
11406 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11407
01cd4528 11408 if (!rc)
11409 rc = value;
11410 return rc;
11411}
a2fbb9ea 11412
01cd4528 11413/* called with rtnl_lock */
11414static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11415 u16 addr, u16 value)
11416{
11417 struct bnx2x *bp = netdev_priv(netdev);
11418 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
11419 int rc;
11420
11421 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
11422 " value 0x%x\n", prtad, devad, addr, value);
11423
11424 if (prtad != bp->mdio.prtad) {
11425 DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
11426 prtad, bp->mdio.prtad);
11427 return -EINVAL;
a2fbb9ea 11428 }
11429
01cd4528 11430 /* The HW expects different devad if CL22 is used */
11431 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11432
01cd4528 11433 bnx2x_acquire_phy_lock(bp);
11434 rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
11435 devad, addr, value);
11436 bnx2x_release_phy_lock(bp);
11437 return rc;
11438}
c18487ee 11439
01cd4528 11440/* called with rtnl_lock */
11441static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11442{
11443 struct bnx2x *bp = netdev_priv(dev);
11444 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11445
01cd4528 11446 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11447 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11448
01cd4528 11449 if (!netif_running(dev))
11450 return -EAGAIN;
11451
11452 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea 11453}
11454
34f80b04 11455/* called with rtnl_lock */
a2fbb9ea 11456static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
11457{
11458 struct bnx2x *bp = netdev_priv(dev);
34f80b04 11459 int rc = 0;
a2fbb9ea 11460
11461 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
11462 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
11463 return -EINVAL;
11464
11465 /* This does not race with packet allocation
c14423fe 11466 * because the actual alloc size is
a2fbb9ea 11467 * only updated as part of load
11468 */
11469 dev->mtu = new_mtu;
11470
11471 if (netif_running(dev)) {
34f80b04 11472 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
11473 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 11474 }
34f80b04 11475
11476 return rc;
a2fbb9ea 11477}
11478
11479static void bnx2x_tx_timeout(struct net_device *dev)
11480{
11481 struct bnx2x *bp = netdev_priv(dev);
11482
11483#ifdef BNX2X_STOP_ON_ERROR
11484 if (!bp->panic)
11485 bnx2x_panic();
11486#endif
11487 /* This allows the netif to be shutdown gracefully before resetting */
11488 schedule_work(&bp->reset_task);
11489}
11490
11491#ifdef BCM_VLAN
34f80b04 11492/* called with rtnl_lock */
a2fbb9ea 11493static void bnx2x_vlan_rx_register(struct net_device *dev,
11494 struct vlan_group *vlgrp)
11495{
11496 struct bnx2x *bp = netdev_priv(dev);
11497
11498 bp->vlgrp = vlgrp;
0c6671b0 11499
11500 /* Set flags according to the required capabilities */
11501 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
11502
11503 if (dev->features & NETIF_F_HW_VLAN_TX)
11504 bp->flags |= HW_VLAN_TX_FLAG;
11505
11506 if (dev->features & NETIF_F_HW_VLAN_RX)
11507 bp->flags |= HW_VLAN_RX_FLAG;
11508
a2fbb9ea 11509 if (netif_running(dev))
49d66772 11510 bnx2x_set_client_config(bp);
a2fbb9ea 11511}
34f80b04 11512
a2fbb9ea 11513#endif
11514
11515#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11516static void poll_bnx2x(struct net_device *dev)
11517{
11518 struct bnx2x *bp = netdev_priv(dev);
11519
11520 disable_irq(bp->pdev->irq);
11521 bnx2x_interrupt(bp->pdev->irq, dev);
11522 enable_irq(bp->pdev->irq);
11523}
11524#endif
11525
c64213cd 11526static const struct net_device_ops bnx2x_netdev_ops = {
11527 .ndo_open = bnx2x_open,
11528 .ndo_stop = bnx2x_close,
11529 .ndo_start_xmit = bnx2x_start_xmit,
356e2385 11530 .ndo_set_multicast_list = bnx2x_set_rx_mode,
c64213cd 11531 .ndo_set_mac_address = bnx2x_change_mac_addr,
11532 .ndo_validate_addr = eth_validate_addr,
11533 .ndo_do_ioctl = bnx2x_ioctl,
11534 .ndo_change_mtu = bnx2x_change_mtu,
11535 .ndo_tx_timeout = bnx2x_tx_timeout,
11536#ifdef BCM_VLAN
11537 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
11538#endif
11539#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
11540 .ndo_poll_controller = poll_bnx2x,
11541#endif
11542};
11543
34f80b04 11544static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
11545 struct net_device *dev)
a2fbb9ea 11546{
11547 struct bnx2x *bp;
11548 int rc;
11549
11550 SET_NETDEV_DEV(dev, &pdev->dev);
11551 bp = netdev_priv(dev);
11552
34f80b04 11553 bp->dev = dev;
11554 bp->pdev = pdev;
a2fbb9ea 11555 bp->flags = 0;
34f80b04 11556 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea 11557
11558 rc = pci_enable_device(pdev);
11559 if (rc) {
11560 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
11561 goto err_out;
11562 }
11563
11564 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
11565 printk(KERN_ERR PFX "Cannot find PCI device base address,"
11566 " aborting\n");
11567 rc = -ENODEV;
11568 goto err_out_disable;
11569 }
11570
11571 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11572 printk(KERN_ERR PFX "Cannot find second PCI device"
11573 " base address, aborting\n");
11574 rc = -ENODEV;
11575 goto err_out_disable;
11576 }
11577
34f80b04 11578 if (atomic_read(&pdev->enable_cnt) == 1) {
11579 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
11580 if (rc) {
11581 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
11582 " aborting\n");
11583 goto err_out_disable;
11584 }
a2fbb9ea 11585
34f80b04 11586 pci_set_master(pdev);
11587 pci_save_state(pdev);
11588 }
a2fbb9ea 11589
11590 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
11591 if (bp->pm_cap == 0) {
11592 printk(KERN_ERR PFX "Cannot find power management"
11593 " capability, aborting\n");
11594 rc = -EIO;
11595 goto err_out_release;
11596 }
11597
11598 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
11599 if (bp->pcie_cap == 0) {
11600 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
11601 " aborting\n");
11602 rc = -EIO;
11603 goto err_out_release;
11604 }
11605
6a35528a 11606 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 11607 bp->flags |= USING_DAC_FLAG;
6a35528a 11608 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
a2fbb9ea 11609 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
11610 " failed, aborting\n");
11611 rc = -EIO;
11612 goto err_out_release;
11613 }
11614
284901a9 11615 } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
a2fbb9ea 11616 printk(KERN_ERR PFX "System does not support DMA,"
11617 " aborting\n");
11618 rc = -EIO;
11619 goto err_out_release;
11620 }
11621
34f80b04 11622 dev->mem_start = pci_resource_start(pdev, 0);
11623 dev->base_addr = dev->mem_start;
11624 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea 11625
11626 dev->irq = pdev->irq;
11627
275f165f 11628 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 11629 if (!bp->regview) {
11630 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
11631 rc = -ENOMEM;
11632 goto err_out_release;
11633 }
11634
34f80b04 11635 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
11636 min_t(u64, BNX2X_DB_SIZE,
11637 pci_resource_len(pdev, 2)));
a2fbb9ea 11638 if (!bp->doorbells) {
11639 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
11640 rc = -ENOMEM;
11641 goto err_out_unmap;
11642 }
11643
11644 bnx2x_set_power_state(bp, PCI_D0);
11645
34f80b04 11646 /* clean indirect addresses */
11647 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
11648 PCICFG_VENDOR_ID_OFFSET);
11649 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
11650 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
11651 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
11652 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 11653
34f80b04 11654 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 11655
c64213cd 11656 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 11657 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04 11658 dev->features |= NETIF_F_SG;
11659 dev->features |= NETIF_F_HW_CSUM;
11660 if (bp->flags & USING_DAC_FLAG)
11661 dev->features |= NETIF_F_HIGHDMA;
5316bc0b 11662 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11663 dev->features |= NETIF_F_TSO6;
34f80b04 11664#ifdef BCM_VLAN
11665 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 11666 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
5316bc0b 11667
11668 dev->vlan_features |= NETIF_F_SG;
11669 dev->vlan_features |= NETIF_F_HW_CSUM;
11670 if (bp->flags & USING_DAC_FLAG)
11671 dev->vlan_features |= NETIF_F_HIGHDMA;
11672 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
11673 dev->vlan_features |= NETIF_F_TSO6;
34f80b04 11674#endif
a2fbb9ea 11675
01cd4528 11676 /* get_port_hwinfo() will set prtad and mmds properly */
11677 bp->mdio.prtad = MDIO_PRTAD_NONE;
11678 bp->mdio.mmds = 0;
11679 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
11680 bp->mdio.dev = dev;
11681 bp->mdio.mdio_read = bnx2x_mdio_read;
11682 bp->mdio.mdio_write = bnx2x_mdio_write;
11683
a2fbb9ea 11684 return 0;
11685
11686err_out_unmap:
11687 if (bp->regview) {
11688 iounmap(bp->regview);
11689 bp->regview = NULL;
11690 }
a2fbb9ea 11691 if (bp->doorbells) {
11692 iounmap(bp->doorbells);
11693 bp->doorbells = NULL;
11694 }
11695
11696err_out_release:
34f80b04 11697 if (atomic_read(&pdev->enable_cnt) == 1)
11698 pci_release_regions(pdev);
a2fbb9ea 11699
11700err_out_disable:
11701 pci_disable_device(pdev);
11702 pci_set_drvdata(pdev, NULL);
11703
11704err_out:
11705 return rc;
11706}
11707
37f9ce62 11708static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
11709 int *width, int *speed)
25047950 11710{
11711 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
11712
37f9ce62 11713 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 11714
37f9ce62 11715 /* return value of 1=2.5GHz 2=5GHz */
11716 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 11717}
37f9ce62 11718
94a78b79 11719static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
11720{
37f9ce62 11721 const struct firmware *firmware = bp->firmware;
94a78b79 11722 struct bnx2x_fw_file_hdr *fw_hdr;
11723 struct bnx2x_fw_file_section *sections;
94a78b79 11724 u32 offset, len, num_ops;
37f9ce62 11725 u16 *ops_offsets;
94a78b79 11726 int i;
37f9ce62 11727 const u8 *fw_ver;
94a78b79 11728
11729 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
11730 return -EINVAL;
11731
11732 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
11733 sections = (struct bnx2x_fw_file_section *)fw_hdr;
11734
11735 /* Make sure none of the offsets and sizes make us read beyond
11736 * the end of the firmware data */
11737 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
11738 offset = be32_to_cpu(sections[i].offset);
11739 len = be32_to_cpu(sections[i].len);
11740 if (offset + len > firmware->size) {
37f9ce62 11741 printk(KERN_ERR PFX "Section %d length is out of "
11742 "bounds\n", i);
94a78b79 11743 return -EINVAL;
11744 }
11745 }
11746
11747 /* Likewise for the init_ops offsets */
11748 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
11749 ops_offsets = (u16 *)(firmware->data + offset);
11750 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
11751
11752 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
11753 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
37f9ce62 11754 printk(KERN_ERR PFX "Section offset %d is out of "
11755 "bounds\n", i);
94a78b79 11756 return -EINVAL;
11757 }
11758 }
11759
11760 /* Check FW version */
11761 offset = be32_to_cpu(fw_hdr->fw_version.offset);
11762 fw_ver = firmware->data + offset;
11763 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
11764 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
11765 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
11766 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
11767 printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
11768 " Should be %d.%d.%d.%d\n",
11769 fw_ver[0], fw_ver[1], fw_ver[2],
11770 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
11771 BCM_5710_FW_MINOR_VERSION,
11772 BCM_5710_FW_REVISION_VERSION,
11773 BCM_5710_FW_ENGINEERING_VERSION);
11774 return -EINVAL;
11775 }
11776
11777 return 0;
11778}
11779
11780static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11781{
11782 u32 i;
11783 const __be32 *source = (const __be32*)_source;
11784 u32 *target = (u32*)_target;
11785
11786 for (i = 0; i < n/4; i++)
11787 target[i] = be32_to_cpu(source[i]);
11788}
11789
11790/*
11791 Ops array is stored in the following format:
11792 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
11793 */
11794static void inline bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
11795{
11796 u32 i, j, tmp;
11797 const __be32 *source = (const __be32*)_source;
11798 struct raw_op *target = (struct raw_op*)_target;
11799
11800 for (i = 0, j = 0; i < n/8; i++, j+=2) {
11801 tmp = be32_to_cpu(source[j]);
11802 target[i].op = (tmp >> 24) & 0xff;
11803 target[i].offset = tmp & 0xffffff;
11804 target[i].raw_data = be32_to_cpu(source[j+1]);
11805 }
11806}
11807static void inline be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11808{
11809 u32 i;
11810 u16 *target = (u16*)_target;
11811 const __be16 *source = (const __be16*)_source;
11812
11813 for (i = 0; i < n/2; i++)
11814 target[i] = be16_to_cpu(source[i]);
11815}
11816
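/*
 * Worked example (illustrative values): fed the eight bytes
 * 02 01 02 03 aa bb cc dd, bnx2x_prep_ops() reads tmp == 0x02010203 and
 * produces op == 0x02, offset == 0x010203, raw_data == 0xaabbccdd.
 */
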
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes " \
			       "for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

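/*
 * Hand-expanded for reference: BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) kmallocs bp->init_data sized by
 * fw_hdr->init_data.len, jumps to request_firmware_exit on allocation
 * failure, and otherwise byte-swaps the init_data section of
 * bp->firmware->data into bp->init_data via be32_to_cpu_n().
 */
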
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n",
		       fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

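/*
 * Example (version numbers hypothetical; the real ones come from the
 * BCM_5710_FW_*_VERSION macros): on an E1H chip with firmware 4.8.53.0
 * the function above requests "bnx2x-e1h-4.8.53.0.fw" from userspace via
 * request_firmware().
 */
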
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev and its private area are zeroed by alloc_etherdev_mq() */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

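/*
 * Illustrative boot-log output of the two printk() calls above (all
 * values hypothetical):
 *
 *   eth0: Broadcom NetXtreme II BCM57711E XGb (A1) PCI-E x8 5GHz (Gen2)
 *   found at mem fa000000, IRQ 16, node addr 00:10:18:aa:bb:cc
 */
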
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

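/*
 * Summary note (not from the original source): bnx2x_suspend() and
 * bnx2x_resume() mirror each other under rtnl_lock (save vs. restore of
 * PCI config space, NIC unload vs. reload), and both return early when
 * the interface is down, since there is no datapath to quiesce.
 */
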
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

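/*
 * Summary note (not from the original source): unlike the regular
 * bnx2x_nic_unload(), this EEH variant only tears down driver-side state
 * (timer, IRQs, SKBs, SGEs, memory) and invalidates the shadow CAM table;
 * it does not attempt the usual shutdown handshake with a chip that may
 * no longer be reachable.
 */
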
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};

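/*
 * Recovery flow (standard pci_error_handlers semantics, summarized for
 * orientation): the PCI core calls .error_detected first; when that
 * returns PCI_ERS_RESULT_NEED_RESET the slot is reset and .slot_reset
 * runs, and once it reports PCI_ERS_RESULT_RECOVERED, .resume restarts
 * traffic.
 */
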
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	printk(KERN_INFO "%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
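
/*
 * Ordering note (as implemented above): the "bnx2x" workqueue is created
 * before pci_register_driver() so it already exists by the time probe can
 * run, and it is destroyed only after pci_unregister_driver() has torn
 * down every device.
 */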