bnx2x: Increase DMAE max write size for 57711
drivers/net/bnx2x_main.c

/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>


#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"

#define DRV_MODULE_VERSION	"1.52.1-8"
#define DRV_MODULE_RELDATE	"2010/04/01"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

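/*
 * Note on bnx2x_reg_wr_ind()/bnx2x_reg_rd_ind() above: GRC registers are
 * reached through a window in PCI config space - the target address is
 * written to PCICFG_GRC_ADDRESS and the data is then moved through
 * PCICFG_GRC_DATA; the window is parked back at PCICFG_VENDOR_ID_OFFSET
 * afterwards, presumably so a stray config cycle cannot hit a live
 * register. This path needs no DMA, which is why it is usable before the
 * DMAE engine is ready (see the !bp->dmae_ready fallbacks below).
 */
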
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

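/*
 * bnx2x_post_dmae() above treats the DMAE command memory as an array of
 * struct dmae_command cells: the command image is copied into cell 'idx'
 * one dword at a time, and the engine is then kicked by writing 1 to the
 * matching "go" register from dmae_reg_go_c[] (one per command cell,
 * 16 in total).
 */
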
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

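/*
 * Completion handshake used by bnx2x_write_dmae() above (and mirrored in
 * bnx2x_read_dmae() below): the command carries comp_addr/comp_val, the
 * slowpath wb_comp word is zeroed before the command is posted, and the
 * caller then polls until the engine writes DMAE_COMP_VAL back - up to
 * 200 iterations of 5us (100ms each on slow emulation/FPGA chip revs)
 * before declaring a timeout. bp->dmae_mutex serializes users of the
 * single slowpath completion buffer.
 */
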
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

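/*
 * bnx2x_write_dmae_phys_len() above splits a large write into
 * DMAE_LEN32_WR_MAX(bp)-sized chunks. The limit is evaluated per chip
 * via the bp argument - per this patch's subject, 57711 gets a larger
 * maximum write size. Since 'len' counts 32-bit words, the byte offset
 * advances by dmae_wr_max * 4 per chunk while len drops by dmae_wr_max,
 * and the tail (len <= dmae_wr_max) is written in one final command.
 */
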
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u)  def_u_idx(%u)  def_x_idx(%u)"
		  "  def_t_idx(%u)  def_att_idx(%u)  attn_state(%u)"
		  "  spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x)  rx_bd_cons(%x)"
			  "  *rx_bd_cons_sb(%x)  rx_comp_prod(%x)"
			  "  rx_comp_cons(%x)  *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("      rx_sge_prod(%x)  last_max_sge(%x)"
			  "  fp_u_idx(%x)  *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x)  tx_pkt_cons(%x)"
			  "  tx_bd_prod(%x)  tx_bd_cons(%x)  *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("      fp_c_idx(%x)  *sb_c_idx(%x)"
			  "  tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

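/*
 * Teardown ordering in bnx2x_int_disable_sync() above: intr_sem is
 * bumped first so that any ISR which still fires bails out early, the
 * HW is then optionally masked, synchronize_irq() waits out every
 * vector that may be executing (MSI-X: the slowpath vector, an optional
 * CNIC vector, then one per queue), and finally the slowpath work item
 * is cancelled and the workqueue flushed.
 */
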
/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}

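/*
 * The ack written by bnx2x_ack_sb() above is a single dword: the new
 * status-block index plus sb_id/storm/update/op packed into
 * sb_id_and_flags, posted to the port's HC command register. The
 * trailing mmiowb()/barrier() pair makes sure the ack is visible before
 * the caller goes on, e.g. to re-enable interrupts.
 */
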
static inline void bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;

	barrier(); /* status block is written to by the chip */
	fp->fp_c_idx = fpsb->c_status_block.status_block_index;
	fp->fp_u_idx = fpsb->u_status_block.status_block_index;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	/* prefetch skb end pointer to speedup dev_kfree_skb() */
	prefetch(&skb->end);

	DP(BNX2X_MSG_OFF, "pkt_idx %d  buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		dma_unmap_page(&bp->pdev->dev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), DMA_TO_DEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

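/*
 * BD bookkeeping in bnx2x_free_tx_pkt() above: a transmitted packet
 * occupies a start BD (unmapped with dma_unmap_single), always a parse
 * BD, optionally a TSO split-header BD (neither of which carries a
 * mapping), and then one data BD per fragment (each unmapped with
 * dma_unmap_page). 'nbd' from the start BD drives the walk, and the
 * returned new_cons is the index just past the last BD of this packet.
 */
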
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

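/*
 * Illustrative numbers for the math above (values are made up, not from
 * a real configuration): with, say, tx_ring_size = 4096, NUM_TX_RINGS = 8,
 * prod = 300 and cons = 100, used = (300 - 100) + 8 = 208, so
 * bnx2x_tx_avail() returns 4096 - 208 = 3888. The "next page" entries
 * can never carry packets, so they are permanently counted as used.
 */
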
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 hw_cons;

	/* Tell compiler that status block fields can change */
	barrier();
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	return hw_cons != fp->tx_pkt_cons;
}

956
957static int bnx2x_tx_int(struct bnx2x_fastpath *fp)
a2fbb9ea
ET
958{
959 struct bnx2x *bp = fp->bp;
555f6c78 960 struct netdev_queue *txq;
a2fbb9ea 961 u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
a2fbb9ea
ET
962
963#ifdef BNX2X_STOP_ON_ERROR
964 if (unlikely(bp->panic))
54b9ddaa 965 return -1;
a2fbb9ea
ET
966#endif
967
54b9ddaa 968 txq = netdev_get_tx_queue(bp->dev, fp->index);
a2fbb9ea
ET
969 hw_cons = le16_to_cpu(*fp->tx_cons_sb);
970 sw_cons = fp->tx_pkt_cons;
971
972 while (sw_cons != hw_cons) {
973 u16 pkt_cons;
974
975 pkt_cons = TX_BD(sw_cons);
976
977 /* prefetch(bp->tx_buf_ring[pkt_cons].skb); */
978
34f80b04 979 DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
a2fbb9ea
ET
980 hw_cons, sw_cons, pkt_cons);
981
34f80b04 982/* if (NEXT_TX_IDX(sw_cons) != hw_cons) {
a2fbb9ea
ET
983 rmb();
984 prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
985 }
986*/
987 bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
988 sw_cons++;
a2fbb9ea
ET
989 }
990
991 fp->tx_pkt_cons = sw_cons;
992 fp->tx_bd_cons = bd_cons;
993
c16cc0b4
VZ
994 /* Need to make the tx_bd_cons update visible to start_xmit()
995 * before checking for netif_tx_queue_stopped(). Without the
996 * memory barrier, there is a small possibility that
997 * start_xmit() will miss it and cause the queue to be stopped
998 * forever.
999 */
2d99cf16 1000 smp_mb();
c16cc0b4 1001
a2fbb9ea 1002 /* TBD need a thresh? */
555f6c78 1003 if (unlikely(netif_tx_queue_stopped(txq))) {
c16cc0b4
VZ
1004 /* Taking tx_lock() is needed to prevent reenabling the queue
1005 * while it's empty. This could have happen if rx_action() gets
1006 * suspended in bnx2x_tx_int() after the condition before
1007 * netif_tx_wake_queue(), while tx_action (bnx2x_start_xmit()):
1008 *
1009 * stops the queue->sees fresh tx_bd_cons->releases the queue->
1010 * sends some packets consuming the whole queue again->
1011 * stops the queue
6044735d 1012 */
c16cc0b4
VZ
1013
1014 __netif_tx_lock(txq, smp_processor_id());
6044735d 1015
555f6c78 1016 if ((netif_tx_queue_stopped(txq)) &&
da5a662a 1017 (bp->state == BNX2X_STATE_OPEN) &&
a2fbb9ea 1018 (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
555f6c78 1019 netif_tx_wake_queue(txq);
c16cc0b4
VZ
1020
1021 __netif_tx_unlock(txq);
a2fbb9ea 1022 }
54b9ddaa 1023 return 0;
a2fbb9ea
ET
1024}
1025
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d)  "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

#ifdef BCM_CNIC
	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
		bnx2x_cnic_cfc_comp(bp, cid);
		break;
#endif

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		bp->set_mac_pending--;
		smp_wmb();
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = dma_map_page(&bp->pdev->dev, page, 0,
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	dma_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_size,
				 DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	dma_sync_single_for_device(&bp->pdev->dev,
				   dma_unmap_addr(cons_rx_buf, mapping),
				   RX_COPY_THRESH, DMA_FROM_DEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

1243
1244static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
1245 struct eth_fast_path_rx_cqe *fp_cqe)
1246{
1247 struct bnx2x *bp = fp->bp;
4f40f2cb 1248 u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
7a9b2557 1249 le16_to_cpu(fp_cqe->len_on_bd)) >>
4f40f2cb 1250 SGE_PAGE_SHIFT;
7a9b2557
VZ
1251 u16 last_max, last_elem, first_elem;
1252 u16 delta = 0;
1253 u16 i;
1254
1255 if (!sge_len)
1256 return;
1257
1258 /* First mark all used pages */
1259 for (i = 0; i < sge_len; i++)
1260 SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));
1261
1262 DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
1263 sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1264
1265 /* Here we assume that the last SGE index is the biggest */
1266 prefetch((void *)(fp->sge_mask));
1267 bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));
1268
1269 last_max = RX_SGE(fp->last_max_sge);
1270 last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
1271 first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;
1272
1273 /* If ring is not full */
1274 if (last_elem + 1 != first_elem)
1275 last_elem++;
1276
1277 /* Now update the prod */
1278 for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
1279 if (likely(fp->sge_mask[i]))
1280 break;
1281
1282 fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
1283 delta += RX_SGE_MASK_ELEM_SZ;
1284 }
1285
1286 if (delta > 0) {
1287 fp->rx_sge_prod += delta;
1288 /* clear page-end entries */
1289 bnx2x_clear_sge_mask_next_elems(fp);
1290 }
1291
1292 DP(NETIF_MSG_RX_STATUS,
1293 "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
1294 fp->last_max_sge, fp->rx_sge_prod);
1295}
1296
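/*
 * The sge_mask manipulated above is a bitmap with one bit per SGE ring
 * entry, grouped into u64 elements of RX_SGE_MASK_ELEM_SZ entries each.
 * Bits are cleared as the FW consumes pages; the producer may only
 * advance over elements whose mask has dropped to zero, and each element
 * passed is refilled to all-ones and accounted into 'delta'. The two
 * bits per page that correspond to "next page" entries are kept
 * permanently cleared by bnx2x_clear_sge_mask_next_elems(), since the FW
 * never reports them.
 */
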
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = dma_map_single(&bp->pdev->dev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, DMA_FROM_DEVICE);
	dma_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d  fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		dma_unmap_page(&bp->pdev->dev,
			       dma_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, DMA_FROM_DEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, DMA_FROM_DEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail...  "
				  "pad %d  len %d  rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_gro_receive(&fp->napi, bp->vlgrp,
						 le16_to_cpu(cqe->fast_path_cqe.
							     vlan_tag), skb);
			else
#endif
				napi_gro_receive(&fp->napi, skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW assumes
	 * BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]:  wrote  bd_prod %u  cqe_prod %u  sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

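/*
 * bnx2x_update_rx_prod() above publishes all three RX producers (BD,
 * CQE and SGE) as one struct ustorm_eth_rx_producers image, copied dword
 * by dword into USTORM internal memory at the queue's
 * USTORM_RX_PRODS_OFFSET slot. The wmb() before the copy orders the ring
 * updates against the producer write; the mmiowb() after it keeps
 * producer updates ordered on the bus.
 */
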
a2fbb9ea
ET
1541static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
1542{
1543 struct bnx2x *bp = fp->bp;
34f80b04 1544 u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
a2fbb9ea
ET
1545 u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
1546 int rx_pkt = 0;
1547
1548#ifdef BNX2X_STOP_ON_ERROR
1549 if (unlikely(bp->panic))
1550 return 0;
1551#endif
1552
34f80b04
EG
1553 /* CQ "next element" is of the size of the regular element,
1554 that's why it's ok here */
a2fbb9ea
ET
1555 hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
1556 if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
1557 hw_comp_cons++;
1558
1559 bd_cons = fp->rx_bd_cons;
1560 bd_prod = fp->rx_bd_prod;
34f80b04 1561 bd_prod_fw = bd_prod;
a2fbb9ea
ET
1562 sw_comp_cons = fp->rx_comp_cons;
1563 sw_comp_prod = fp->rx_comp_prod;
1564
1565 /* Memory barrier necessary as speculative reads of the rx
1566 * buffer can be ahead of the index in the status block
1567 */
1568 rmb();
1569
1570 DP(NETIF_MSG_RX_STATUS,
1571 "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
0626b899 1572 fp->index, hw_comp_cons, sw_comp_cons);
a2fbb9ea
ET
1573
1574 while (sw_comp_cons != hw_comp_cons) {
34f80b04 1575 struct sw_rx_bd *rx_buf = NULL;
a2fbb9ea
ET
1576 struct sk_buff *skb;
1577 union eth_rx_cqe *cqe;
34f80b04
EG
1578 u8 cqe_fp_flags;
1579 u16 len, pad;
a2fbb9ea
ET
1580
1581 comp_ring_cons = RCQ_BD(sw_comp_cons);
1582 bd_prod = RX_BD(bd_prod);
1583 bd_cons = RX_BD(bd_cons);
1584
619e7a66
EG
1585 /* Prefetch the page containing the BD descriptor
1586 at producer's index. It will be needed when new skb is
1587 allocated */
1588 prefetch((void *)(PAGE_ALIGN((unsigned long)
1589 (&fp->rx_desc_ring[bd_prod])) -
1590 PAGE_SIZE + 1));
1591
a2fbb9ea 1592 cqe = &fp->rx_comp_ring[comp_ring_cons];
34f80b04 1593 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
a2fbb9ea 1594
a2fbb9ea 1595 DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
34f80b04
EG
1596 " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
1597 cqe_fp_flags, cqe->fast_path_cqe.status_flags,
68d59484 1598 le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
34f80b04
EG
1599 le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
1600 le16_to_cpu(cqe->fast_path_cqe.pkt_len));
a2fbb9ea
ET
1601
1602 /* is this a slowpath msg? */
34f80b04 1603 if (unlikely(CQE_TYPE(cqe_fp_flags))) {
a2fbb9ea
ET
1604 bnx2x_sp_event(fp, cqe);
1605 goto next_cqe;
1606
1607 /* this is an rx packet */
1608 } else {
1609 rx_buf = &fp->rx_buf_ring[bd_cons];
1610 skb = rx_buf->skb;
54b9ddaa
VZ
1611 prefetch(skb);
1612 prefetch((u8 *)skb + 256);
a2fbb9ea
ET
1613 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
1614 pad = cqe->fast_path_cqe.placement_offset;
1615
7a9b2557
VZ
1616 /* If CQE is marked both TPA_START and TPA_END
1617 it is a non-TPA CQE */
1618 if ((!fp->disable_tpa) &&
1619 (TPA_TYPE(cqe_fp_flags) !=
1620 (TPA_TYPE_START | TPA_TYPE_END))) {
3196a88a 1621 u16 queue = cqe->fast_path_cqe.queue_index;
7a9b2557
VZ
1622
1623 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
1624 DP(NETIF_MSG_RX_STATUS,
1625 "calling tpa_start on queue %d\n",
1626 queue);
1627
1628 bnx2x_tpa_start(fp, queue, skb,
1629 bd_cons, bd_prod);
1630 goto next_rx;
1631 }
1632
1633 if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
1634 DP(NETIF_MSG_RX_STATUS,
1635 "calling tpa_stop on queue %d\n",
1636 queue);
1637
1638 if (!BNX2X_RX_SUM_FIX(cqe))
1639 BNX2X_ERR("STOP on none TCP "
1640 "data\n");
1641
1642 /* This is a size of the linear data
1643 on this skb */
1644 len = le16_to_cpu(cqe->fast_path_cqe.
1645 len_on_bd);
1646 bnx2x_tpa_stop(bp, fp, queue, pad,
1647 len, cqe, comp_ring_cons);
1648#ifdef BNX2X_STOP_ON_ERROR
1649 if (bp->panic)
17cb4006 1650 return 0;
7a9b2557
VZ
1651#endif
1652
1653 bnx2x_update_sge_prod(fp,
1654 &cqe->fast_path_cqe);
1655 goto next_cqe;
1656 }
1657 }
1658
1a983142
FT
1659 dma_sync_single_for_device(&bp->pdev->dev,
1660 dma_unmap_addr(rx_buf, mapping),
1661 pad + RX_COPY_THRESH,
1662 DMA_FROM_DEVICE);
a2fbb9ea
ET
1663 prefetch(skb);
1664 prefetch(((char *)(skb)) + 128);
1665
1666 /* is this an error packet? */
34f80b04 1667 if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
a2fbb9ea 1668 DP(NETIF_MSG_RX_ERR,
34f80b04
EG
1669 "ERROR flags %x rx packet %u\n",
1670 cqe_fp_flags, sw_comp_cons);
de832a55 1671 fp->eth_q_stats.rx_err_discard_pkt++;
a2fbb9ea
ET
1672 goto reuse_rx;
1673 }
1674
1675 /* Since we don't have a jumbo ring
1676 * copy small packets if mtu > 1500
1677 */
1678 if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
1679 (len <= RX_COPY_THRESH)) {
1680 struct sk_buff *new_skb;
1681
1682 new_skb = netdev_alloc_skb(bp->dev,
1683 len + pad);
1684 if (new_skb == NULL) {
1685 DP(NETIF_MSG_RX_ERR,
34f80b04 1686 "ERROR packet dropped "
a2fbb9ea 1687 "because of alloc failure\n");
de832a55 1688 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1689 goto reuse_rx;
1690 }
1691
1692 /* aligned copy */
1693 skb_copy_from_linear_data_offset(skb, pad,
1694 new_skb->data + pad, len);
1695 skb_reserve(new_skb, pad);
1696 skb_put(new_skb, len);
1697
1698 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1699
1700 skb = new_skb;
1701
a119a069
EG
1702 } else
1703 if (likely(bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0)) {
1a983142
FT
1704 dma_unmap_single(&bp->pdev->dev,
1705 dma_unmap_addr(rx_buf, mapping),
437cf2f1 1706 bp->rx_buf_size,
1a983142 1707 DMA_FROM_DEVICE);
a2fbb9ea
ET
1708 skb_reserve(skb, pad);
1709 skb_put(skb, len);
1710
1711 } else {
1712 DP(NETIF_MSG_RX_ERR,
34f80b04 1713 "ERROR packet dropped because "
a2fbb9ea 1714 "of alloc failure\n");
de832a55 1715 fp->eth_q_stats.rx_skb_alloc_failed++;
a2fbb9ea
ET
1716reuse_rx:
1717 bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
1718 goto next_rx;
1719 }
1720
1721 skb->protocol = eth_type_trans(skb, bp->dev);
1722
1723 skb->ip_summed = CHECKSUM_NONE;
66e855f3 1724 if (bp->rx_csum) {
1adcd8be
EG
1725 if (likely(BNX2X_RX_CSUM_OK(cqe)))
1726 skb->ip_summed = CHECKSUM_UNNECESSARY;
66e855f3 1727 else
de832a55 1728 fp->eth_q_stats.hw_csum_err++;
66e855f3 1729 }
a2fbb9ea
ET
1730 }
1731
748e5439 1732 skb_record_rx_queue(skb, fp->index);
ab6ad5a4 1733
a2fbb9ea 1734#ifdef BCM_VLAN
0c6671b0 1735 if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
34f80b04
EG
1736 (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
1737 PARSING_FLAGS_VLAN))
4fd89b7a
DK
1738 vlan_gro_receive(&fp->napi, bp->vlgrp,
1739 le16_to_cpu(cqe->fast_path_cqe.vlan_tag), skb);
a2fbb9ea
ET
1740 else
1741#endif
4fd89b7a 1742 napi_gro_receive(&fp->napi, skb);
a2fbb9ea 1743
a2fbb9ea
ET
1744
1745next_rx:
1746 rx_buf->skb = NULL;
1747
1748 bd_cons = NEXT_RX_IDX(bd_cons);
1749 bd_prod = NEXT_RX_IDX(bd_prod);
34f80b04
EG
1750 bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
1751 rx_pkt++;
a2fbb9ea
ET
1752next_cqe:
1753 sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
1754 sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
a2fbb9ea 1755
34f80b04 1756 if (rx_pkt == budget)
a2fbb9ea
ET
1757 break;
1758 } /* while */
1759
1760 fp->rx_bd_cons = bd_cons;
34f80b04 1761 fp->rx_bd_prod = bd_prod_fw;
a2fbb9ea
ET
1762 fp->rx_comp_cons = sw_comp_cons;
1763 fp->rx_comp_prod = sw_comp_prod;
1764
7a9b2557
VZ
1765 /* Update producers */
1766 bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
1767 fp->rx_sge_prod);
a2fbb9ea
ET
1768
1769 fp->rx_pkt += rx_pkt;
1770 fp->rx_calls++;
1771
1772 return rx_pkt;
1773}
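/* Worked note on the copy-break above (a paraphrase, not authoritative):
 * when the MTU exceeds ETH_MAX_PACKET_SIZE (there is no jumbo ring),
 * packets no longer than RX_COPY_THRESH are copied into a small fresh skb
 * and the original buffer is recycled via bnx2x_reuse_rx_skb(); larger
 * packets keep their buffer and a replacement is allocated with
 * bnx2x_alloc_rx_skb(). On any allocation failure the packet is dropped
 * and its buffer reused, so the RX ring never runs dry.
 */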
1774
1775static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
1776{
1777 struct bnx2x_fastpath *fp = fp_cookie;
1778 struct bnx2x *bp = fp->bp;
a2fbb9ea 1779
da5a662a
VZ
1780 /* Return here if interrupt is disabled */
1781 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1782 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1783 return IRQ_HANDLED;
1784 }
1785
34f80b04 1786 DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
ca00392c 1787 fp->index, fp->sb_id);
0626b899 1788 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1789
1790#ifdef BNX2X_STOP_ON_ERROR
1791 if (unlikely(bp->panic))
1792 return IRQ_HANDLED;
1793#endif
ca00392c 1794
54b9ddaa
VZ
1795 /* Handle Rx and Tx according to MSI-X vector */
1796 prefetch(fp->rx_cons_sb);
1797 prefetch(fp->tx_cons_sb);
1798 prefetch(&fp->status_blk->u_status_block.status_block_index);
1799 prefetch(&fp->status_blk->c_status_block.status_block_index);
1800 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
34f80b04 1801
a2fbb9ea
ET
1802 return IRQ_HANDLED;
1803}
1804
1805static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1806{
555f6c78 1807 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1808 u16 status = bnx2x_ack_int(bp);
34f80b04 1809 u16 mask;
ca00392c 1810 int i;
a2fbb9ea 1811
34f80b04 1812 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1813 if (unlikely(status == 0)) {
1814 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1815 return IRQ_NONE;
1816 }
f5372251 1817 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1818
34f80b04 1819 /* Return here if interrupt is disabled */
a2fbb9ea
ET
1820 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1821 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1822 return IRQ_HANDLED;
1823 }
1824
3196a88a
EG
1825#ifdef BNX2X_STOP_ON_ERROR
1826 if (unlikely(bp->panic))
1827 return IRQ_HANDLED;
1828#endif
1829
ca00392c
EG
1830 for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
1831 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1832
ca00392c
EG
1833 mask = 0x2 << fp->sb_id;
1834 if (status & mask) {
54b9ddaa
VZ
1835 /* Handle Rx and Tx according to SB id */
1836 prefetch(fp->rx_cons_sb);
1837 prefetch(&fp->status_blk->u_status_block.
1838 status_block_index);
1839 prefetch(fp->tx_cons_sb);
1840 prefetch(&fp->status_blk->c_status_block.
1841 status_block_index);
1842 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1843 status &= ~mask;
1844 }
a2fbb9ea
ET
1845 }
1846
993ac7b5
MC
1847#ifdef BCM_CNIC
1848 mask = 0x2 << CNIC_SB_ID(bp);
1849 if (status & (mask | 0x1)) {
1850 struct cnic_ops *c_ops = NULL;
1851
1852 rcu_read_lock();
1853 c_ops = rcu_dereference(bp->cnic_ops);
1854 if (c_ops)
1855 c_ops->cnic_handler(bp->cnic_data, NULL);
1856 rcu_read_unlock();
1857
1858 status &= ~mask;
1859 }
1860#endif
a2fbb9ea 1861
34f80b04 1862 if (unlikely(status & 0x1)) {
1cf167f2 1863 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
1864
1865 status &= ~0x1;
1866 if (!status)
1867 return IRQ_HANDLED;
1868 }
1869
34f80b04
EG
1870 if (status)
1871 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
1872 status);
a2fbb9ea 1873
c18487ee 1874 return IRQ_HANDLED;
a2fbb9ea
ET
1875}
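/* Sketch of the INTA status word decoded above (inferred from the masks,
 * not from documentation): bit 0 appears to flag the default/slowpath
 * status block and bit (1 + sb_id) the fastpath block of each queue,
 * hence "mask = 0x2 << fp->sb_id". E.g. for sb_id == 2 the mask is
 * 0x2 << 2 == 0x8, so status == 0x9 means "queue 2 has work and the
 * slowpath task must be scheduled".
 */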
1876
c18487ee 1877/* end of fast path */
a2fbb9ea 1878
bb2a0f7a 1879static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);
a2fbb9ea 1880
c18487ee
YR
1881/* Link */
1882
1883/*
1884 * General service functions
1885 */
a2fbb9ea 1886
4a37fb66 1887static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1888{
1889 u32 lock_status;
1890 u32 resource_bit = (1 << resource);
4a37fb66
YG
1891 int func = BP_FUNC(bp);
1892 u32 hw_lock_control_reg;
c18487ee 1893 int cnt;
a2fbb9ea 1894
c18487ee
YR
1895 /* Validating that the resource is within range */
1896 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1897 DP(NETIF_MSG_HW,
1898 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1899 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1900 return -EINVAL;
1901 }
a2fbb9ea 1902
4a37fb66
YG
1903 if (func <= 5) {
1904 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1905 } else {
1906 hw_lock_control_reg =
1907 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1908 }
1909
c18487ee 1910 /* Validating that the resource is not already taken */
4a37fb66 1911 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1912 if (lock_status & resource_bit) {
1913 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1914 lock_status, resource_bit);
1915 return -EEXIST;
1916 }
a2fbb9ea 1917
46230476
EG
1918 /* Try for 5 seconds, polling every 5ms */
1919 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1920 /* Try to acquire the lock */
4a37fb66
YG
1921 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1922 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1923 if (lock_status & resource_bit)
1924 return 0;
a2fbb9ea 1925
c18487ee 1926 msleep(5);
a2fbb9ea 1927 }
c18487ee
YR
1928 DP(NETIF_MSG_HW, "Timeout\n");
1929 return -EAGAIN;
1930}
a2fbb9ea 1931
4a37fb66 1932static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1933{
1934 u32 lock_status;
1935 u32 resource_bit = (1 << resource);
4a37fb66
YG
1936 int func = BP_FUNC(bp);
1937 u32 hw_lock_control_reg;
a2fbb9ea 1938
72fd0718
VZ
1939 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1940
c18487ee
YR
1941 /* Validating that the resource is within range */
1942 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1943 DP(NETIF_MSG_HW,
1944 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1945 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1946 return -EINVAL;
1947 }
1948
4a37fb66
YG
1949 if (func <= 5) {
1950 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1951 } else {
1952 hw_lock_control_reg =
1953 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1954 }
1955
c18487ee 1956 /* Validating that the resource is currently taken */
4a37fb66 1957 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1958 if (!(lock_status & resource_bit)) {
1959 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1960 lock_status, resource_bit);
1961 return -EFAULT;
a2fbb9ea
ET
1962 }
1963
4a37fb66 1964 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1965 return 0;
1966}
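/* Usage sketch for the pair above (as implied by the register accesses;
 * the exact HW semantics are assumed): writing the resource bit to
 * hw_lock_control_reg + 4 tries to take the lock, reading the register
 * shows what is currently held, and writing the bit to the register
 * itself releases it. Callers bracket shared-hardware access like so:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... touch MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *
 * exactly as bnx2x_set_gpio() below does.
 */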
1967
1968/* HW Lock for shared dual port PHYs */
4a37fb66 1969static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee 1970{
34f80b04 1971 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1972
46c6a674
EG
1973 if (bp->port.need_hw_lock)
1974 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
c18487ee 1975}
a2fbb9ea 1976
4a37fb66 1977static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee 1978{
46c6a674
EG
1979 if (bp->port.need_hw_lock)
1980 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
a2fbb9ea 1981
34f80b04 1982 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1983}
a2fbb9ea 1984
4acac6a5
EG
1985int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1986{
1987 /* The GPIO should be swapped if swap register is set and active */
1988 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1989 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1990 int gpio_shift = gpio_num +
1991 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1992 u32 gpio_mask = (1 << gpio_shift);
1993 u32 gpio_reg;
1994 int value;
1995
1996 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1997 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1998 return -EINVAL;
1999 }
2000
2001 /* read GPIO value */
2002 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2003
2004 /* get the requested pin value */
2005 if ((gpio_reg & gpio_mask) == gpio_mask)
2006 value = 1;
2007 else
2008 value = 0;
2009
2010 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2011
2012 return value;
2013}
2014
17de50b7 2015int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2016{
2017 /* The GPIO should be swapped if swap register is set and active */
2018 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2019 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2020 int gpio_shift = gpio_num +
2021 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2022 u32 gpio_mask = (1 << gpio_shift);
2023 u32 gpio_reg;
a2fbb9ea 2024
c18487ee
YR
2025 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2026 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2027 return -EINVAL;
2028 }
a2fbb9ea 2029
4a37fb66 2030 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2031 /* read GPIO and mask out all but the float bits */
2032 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2033
c18487ee
YR
2034 switch (mode) {
2035 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2036 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
2037 gpio_num, gpio_shift);
2038 /* clear FLOAT and set CLR */
2039 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2040 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2041 break;
a2fbb9ea 2042
c18487ee
YR
2043 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2044 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
2045 gpio_num, gpio_shift);
2046 /* clear FLOAT and set SET */
2047 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2048 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2049 break;
a2fbb9ea 2050
17de50b7 2051 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
2052 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
2053 gpio_num, gpio_shift);
2054 /* set FLOAT */
2055 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2056 break;
a2fbb9ea 2057
c18487ee
YR
2058 default:
2059 break;
a2fbb9ea
ET
2060 }
2061
c18487ee 2062 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2063 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2064
c18487ee 2065 return 0;
a2fbb9ea
ET
2066}
2067
4acac6a5
EG
2068int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2069{
2070 /* The GPIO should be swapped if swap register is set and active */
2071 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2072 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2073 int gpio_shift = gpio_num +
2074 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2075 u32 gpio_mask = (1 << gpio_shift);
2076 u32 gpio_reg;
2077
2078 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2079 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2080 return -EINVAL;
2081 }
2082
2083 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2084 /* read GPIO int */
2085 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2086
2087 switch (mode) {
2088 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
2089 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
2090 "output low\n", gpio_num, gpio_shift);
2091 /* clear SET and set CLR */
2092 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2093 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2094 break;
2095
2096 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
2097 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
2098 "output high\n", gpio_num, gpio_shift);
2099 /* clear CLR and set SET */
2100 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2101 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2102 break;
2103
2104 default:
2105 break;
2106 }
2107
2108 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2109 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2110
2111 return 0;
2112}
2113
c18487ee 2114static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 2115{
c18487ee
YR
2116 u32 spio_mask = (1 << spio_num);
2117 u32 spio_reg;
a2fbb9ea 2118
c18487ee
YR
2119 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
2120 (spio_num > MISC_REGISTERS_SPIO_7)) {
2121 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
2122 return -EINVAL;
a2fbb9ea
ET
2123 }
2124
4a37fb66 2125 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
2126 /* read SPIO and mask out all but the float bits */
2127 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 2128
c18487ee 2129 switch (mode) {
6378c025 2130 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
2131 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
2132 /* clear FLOAT and set CLR */
2133 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2134 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
2135 break;
a2fbb9ea 2136
6378c025 2137 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
2138 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
2139 /* clear FLOAT and set SET */
2140 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2141 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
2142 break;
a2fbb9ea 2143
c18487ee
YR
2144 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
2145 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
2146 /* set FLOAT */
2147 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
2148 break;
a2fbb9ea 2149
c18487ee
YR
2150 default:
2151 break;
a2fbb9ea
ET
2152 }
2153
c18487ee 2154 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2155 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2156
a2fbb9ea
ET
2157 return 0;
2158}
2159
c18487ee 2160static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2161{
ad33ea3a
EG
2162 switch (bp->link_vars.ieee_fc &
2163 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2164 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 2165 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2166 ADVERTISED_Pause);
2167 break;
356e2385 2168
c18487ee 2169 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 2170 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
2171 ADVERTISED_Pause);
2172 break;
356e2385 2173
c18487ee 2174 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 2175 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee 2176 break;
356e2385 2177
c18487ee 2178 default:
34f80b04 2179 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
2180 ADVERTISED_Pause);
2181 break;
2182 }
2183}
f1410647 2184
c18487ee
YR
2185static void bnx2x_link_report(struct bnx2x *bp)
2186{
f34d28ea 2187 if (bp->flags & MF_FUNC_DIS) {
2691d51d 2188 netif_carrier_off(bp->dev);
7995c64e 2189 netdev_err(bp->dev, "NIC Link is Down\n");
2691d51d
EG
2190 return;
2191 }
2192
c18487ee 2193 if (bp->link_vars.link_up) {
35c5f8fe
EG
2194 u16 line_speed;
2195
c18487ee
YR
2196 if (bp->state == BNX2X_STATE_OPEN)
2197 netif_carrier_on(bp->dev);
7995c64e 2198 netdev_info(bp->dev, "NIC Link is Up, ");
f1410647 2199
35c5f8fe
EG
2200 line_speed = bp->link_vars.line_speed;
2201 if (IS_E1HMF(bp)) {
2202 u16 vn_max_rate;
2203
2204 vn_max_rate =
2205 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2206 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2207 if (vn_max_rate < line_speed)
2208 line_speed = vn_max_rate;
2209 }
7995c64e 2210 pr_cont("%d Mbps ", line_speed);
f1410647 2211
c18487ee 2212 if (bp->link_vars.duplex == DUPLEX_FULL)
7995c64e 2213 pr_cont("full duplex");
c18487ee 2214 else
7995c64e 2215 pr_cont("half duplex");
f1410647 2216
c0700f90
DM
2217 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
2218 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
7995c64e 2219 pr_cont(", receive ");
356e2385
EG
2220 if (bp->link_vars.flow_ctrl &
2221 BNX2X_FLOW_CTRL_TX)
7995c64e 2222 pr_cont("& transmit ");
c18487ee 2223 } else {
7995c64e 2224 pr_cont(", transmit ");
c18487ee 2225 }
7995c64e 2226 pr_cont("flow control ON");
c18487ee 2227 }
7995c64e 2228 pr_cont("\n");
f1410647 2229
c18487ee
YR
2230 } else { /* link_down */
2231 netif_carrier_off(bp->dev);
7995c64e 2232 netdev_err(bp->dev, "NIC Link is Down\n");
f1410647 2233 }
c18487ee
YR
2234}
2235
b5bf9068 2236static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 2237{
19680c48
EG
2238 if (!BP_NOMCP(bp)) {
2239 u8 rc;
a2fbb9ea 2240
19680c48 2241 /* Initialize link parameters structure variables */
8c99e7b0
YR
2242 /* It is recommended to turn off RX FC for jumbo frames
2243 for better performance */
0c593270 2244 if (bp->dev->mtu > 5000)
c0700f90 2245 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2246 else
c0700f90 2247 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2248
4a37fb66 2249 bnx2x_acquire_phy_lock(bp);
b5bf9068
EG
2250
2251 if (load_mode == LOAD_DIAG)
2252 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
2253
19680c48 2254 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2255
4a37fb66 2256 bnx2x_release_phy_lock(bp);
a2fbb9ea 2257
3c96c68b
EG
2258 bnx2x_calc_fc_adv(bp);
2259
b5bf9068
EG
2260 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
2261 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2262 bnx2x_link_report(bp);
b5bf9068 2263 }
34f80b04 2264
19680c48
EG
2265 return rc;
2266 }
f5372251 2267 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2268 return -EINVAL;
a2fbb9ea
ET
2269}
2270
c18487ee 2271static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2272{
19680c48 2273 if (!BP_NOMCP(bp)) {
4a37fb66 2274 bnx2x_acquire_phy_lock(bp);
19680c48 2275 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2276 bnx2x_release_phy_lock(bp);
a2fbb9ea 2277
19680c48
EG
2278 bnx2x_calc_fc_adv(bp);
2279 } else
f5372251 2280 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2281}
a2fbb9ea 2282
c18487ee
YR
2283static void bnx2x__link_reset(struct bnx2x *bp)
2284{
19680c48 2285 if (!BP_NOMCP(bp)) {
4a37fb66 2286 bnx2x_acquire_phy_lock(bp);
589abe3a 2287 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 2288 bnx2x_release_phy_lock(bp);
19680c48 2289 } else
f5372251 2290 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2291}
a2fbb9ea 2292
c18487ee
YR
2293static u8 bnx2x_link_test(struct bnx2x *bp)
2294{
2295 u8 rc;
a2fbb9ea 2296
4a37fb66 2297 bnx2x_acquire_phy_lock(bp);
c18487ee 2298 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2299 bnx2x_release_phy_lock(bp);
a2fbb9ea 2300
c18487ee
YR
2301 return rc;
2302}
a2fbb9ea 2303
8a1c38d1 2304static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2305{
8a1c38d1
EG
2306 u32 r_param = bp->link_vars.line_speed / 8;
2307 u32 fair_periodic_timeout_usec;
2308 u32 t_fair;
34f80b04 2309
8a1c38d1
EG
2310 memset(&(bp->cmng.rs_vars), 0,
2311 sizeof(struct rate_shaping_vars_per_port));
2312 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2313
8a1c38d1
EG
2314 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2315 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2316
8a1c38d1
EG
2317 /* this is the threshold below which no timer arming will occur;
2318 the 1.25 coefficient makes the threshold a little bigger
2319 than the real time, to compensate for timer inaccuracy */
2320 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2321 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2322
8a1c38d1
EG
2323 /* resolution of fairness timer */
2324 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2325 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2326 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2327
8a1c38d1
EG
2328 /* this is the threshold below which we won't arm the timer anymore */
2329 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2330
8a1c38d1
EG
2331 /* we multiply by 1e3/8 to get bytes/msec.
2332 We don't want the credits to exceed
2333 t_fair*FAIR_MEM (the algorithm resolution) */
2334 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2335 /* since each tick is 4 usec */
2336 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2337}
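/* Illustrative arithmetic for the port min/max init above (taking the
 * in-code comments at face value): line_speed is in Mbps, so
 * r_param = line_speed / 8 is bytes per usec. At 10G that gives
 * r_param = 10000 / 8 = 1250 bytes/usec and a t_fair of about 1000 usec,
 * so upper_bound is roughly 1250 * 1000 * FAIR_MEM credit bytes. The
 * final divisions by 4 convert usec to SDM ticks (one tick is 4 usec).
 */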
2338
2691d51d
EG
2339/* Calculates the sum of vn_min_rates.
2340 It's needed for further normalizing of the min_rates.
2341 Returns:
2342 sum of vn_min_rates.
2343 or
2344 0 - if all the min_rates are 0.
2345 In the latter case the fairness algorithm should be deactivated.
2346 If not all min_rates are zero, those that are zero will be set to 1.
2347 */
2348static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2349{
2350 int all_zero = 1;
2351 int port = BP_PORT(bp);
2352 int vn;
2353
2354 bp->vn_weight_sum = 0;
2355 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2356 int func = 2*vn + port;
2357 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2358 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2359 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2360
2361 /* Skip hidden vns */
2362 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
2363 continue;
2364
2365 /* If min rate is zero - set it to 1 */
2366 if (!vn_min_rate)
2367 vn_min_rate = DEF_MIN_RATE;
2368 else
2369 all_zero = 0;
2370
2371 bp->vn_weight_sum += vn_min_rate;
2372 }
2373
2374 /* ... only if all the min rates are zero - disable fairness */
b015e3d1
EG
2375 if (all_zero) {
2376 bp->cmng.flags.cmng_enables &=
2377 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2378 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
2379 " fairness will be disabled\n");
2380 } else
2381 bp->cmng.flags.cmng_enables |=
2382 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2383}
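/* Illustrative numbers (assuming DEF_MIN_RATE keeps its usual value of
 * 100): FUNC_MF_CFG_MIN_BW is a percentage, so the "* 100" above yields
 * weights in hundredths. Three visible vns configured at 20/30/0 give
 * weights 2000/3000/100 (the zero gets bumped to DEF_MIN_RATE) and
 * vn_weight_sum = 5100; fairness stays enabled because not every min
 * rate was zero.
 */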
2384
8a1c38d1 2385static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2386{
2387 struct rate_shaping_vars_per_vn m_rs_vn;
2388 struct fairness_vars_per_vn m_fair_vn;
2389 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2390 u16 vn_min_rate, vn_max_rate;
2391 int i;
2392
2393 /* If function is hidden - set min and max to zeroes */
2394 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2395 vn_min_rate = 0;
2396 vn_max_rate = 0;
2397
2398 } else {
2399 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2400 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
b015e3d1
EG
2401 /* If min rate is zero - set it to 1 */
2402 if (!vn_min_rate)
34f80b04
EG
2403 vn_min_rate = DEF_MIN_RATE;
2404 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2405 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2406 }
8a1c38d1 2407 DP(NETIF_MSG_IFUP,
b015e3d1 2408 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 2409 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2410
2411 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2412 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2413
2414 /* global vn counter - maximal Mbps for this vn */
2415 m_rs_vn.vn_counter.rate = vn_max_rate;
2416
2417 /* quota - number of bytes transmitted in this period */
2418 m_rs_vn.vn_counter.quota =
2419 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2420
8a1c38d1 2421 if (bp->vn_weight_sum) {
34f80b04
EG
2422 /* credit for each period of the fairness algorithm:
2423 number of bytes in T_FAIR (the vn share the port rate).
8a1c38d1
EG
2424 vn_weight_sum should not be larger than 10000, thus
2425 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2426 than zero */
34f80b04 2427 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2428 max((u32)(vn_min_rate * (T_FAIR_COEF /
2429 (8 * bp->vn_weight_sum))),
2430 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2431 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2432 m_fair_vn.vn_credit_delta);
2433 }
2434
34f80b04
EG
2435 /* Store it to internal memory */
2436 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2437 REG_WR(bp, BAR_XSTRORM_INTMEM +
2438 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2439 ((u32 *)(&m_rs_vn))[i]);
2440
2441 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2442 REG_WR(bp, BAR_XSTRORM_INTMEM +
2443 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2444 ((u32 *)(&m_fair_vn))[i]);
2445}
2446
8a1c38d1 2447
c18487ee
YR
2448/* This function is called upon link interrupt */
2449static void bnx2x_link_attn(struct bnx2x *bp)
2450{
bb2a0f7a
YG
2451 /* Make sure that we are synced with the current statistics */
2452 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2453
c18487ee 2454 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2455
bb2a0f7a
YG
2456 if (bp->link_vars.link_up) {
2457
1c06328c 2458 /* dropless flow control */
a18f5128 2459 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1c06328c
EG
2460 int port = BP_PORT(bp);
2461 u32 pause_enabled = 0;
2462
2463 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2464 pause_enabled = 1;
2465
2466 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2467 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2468 pause_enabled);
2469 }
2470
bb2a0f7a
YG
2471 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2472 struct host_port_stats *pstats;
2473
2474 pstats = bnx2x_sp(bp, port_stats);
2475 /* reset old bmac stats */
2476 memset(&(pstats->mac_stx[0]), 0,
2477 sizeof(struct mac_stx));
2478 }
f34d28ea 2479 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2480 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2481 }
2482
c18487ee
YR
2483 /* indicate link status */
2484 bnx2x_link_report(bp);
34f80b04
EG
2485
2486 if (IS_E1HMF(bp)) {
8a1c38d1 2487 int port = BP_PORT(bp);
34f80b04 2488 int func;
8a1c38d1 2489 int vn;
34f80b04 2490
ab6ad5a4 2491 /* Set the attention towards other drivers on the same port */
34f80b04
EG
2492 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2493 if (vn == BP_E1HVN(bp))
2494 continue;
2495
8a1c38d1 2496 func = ((vn << 1) | port);
34f80b04
EG
2497 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2498 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2499 }
34f80b04 2500
8a1c38d1
EG
2501 if (bp->link_vars.link_up) {
2502 int i;
2503
2504 /* Init rate shaping and fairness contexts */
2505 bnx2x_init_port_minmax(bp);
34f80b04 2506
34f80b04 2507 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2508 bnx2x_init_vn_minmax(bp, 2*vn + port);
2509
2510 /* Store it to internal memory */
2511 for (i = 0;
2512 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2513 REG_WR(bp, BAR_XSTRORM_INTMEM +
2514 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2515 ((u32 *)(&bp->cmng))[i]);
2516 }
34f80b04 2517 }
c18487ee 2518}
a2fbb9ea 2519
c18487ee
YR
2520static void bnx2x__link_status_update(struct bnx2x *bp)
2521{
f34d28ea 2522 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2523 return;
a2fbb9ea 2524
c18487ee 2525 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2526
bb2a0f7a
YG
2527 if (bp->link_vars.link_up)
2528 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2529 else
2530 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2531
2691d51d
EG
2532 bnx2x_calc_vn_weight_sum(bp);
2533
c18487ee
YR
2534 /* indicate link status */
2535 bnx2x_link_report(bp);
a2fbb9ea 2536}
a2fbb9ea 2537
34f80b04
EG
2538static void bnx2x_pmf_update(struct bnx2x *bp)
2539{
2540 int port = BP_PORT(bp);
2541 u32 val;
2542
2543 bp->port.pmf = 1;
2544 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2545
2546 /* enable nig attention */
2547 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2548 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2549 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2550
2551 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2552}
2553
c18487ee 2554/* end of Link */
a2fbb9ea
ET
2555
2556/* slow path */
2557
2558/*
2559 * General service functions
2560 */
2561
2691d51d
EG
2562/* send the MCP a request, block until there is a reply */
2563u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2564{
2565 int func = BP_FUNC(bp);
2566 u32 seq = ++bp->fw_seq;
2567 u32 rc = 0;
2568 u32 cnt = 1;
2569 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2570
c4ff7cbf 2571 mutex_lock(&bp->fw_mb_mutex);
2691d51d
EG
2572 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2573 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2574
2575 do {
2576 /* let the FW do its magic ... */
2577 msleep(delay);
2578
2579 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2580
c4ff7cbf
EG
2581 /* Give the FW up to 5 seconds (500*10ms) */
2582 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2583
2584 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2585 cnt*delay, rc, seq);
2586
2587 /* is this a reply to our command? */
2588 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2589 rc &= FW_MSG_CODE_MASK;
2590 else {
2591 /* FW BUG! */
2592 BNX2X_ERR("FW failed to respond!\n");
2593 bnx2x_fw_dump(bp);
2594 rc = 0;
2595 }
c4ff7cbf 2596 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2597
2598 return rc;
2599}
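/* The mailbox handshake above, paraphrased (flow only, not authoritative):
 * the driver picks seq = ++bp->fw_seq, writes (command | seq) to
 * drv_mb_header, then polls fw_mb_header every 10 ms (100 ms on slow
 * emulation chips) for up to 5 seconds until the FW echoes the same
 * sequence number in FW_MSG_SEQ_NUMBER_MASK; the reply code is then
 * (fw_mb_header & FW_MSG_CODE_MASK). A stale sequence number means the
 * FW never answered, which is treated as a firmware bug and dumped.
 */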
2600
2601static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
e665bfda 2602static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2691d51d
EG
2603static void bnx2x_set_rx_mode(struct net_device *dev);
2604
2605static void bnx2x_e1h_disable(struct bnx2x *bp)
2606{
2607 int port = BP_PORT(bp);
2691d51d
EG
2608
2609 netif_tx_disable(bp->dev);
2691d51d
EG
2610
2611 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2612
2691d51d
EG
2613 netif_carrier_off(bp->dev);
2614}
2615
2616static void bnx2x_e1h_enable(struct bnx2x *bp)
2617{
2618 int port = BP_PORT(bp);
2619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2621
2691d51d
EG
2622 /* Only the Tx queues need to be re-enabled */
2623 netif_tx_wake_all_queues(bp->dev);
2624
061bc702
EG
2625 /*
2626 * Should not call netif_carrier_on since it will be called if the link
2627 * is up when checking for link state
2628 */
2691d51d
EG
2629}
2630
2631static void bnx2x_update_min_max(struct bnx2x *bp)
2632{
2633 int port = BP_PORT(bp);
2634 int vn, i;
2635
2636 /* Init rate shaping and fairness contexts */
2637 bnx2x_init_port_minmax(bp);
2638
2639 bnx2x_calc_vn_weight_sum(bp);
2640
2641 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2642 bnx2x_init_vn_minmax(bp, 2*vn + port);
2643
2644 if (bp->port.pmf) {
2645 int func;
2646
2647 /* Set the attention towards other drivers on the same port */
2648 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2649 if (vn == BP_E1HVN(bp))
2650 continue;
2651
2652 func = ((vn << 1) | port);
2653 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2654 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2655 }
2656
2657 /* Store it to internal memory */
2658 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
2659 REG_WR(bp, BAR_XSTRORM_INTMEM +
2660 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2661 ((u32 *)(&bp->cmng))[i]);
2662 }
2663}
2664
2665static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2666{
2691d51d 2667 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2691d51d
EG
2668
2669 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2670
f34d28ea
EG
2671 /*
2672 * This is the only place besides the function initialization
2673 * where the bp->flags can change so it is done without any
2674 * locks
2675 */
2691d51d
EG
2676 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2677 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
f34d28ea 2678 bp->flags |= MF_FUNC_DIS;
2691d51d
EG
2679
2680 bnx2x_e1h_disable(bp);
2681 } else {
2682 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
f34d28ea 2683 bp->flags &= ~MF_FUNC_DIS;
2691d51d
EG
2684
2685 bnx2x_e1h_enable(bp);
2686 }
2687 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2688 }
2689 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2690
2691 bnx2x_update_min_max(bp);
2692 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2693 }
2694
2695 /* Report results to MCP */
2696 if (dcc_event)
2697 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE);
2698 else
2699 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2700}
2701
28912902
MC
2702/* must be called under the spq lock */
2703static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2704{
2705 struct eth_spe *next_spe = bp->spq_prod_bd;
2706
2707 if (bp->spq_prod_bd == bp->spq_last_bd) {
2708 bp->spq_prod_bd = bp->spq;
2709 bp->spq_prod_idx = 0;
2710 DP(NETIF_MSG_TIMER, "end of spq\n");
2711 } else {
2712 bp->spq_prod_bd++;
2713 bp->spq_prod_idx++;
2714 }
2715 return next_spe;
2716}
2717
2718/* must be called under the spq lock */
2719static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2720{
2721 int func = BP_FUNC(bp);
2722
2723 /* Make sure that BD data is updated before writing the producer */
2724 wmb();
2725
2726 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2727 bp->spq_prod_idx);
2728 mmiowb();
2729}
2730
a2fbb9ea
ET
2731/* the slow path queue is odd since completions arrive on the fastpath ring */
2732static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2733 u32 data_hi, u32 data_lo, int common)
2734{
28912902 2735 struct eth_spe *spe;
a2fbb9ea 2736
34f80b04
EG
2737 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2738 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2739 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2740 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2741 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2742
2743#ifdef BNX2X_STOP_ON_ERROR
2744 if (unlikely(bp->panic))
2745 return -EIO;
2746#endif
2747
34f80b04 2748 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2749
2750 if (!bp->spq_left) {
2751 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2752 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2753 bnx2x_panic();
2754 return -EBUSY;
2755 }
f1410647 2756
28912902
MC
2757 spe = bnx2x_sp_get_next(bp);
2758
a2fbb9ea 2759 /* CID needs port number to be encoded int it */
28912902 2760 spe->hdr.conn_and_cmd_data =
a2fbb9ea
ET
2761 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2762 HW_CID(bp, cid)));
28912902 2763 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
a2fbb9ea 2764 if (common)
28912902 2765 spe->hdr.type |=
a2fbb9ea
ET
2766 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2767
28912902
MC
2768 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2769 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
a2fbb9ea
ET
2770
2771 bp->spq_left--;
2772
28912902 2773 bnx2x_sp_prod_update(bp);
34f80b04 2774 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2775 return 0;
2776}
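/* Usage sketch (hypothetical values; assumes RAMROD_CMD_ID_ETH_SET_MAC
 * is a valid command for this firmware):
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, data_hi, data_lo, 1);
 *
 * bnx2x_sp_get_next() wraps spq_prod_bd back to bp->spq on reaching
 * spq_last_bd, so spq_prod_idx counts modulo the ring size and spq_left
 * is the only guard against overwriting uncompleted entries.
 */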
2777
2778/* acquire split MCP access lock register */
4a37fb66 2779static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2780{
72fd0718 2781 u32 j, val;
34f80b04 2782 int rc = 0;
a2fbb9ea
ET
2783
2784 might_sleep();
72fd0718 2785 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2786 val = (1UL << 31);
2787 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2788 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2789 if (val & (1L << 31))
2790 break;
2791
2792 msleep(5);
2793 }
a2fbb9ea 2794 if (!(val & (1L << 31))) {
19680c48 2795 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2796 rc = -EBUSY;
2797 }
2798
2799 return rc;
2800}
2801
4a37fb66
YG
2802/* release split MCP access lock register */
2803static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2804{
72fd0718 2805 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2806}
2807
2808static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2809{
2810 struct host_def_status_block *def_sb = bp->def_status_blk;
2811 u16 rc = 0;
2812
2813 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2814 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2815 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2816 rc |= 1;
2817 }
2818 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2819 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2820 rc |= 2;
2821 }
2822 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2823 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2824 rc |= 4;
2825 }
2826 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2827 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2828 rc |= 8;
2829 }
2830 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2831 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2832 rc |= 16;
2833 }
2834 return rc;
2835}
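/* The return value above is a bitmap of which default status block
 * indices moved: 1 attention, 2 cstorm, 4 ustorm, 8 xstorm, 16 tstorm,
 * letting the caller service only the sub-blocks that changed.
 */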
2836
2837/*
2838 * slow path service functions
2839 */
2840
2841static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2842{
34f80b04 2843 int port = BP_PORT(bp);
5c862848
EG
2844 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2845 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2846 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2847 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2848 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2849 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2850 u32 aeu_mask;
87942b46 2851 u32 nig_mask = 0;
a2fbb9ea 2852
a2fbb9ea
ET
2853 if (bp->attn_state & asserted)
2854 BNX2X_ERR("IGU ERROR\n");
2855
3fcaf2e5
EG
2856 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2857 aeu_mask = REG_RD(bp, aeu_addr);
2858
a2fbb9ea 2859 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2860 aeu_mask, asserted);
72fd0718 2861 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2862 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2863
3fcaf2e5
EG
2864 REG_WR(bp, aeu_addr, aeu_mask);
2865 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2866
3fcaf2e5 2867 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2868 bp->attn_state |= asserted;
3fcaf2e5 2869 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2870
2871 if (asserted & ATTN_HARD_WIRED_MASK) {
2872 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2873
a5e9a7cf
EG
2874 bnx2x_acquire_phy_lock(bp);
2875
877e9aa4 2876 /* save nig interrupt mask */
87942b46 2877 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2878 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2879
c18487ee 2880 bnx2x_link_attn(bp);
a2fbb9ea
ET
2881
2882 /* handle unicore attn? */
2883 }
2884 if (asserted & ATTN_SW_TIMER_4_FUNC)
2885 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2886
2887 if (asserted & GPIO_2_FUNC)
2888 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2889
2890 if (asserted & GPIO_3_FUNC)
2891 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2892
2893 if (asserted & GPIO_4_FUNC)
2894 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2895
2896 if (port == 0) {
2897 if (asserted & ATTN_GENERAL_ATTN_1) {
2898 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2899 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2900 }
2901 if (asserted & ATTN_GENERAL_ATTN_2) {
2902 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2903 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2904 }
2905 if (asserted & ATTN_GENERAL_ATTN_3) {
2906 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2907 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2908 }
2909 } else {
2910 if (asserted & ATTN_GENERAL_ATTN_4) {
2911 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2912 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2913 }
2914 if (asserted & ATTN_GENERAL_ATTN_5) {
2915 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2916 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2917 }
2918 if (asserted & ATTN_GENERAL_ATTN_6) {
2919 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2921 }
2922 }
2923
2924 } /* if hardwired */
2925
5c862848
EG
2926 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2927 asserted, hc_addr);
2928 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2929
2930 /* now set back the mask */
a5e9a7cf 2931 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2932 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2933 bnx2x_release_phy_lock(bp);
2934 }
a2fbb9ea
ET
2935}
2936
fd4ef40d
EG
2937static inline void bnx2x_fan_failure(struct bnx2x *bp)
2938{
2939 int port = BP_PORT(bp);
2940
2941 /* mark the failure */
2942 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2943 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2944 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2945 bp->link_params.ext_phy_config);
2946
2947 /* log the failure */
7995c64e
JP
2948 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
2949 "Please contact Dell Support for assistance.\n");
fd4ef40d 2950}
ab6ad5a4 2951
877e9aa4 2952static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2953{
34f80b04 2954 int port = BP_PORT(bp);
877e9aa4 2955 int reg_offset;
4d295db0 2956 u32 val, swap_val, swap_override;
877e9aa4 2957
34f80b04
EG
2958 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2959 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2960
34f80b04 2961 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2962
2963 val = REG_RD(bp, reg_offset);
2964 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2965 REG_WR(bp, reg_offset, val);
2966
2967 BNX2X_ERR("SPIO5 hw attention\n");
2968
fd4ef40d 2969 /* Fan failure attention */
35b19ba5
EG
2970 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2971 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
17de50b7 2972 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2973 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2974 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
fd4ef40d
EG
2975 /* The PHY reset is controlled by GPIO 1 */
2976 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2977 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4
ET
2978 break;
2979
4d295db0
EG
2980 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2981 /* The PHY reset is controlled by GPIO 1 */
2982 /* fake the port number to cancel the swap done in
2983 set_gpio() */
2984 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2985 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2986 port = (swap_val && swap_override) ^ 1;
2987 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2988 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2989 break;
2990
877e9aa4
ET
2991 default:
2992 break;
2993 }
fd4ef40d 2994 bnx2x_fan_failure(bp);
877e9aa4 2995 }
34f80b04 2996
589abe3a
EG
2997 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2998 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2999 bnx2x_acquire_phy_lock(bp);
3000 bnx2x_handle_module_detect_int(&bp->link_params);
3001 bnx2x_release_phy_lock(bp);
3002 }
3003
34f80b04
EG
3004 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3005
3006 val = REG_RD(bp, reg_offset);
3007 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3008 REG_WR(bp, reg_offset, val);
3009
3010 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3011 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3012 bnx2x_panic();
3013 }
877e9aa4
ET
3014}
3015
3016static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3017{
3018 u32 val;
3019
0626b899 3020 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3021
3022 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3023 BNX2X_ERR("DB hw attention 0x%x\n", val);
3024 /* DORQ discard attention */
3025 if (val & 0x2)
3026 BNX2X_ERR("FATAL error from DORQ\n");
3027 }
34f80b04
EG
3028
3029 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3030
3031 int port = BP_PORT(bp);
3032 int reg_offset;
3033
3034 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3035 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3036
3037 val = REG_RD(bp, reg_offset);
3038 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3039 REG_WR(bp, reg_offset, val);
3040
3041 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3042 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3043 bnx2x_panic();
3044 }
877e9aa4
ET
3045}
3046
3047static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3048{
3049 u32 val;
3050
3051 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3052
3053 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3054 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3055 /* CFC error attention */
3056 if (val & 0x2)
3057 BNX2X_ERR("FATAL error from CFC\n");
3058 }
3059
3060 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3061
3062 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3063 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3064 /* RQ_USDMDP_FIFO_OVERFLOW */
3065 if (val & 0x18000)
3066 BNX2X_ERR("FATAL error from PXP\n");
3067 }
34f80b04
EG
3068
3069 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3070
3071 int port = BP_PORT(bp);
3072 int reg_offset;
3073
3074 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3075 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3076
3077 val = REG_RD(bp, reg_offset);
3078 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3079 REG_WR(bp, reg_offset, val);
3080
3081 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3082 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3083 bnx2x_panic();
3084 }
877e9aa4
ET
3085}
3086
3087static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3088{
34f80b04
EG
3089 u32 val;
3090
877e9aa4
ET
3091 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3092
34f80b04
EG
3093 if (attn & BNX2X_PMF_LINK_ASSERT) {
3094 int func = BP_FUNC(bp);
3095
3096 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
b015e3d1
EG
3097 bp->mf_config = SHMEM_RD(bp,
3098 mf_cfg.func_mf_config[func].config);
2691d51d
EG
3099 val = SHMEM_RD(bp, func_mb[func].drv_status);
3100 if (val & DRV_STATUS_DCC_EVENT_MASK)
3101 bnx2x_dcc_event(bp,
3102 (val & DRV_STATUS_DCC_EVENT_MASK));
34f80b04 3103 bnx2x__link_status_update(bp);
2691d51d 3104 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3105 bnx2x_pmf_update(bp);
3106
3107 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3108
3109 BNX2X_ERR("MC assert!\n");
3110 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3111 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3112 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3114 bnx2x_panic();
3115
3116 } else if (attn & BNX2X_MCP_ASSERT) {
3117
3118 BNX2X_ERR("MCP assert!\n");
3119 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3120 bnx2x_fw_dump(bp);
877e9aa4
ET
3121
3122 } else
3123 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3124 }
3125
3126 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3127 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3128 if (attn & BNX2X_GRC_TIMEOUT) {
3129 val = CHIP_IS_E1H(bp) ?
3130 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
3131 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3132 }
3133 if (attn & BNX2X_GRC_RSV) {
3134 val = CHIP_IS_E1H(bp) ?
3135 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
3136 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3137 }
877e9aa4 3138 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3139 }
3140}
3141
72fd0718
VZ
3142static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode);
3143static int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
3144
3145
3146#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3147#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3148#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3149#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3150#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3151#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3152/*
3153 * should be run under rtnl lock
3154 */
3155static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3156{
3157 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3158 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3159 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3160 barrier();
3161 mmiowb();
3162}
3163
3164/*
3165 * should be run under rtnl lock
3166 */
3167static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3168{
3169 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3170 val |= (1 << 16);
3171 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3172 barrier();
3173 mmiowb();
3174}
3175
3176/*
3177 * should be run under rtnl lock
3178 */
3179static inline bool bnx2x_reset_is_done(struct bnx2x *bp)
3180{
3181 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3182 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3183 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3184}
3185
3186/*
3187 * should be run under rtnl lock
3188 */
3189static inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
3190{
3191 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3192
3193 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3194
3195 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3196 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3197 barrier();
3198 mmiowb();
3199}
3200
3201/*
3202 * should be run under rtnl lock
3203 */
3204static inline u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
3205{
3206 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3207
3208 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3209
3210 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3211 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3212 barrier();
3213 mmiowb();
3214
3215 return val1;
3216}
3217
3218/*
3219 * should be run under rtnl lock
3220 */
3221static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3222{
3223 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3224}
3225
3226static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3227{
3228 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3229 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3230}
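/* Layout of BNX2X_MISC_GEN_REG as used by the recovery helpers above
 * (derived from the masks, not from documentation):
 *
 *	bits [15:0]	load counter - number of driver instances loaded
 *	bit  [16]	reset flag   - set while a global reset is in
 *				       progress, cleared when it is done
 *
 * e.g. a raw value of 0x00010002 means two functions are loaded and a
 * recovery reset is currently in flight.
 */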
3231
3232static inline void _print_next_block(int idx, const char *blk)
3233{
3234 if (idx)
3235 pr_cont(", ");
3236 pr_cont("%s", blk);
3237}
3238
3239static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3240{
3241 int i = 0;
3242 u32 cur_bit = 0;
3243 for (i = 0; sig; i++) {
3244 cur_bit = ((u32)0x1 << i);
3245 if (sig & cur_bit) {
3246 switch (cur_bit) {
3247 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3248 _print_next_block(par_num++, "BRB");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3251 _print_next_block(par_num++, "PARSER");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3254 _print_next_block(par_num++, "TSDM");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3257 _print_next_block(par_num++, "SEARCHER");
3258 break;
3259 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3260 _print_next_block(par_num++, "TSEMI");
3261 break;
3262 }
3263
3264 /* Clear the bit */
3265 sig &= ~cur_bit;
3266 }
3267 }
3268
3269 return par_num;
3270}
3271
3272static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3273{
3274 int i = 0;
3275 u32 cur_bit = 0;
3276 for (i = 0; sig; i++) {
3277 cur_bit = ((u32)0x1 << i);
3278 if (sig & cur_bit) {
3279 switch (cur_bit) {
3280 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3281 _print_next_block(par_num++, "PBCLIENT");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3284 _print_next_block(par_num++, "QM");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3287 _print_next_block(par_num++, "XSDM");
3288 break;
3289 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3290 _print_next_block(par_num++, "XSEMI");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3293 _print_next_block(par_num++, "DOORBELLQ");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3296 _print_next_block(par_num++, "VAUX PCI CORE");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3299 _print_next_block(par_num++, "DEBUG");
3300 break;
3301 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3302 _print_next_block(par_num++, "USDM");
3303 break;
3304 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3305 _print_next_block(par_num++, "USEMI");
3306 break;
3307 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3308 _print_next_block(par_num++, "UPB");
3309 break;
3310 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3311 _print_next_block(par_num++, "CSDM");
3312 break;
3313 }
3314
3315 /* Clear the bit */
3316 sig &= ~cur_bit;
3317 }
3318 }
3319
3320 return par_num;
3321}
3322
3323static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3324{
3325 int i = 0;
3326 u32 cur_bit = 0;
3327 for (i = 0; sig; i++) {
3328 cur_bit = ((u32)0x1 << i);
3329 if (sig & cur_bit) {
3330 switch (cur_bit) {
3331 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3332 _print_next_block(par_num++, "CSEMI");
3333 break;
3334 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3335 _print_next_block(par_num++, "PXP");
3336 break;
3337 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3338 _print_next_block(par_num++,
3339 "PXPPCICLOCKCLIENT");
3340 break;
3341 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3342 _print_next_block(par_num++, "CFC");
3343 break;
3344 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3345 _print_next_block(par_num++, "CDU");
3346 break;
3347 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3348 _print_next_block(par_num++, "IGU");
3349 break;
3350 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3351 _print_next_block(par_num++, "MISC");
3352 break;
3353 }
3354
3355 /* Clear the bit */
3356 sig &= ~cur_bit;
3357 }
3358 }
3359
3360 return par_num;
3361}
3362
3363static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3364{
3365 int i = 0;
3366 u32 cur_bit = 0;
3367 for (i = 0; sig; i++) {
3368 cur_bit = ((u32)0x1 << i);
3369 if (sig & cur_bit) {
3370 switch (cur_bit) {
3371 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3372 _print_next_block(par_num++, "MCP ROM");
3373 break;
3374 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3375 _print_next_block(par_num++, "MCP UMP RX");
3376 break;
3377 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3378 _print_next_block(par_num++, "MCP UMP TX");
3379 break;
3380 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3381 _print_next_block(par_num++, "MCP SCPAD");
3382 break;
3383 }
3384
3385 /* Clear the bit */
3386 sig &= ~cur_bit;
3387 }
3388 }
3389
3390 return par_num;
3391}
3392
3393static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3394 u32 sig2, u32 sig3)
3395{
3396 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3397 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3398 int par_num = 0;
3399 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3400 "[0]:0x%08x [1]:0x%08x "
3401 "[2]:0x%08x [3]:0x%08x\n",
3402 sig0 & HW_PRTY_ASSERT_SET_0,
3403 sig1 & HW_PRTY_ASSERT_SET_1,
3404 sig2 & HW_PRTY_ASSERT_SET_2,
3405 sig3 & HW_PRTY_ASSERT_SET_3);
3406 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3407 bp->dev->name);
3408 par_num = bnx2x_print_blocks_with_parity0(
3409 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3410 par_num = bnx2x_print_blocks_with_parity1(
3411 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3412 par_num = bnx2x_print_blocks_with_parity2(
3413 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3414 par_num = bnx2x_print_blocks_with_parity3(
3415 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3416 printk("\n");
3417 return true;
3418 } else
3419 return false;
3420}
3421
3422static bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3423{
a2fbb9ea 3424 struct attn_route attn;
72fd0718
VZ
3425 int port = BP_PORT(bp);
3426
3427 attn.sig[0] = REG_RD(bp,
3428 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3429 port*4);
3430 attn.sig[1] = REG_RD(bp,
3431 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3432 port*4);
3433 attn.sig[2] = REG_RD(bp,
3434 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3435 port*4);
3436 attn.sig[3] = REG_RD(bp,
3437 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3438 port*4);
3439
3440 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3441 attn.sig[3]);
3442}
3443
3444static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3445{
3446 struct attn_route attn, *group_mask;
34f80b04 3447 int port = BP_PORT(bp);
877e9aa4 3448 int index;
a2fbb9ea
ET
3449 u32 reg_addr;
3450 u32 val;
3fcaf2e5 3451 u32 aeu_mask;
a2fbb9ea
ET
3452
3453 /* need to take HW lock because MCP or other port might also
3454 try to handle this event */
4a37fb66 3455 bnx2x_acquire_alr(bp);
a2fbb9ea 3456
72fd0718
VZ
3457 if (bnx2x_chk_parity_attn(bp)) {
3458 bp->recovery_state = BNX2X_RECOVERY_INIT;
3459 bnx2x_set_reset_in_progress(bp);
3460 schedule_delayed_work(&bp->reset_task, 0);
3461 /* Disable HW interrupts */
3462 bnx2x_int_disable(bp);
3463 bnx2x_release_alr(bp);
3464 /* In case of parity errors don't handle attentions so that
 3465 		 * the other function can still "see" the parity errors.
3466 */
3467 return;
3468 }
3469
3470 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3471 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3472 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3473 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
3474 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
3475 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
3476
3477 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3478 if (deasserted & (1 << index)) {
72fd0718 3479 group_mask = &bp->attn_group[index];
a2fbb9ea 3480
34f80b04 3481 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
3482 index, group_mask->sig[0], group_mask->sig[1],
3483 group_mask->sig[2], group_mask->sig[3]);
a2fbb9ea 3484
877e9aa4 3485 bnx2x_attn_int_deasserted3(bp,
72fd0718 3486 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3487 bnx2x_attn_int_deasserted1(bp,
72fd0718 3488 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3489 bnx2x_attn_int_deasserted2(bp,
72fd0718 3490 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3491 bnx2x_attn_int_deasserted0(bp,
72fd0718 3492 attn.sig[0] & group_mask->sig[0]);
3493 }
3494 }
3495
4a37fb66 3496 bnx2x_release_alr(bp);
a2fbb9ea 3497
5c862848 3498 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
3499
3500 val = ~deasserted;
3501 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
3502 val, reg_addr);
5c862848 3503 REG_WR(bp, reg_addr, val);
a2fbb9ea 3504
a2fbb9ea 3505 if (~bp->attn_state & deasserted)
3fcaf2e5 3506 BNX2X_ERR("IGU ERROR\n");
3507
3508 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3509 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3510
3511 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3512 aeu_mask = REG_RD(bp, reg_addr);
3513
3514 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3515 aeu_mask, deasserted);
72fd0718 3516 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3517 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3518
3519 REG_WR(bp, reg_addr, aeu_mask);
3520 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3521
3522 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3523 bp->attn_state &= ~deasserted;
3524 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3525}
3526
3527static void bnx2x_attn_int(struct bnx2x *bp)
3528{
3529 /* read local copy of bits */
3530 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3531 attn_bits);
3532 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3533 attn_bits_ack);
3534 u32 attn_state = bp->attn_state;
3535
3536 /* look for changed bits */
3537 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3538 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3539
3540 DP(NETIF_MSG_HW,
3541 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3542 attn_bits, attn_ack, asserted, deasserted);
3543
3544 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3545 BNX2X_ERR("BAD attention state\n");
3546
3547 /* handle bits that were raised */
3548 if (asserted)
3549 bnx2x_attn_int_asserted(bp, asserted);
3550
3551 if (deasserted)
3552 bnx2x_attn_int_deasserted(bp, deasserted);
3553}
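/* Illustrative walk through the bit arithmetic above, with made-up
 * values: attn_bits = 0110b, attn_ack = 0011b, attn_state = 0011b.
 *   asserted   = attn_bits & ~attn_ack & ~attn_state = 0100b
 *     (bit 2 newly raised by HW, not yet acked or recorded)
 *   deasserted = ~attn_bits & attn_ack & attn_state  = 0001b
 *     (bit 0 still recorded as set, but HW has dropped it)
 */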
3554
3555static void bnx2x_sp_task(struct work_struct *work)
3556{
1cf167f2 3557 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
3558 u16 status;
3559
34f80b04 3560
3561 /* Return here if interrupt is disabled */
3562 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3563 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3564 return;
3565 }
3566
3567 status = bnx2x_update_dsb_idx(bp);
3568/* if (status == 0) */
3569/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3570
3196a88a 3571 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 3572
3573 /* HW attentions */
3574 if (status & 0x1)
a2fbb9ea 3575 bnx2x_attn_int(bp);
a2fbb9ea 3576
68d59484 3577 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
3578 IGU_INT_NOP, 1);
3579 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
3580 IGU_INT_NOP, 1);
3581 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
3582 IGU_INT_NOP, 1);
3583 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
3584 IGU_INT_NOP, 1);
3585 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
3586 IGU_INT_ENABLE, 1);
877e9aa4 3587
3588}
3589
3590static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3591{
3592 struct net_device *dev = dev_instance;
3593 struct bnx2x *bp = netdev_priv(dev);
3594
3595 /* Return here if interrupt is disabled */
3596 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3597 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
3598 return IRQ_HANDLED;
3599 }
3600
8d9c5f34 3601 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3602
3603#ifdef BNX2X_STOP_ON_ERROR
3604 if (unlikely(bp->panic))
3605 return IRQ_HANDLED;
3606#endif
3607
3608#ifdef BCM_CNIC
3609 {
3610 struct cnic_ops *c_ops;
3611
3612 rcu_read_lock();
3613 c_ops = rcu_dereference(bp->cnic_ops);
3614 if (c_ops)
3615 c_ops->cnic_handler(bp->cnic_data, NULL);
3616 rcu_read_unlock();
3617 }
3618#endif
1cf167f2 3619 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3620
3621 return IRQ_HANDLED;
3622}
3623
3624/* end of slow path */
3625
3626/* Statistics */
3627
3628/****************************************************************************
3629* Macros
3630****************************************************************************/
3631
3632/* sum[hi:lo] += add[hi:lo] */
3633#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3634 do { \
3635 s_lo += a_lo; \
f5ba6772 3636 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3637 } while (0)
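/* The carry test relies on unsigned wrap-around: after s_lo += a_lo,
 * (s_lo < a_lo) holds exactly when the 32-bit add overflowed.
 * E.g. s_lo = 0xffffffff, a_lo = 2 -> s_lo becomes 1, and 1 < 2, so
 * one carry is propagated into s_hi.
 */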
3638
3639/* difference = minuend - subtrahend */
3640#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3641 do { \
3642 if (m_lo < s_lo) { \
3643 /* underflow */ \
a2fbb9ea 3644 d_hi = m_hi - s_hi; \
bb2a0f7a 3645 if (d_hi > 0) { \
6378c025 3646 /* we can 'loan' 1 */ \
3647 d_hi--; \
3648 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 3649 } else { \
6378c025 3650 /* m_hi <= s_hi */ \
3651 d_hi = 0; \
3652 d_lo = 0; \
3653 } \
3654 } else { \
3655 /* m_lo >= s_lo */ \
a2fbb9ea 3656 if (m_hi < s_hi) { \
3657 d_hi = 0; \
3658 d_lo = 0; \
3659 } else { \
6378c025 3660 /* m_hi >= s_hi */ \
3661 d_hi = m_hi - s_hi; \
3662 d_lo = m_lo - s_lo; \
3663 } \
3664 } \
3665 } while (0)
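/* Borrow handling sketch (made-up values): for minuend
 * m = 0x00000001_00000000 and subtrahend s = 0x00000000_00000001,
 * m_lo (0) < s_lo (1), so one is "loaned" from m_hi:
 * d_hi = 0 and d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff.
 * A difference that would be negative is clamped to zero instead of
 * being allowed to wrap.
 */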
3666
bb2a0f7a 3667#define UPDATE_STAT64(s, t) \
a2fbb9ea 3668 do { \
3669 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3670 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3671 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3672 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3673 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3674 pstats->mac_stx[1].t##_lo, diff.lo); \
3675 } while (0)
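/* mac_stx[0] caches the raw counters as last read from the MAC and
 * mac_stx[1] holds the accumulated totals; each update stores the new
 * raw value in [0] and adds only the 64-bit delta into [1].
 */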
3676
bb2a0f7a 3677#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 3678 do { \
3679 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3680 diff.lo, new->s##_lo, old->s##_lo); \
3681 ADD_64(estats->t##_hi, diff.hi, \
3682 estats->t##_lo, diff.lo); \
3683 } while (0)
3684
3685/* sum[hi:lo] += add */
3686#define ADD_EXTEND_64(s_hi, s_lo, a) \
3687 do { \
3688 s_lo += a; \
3689 s_hi += (s_lo < a) ? 1 : 0; \
3690 } while (0)
3691
bb2a0f7a 3692#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 3693 do { \
3694 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3695 pstats->mac_stx[1].s##_lo, \
3696 new->s); \
3697 } while (0)
3698
bb2a0f7a 3699#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea 3700 do { \
3701 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3702 old_tclient->s = tclient->s; \
3703 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3704 } while (0)
3705
3706#define UPDATE_EXTEND_USTAT(s, t) \
3707 do { \
3708 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3709 old_uclient->s = uclient->s; \
3710 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3711 } while (0)
3712
3713#define UPDATE_EXTEND_XSTAT(s, t) \
3714 do { \
3715 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3716 old_xclient->s = xclient->s; \
3717 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3718 } while (0)
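/* The *_EXTEND_* helpers widen 32-bit firmware counters into the
 * driver's 64-bit {hi, lo} pairs: diff is computed in 32-bit unsigned
 * arithmetic against the saved previous value, so a counter may wrap
 * at most once between updates without losing counts.
 */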
3719
3720/* minuend -= subtrahend */
3721#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3722 do { \
3723 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3724 } while (0)
3725
3726/* minuend[hi:lo] -= subtrahend */
3727#define SUB_EXTEND_64(m_hi, m_lo, s) \
3728 do { \
3729 SUB_64(m_hi, 0, m_lo, s); \
3730 } while (0)
3731
3732#define SUB_EXTEND_USTAT(s, t) \
3733 do { \
3734 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3735 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3736 } while (0)
3737
3738/*
3739 * General service functions
3740 */
3741
3742static inline long bnx2x_hilo(u32 *hiref)
3743{
3744 u32 lo = *(hiref + 1);
3745#if (BITS_PER_LONG == 64)
3746 u32 hi = *hiref;
3747
3748 return HILO_U64(hi, lo);
3749#else
3750 return lo;
3751#endif
3752}
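/* The stats structures store each 64-bit counter as a {hi, lo} pair of
 * u32s with hi first, hence hiref[0] is the upper and hiref[1] the
 * lower half.  On 32-bit kernels only the lower half is returned,
 * since struct net_device_stats fields are longs.
 */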
3753
3754/*
3755 * Init service functions
3756 */
3757
3758static void bnx2x_storm_stats_post(struct bnx2x *bp)
3759{
3760 if (!bp->stats_pending) {
3761 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 3762 int i, rc;
3763
3764 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 3765 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3766 for_each_queue(bp, i)
3767 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3768
3769 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3770 ((u32 *)&ramrod_data)[1],
3771 ((u32 *)&ramrod_data)[0], 0);
3772 if (rc == 0) {
 3773 			/* stats ramrod has its own slot on the spq */
3774 bp->spq_left++;
3775 bp->stats_pending = 1;
3776 }
3777 }
3778}
3779
3780static void bnx2x_hw_stats_post(struct bnx2x *bp)
3781{
3782 struct dmae_command *dmae = &bp->stats_dmae;
3783 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3784
3785 *stats_comp = DMAE_COMP_VAL;
3786 if (CHIP_REV_IS_SLOW(bp))
3787 return;
3788
3789 /* loader */
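	/* The loader is itself a DMAE command: it copies a prepared
	 * command from the slowpath buffer into the command memory of
	 * channel (loader_idx + 1), and its completion value is written
	 * to that channel's GO register, which launches the copied
	 * command without further CPU involvement.
	 */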
3790 if (bp->executer_idx) {
3791 int loader_idx = PMF_DMAE_C(bp);
3792
3793 memset(dmae, 0, sizeof(struct dmae_command));
3794
3795 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3796 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3797 DMAE_CMD_DST_RESET |
3798#ifdef __BIG_ENDIAN
3799 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3800#else
3801 DMAE_CMD_ENDIANITY_DW_SWAP |
3802#endif
3803 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3804 DMAE_CMD_PORT_0) |
3805 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3806 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3807 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3808 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3809 sizeof(struct dmae_command) *
3810 (loader_idx + 1)) >> 2;
3811 dmae->dst_addr_hi = 0;
3812 dmae->len = sizeof(struct dmae_command) >> 2;
3813 if (CHIP_IS_E1(bp))
3814 dmae->len--;
3815 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3816 dmae->comp_addr_hi = 0;
3817 dmae->comp_val = 1;
3818
3819 *stats_comp = 0;
3820 bnx2x_post_dmae(bp, dmae, loader_idx);
3821
3822 } else if (bp->func_stx) {
3823 *stats_comp = 0;
3824 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3825 }
3826}
3827
3828static int bnx2x_stats_comp(struct bnx2x *bp)
3829{
3830 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3831 int cnt = 10;
3832
3833 might_sleep();
3834 while (*stats_comp != DMAE_COMP_VAL) {
3835 if (!cnt) {
 3836 			BNX2X_ERR("timeout waiting for stats to finish\n");
3837 break;
3838 }
3839 cnt--;
12469401 3840 msleep(1);
3841 }
3842 return 1;
3843}
3844
3845/*
3846 * Statistics service functions
3847 */
3848
3849static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3850{
3851 struct dmae_command *dmae;
3852 u32 opcode;
3853 int loader_idx = PMF_DMAE_C(bp);
3854 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3855
3856 /* sanity */
3857 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3858 BNX2X_ERR("BUG!\n");
3859 return;
3860 }
3861
3862 bp->executer_idx = 0;
3863
3864 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3865 DMAE_CMD_C_ENABLE |
3866 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3867#ifdef __BIG_ENDIAN
3868 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3869#else
3870 DMAE_CMD_ENDIANITY_DW_SWAP |
3871#endif
3872 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3873 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3874
3875 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3876 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3877 dmae->src_addr_lo = bp->port.port_stx >> 2;
3878 dmae->src_addr_hi = 0;
3879 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3880 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3881 dmae->len = DMAE_LEN32_RD_MAX;
3882 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3883 dmae->comp_addr_hi = 0;
3884 dmae->comp_val = 1;
3885
3886 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3887 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3888 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3889 dmae->src_addr_hi = 0;
3890 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3891 DMAE_LEN32_RD_MAX * 4);
3892 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3893 DMAE_LEN32_RD_MAX * 4);
3894 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3895 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3896 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3897 dmae->comp_val = DMAE_COMP_VAL;
3898
3899 *stats_comp = 0;
3900 bnx2x_hw_stats_post(bp);
3901 bnx2x_stats_comp(bp);
3902}
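/* The port statistics block is wider than one DMAE read allows, so the
 * function above splits it in two: the first command moves
 * DMAE_LEN32_RD_MAX dwords, the second moves the remainder starting at
 * that dword offset (note the "* 4" conversion to a byte offset on the
 * PCI side).
 */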
3903
3904static void bnx2x_port_stats_init(struct bnx2x *bp)
3905{
3906 struct dmae_command *dmae;
34f80b04 3907 int port = BP_PORT(bp);
bb2a0f7a 3908 int vn = BP_E1HVN(bp);
a2fbb9ea 3909 u32 opcode;
bb2a0f7a 3910 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3911 u32 mac_addr;
3912 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3913
3914 /* sanity */
3915 if (!bp->link_vars.link_up || !bp->port.pmf) {
3916 BNX2X_ERR("BUG!\n");
3917 return;
3918 }
3919
3920 bp->executer_idx = 0;
3921
3922 /* MCP */
3923 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3924 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3925 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3926#ifdef __BIG_ENDIAN
bb2a0f7a 3927 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3928#else
bb2a0f7a 3929 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3930#endif
3931 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3932 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3933
bb2a0f7a 3934 if (bp->port.port_stx) {
3935
3936 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3937 dmae->opcode = opcode;
3938 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3939 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3940 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3941 dmae->dst_addr_hi = 0;
3942 dmae->len = sizeof(struct host_port_stats) >> 2;
3943 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3944 dmae->comp_addr_hi = 0;
3945 dmae->comp_val = 1;
3946 }
3947
3948 if (bp->func_stx) {
3949
3950 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3951 dmae->opcode = opcode;
3952 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3953 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3954 dmae->dst_addr_lo = bp->func_stx >> 2;
3955 dmae->dst_addr_hi = 0;
3956 dmae->len = sizeof(struct host_func_stats) >> 2;
3957 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3958 dmae->comp_addr_hi = 0;
3959 dmae->comp_val = 1;
3960 }
3961
bb2a0f7a 3962 /* MAC */
3963 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3964 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3965 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3966#ifdef __BIG_ENDIAN
3967 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3968#else
3969 DMAE_CMD_ENDIANITY_DW_SWAP |
3970#endif
3971 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3972 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3973
c18487ee 3974 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3975
3976 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3977 NIG_REG_INGRESS_BMAC0_MEM);
3978
3979 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3980 BIGMAC_REGISTER_TX_STAT_GTBYT */
3981 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3982 dmae->opcode = opcode;
3983 dmae->src_addr_lo = (mac_addr +
3984 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3985 dmae->src_addr_hi = 0;
3986 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3987 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3988 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3989 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3990 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3991 dmae->comp_addr_hi = 0;
3992 dmae->comp_val = 1;
3993
3994 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3995 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3996 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3997 dmae->opcode = opcode;
3998 dmae->src_addr_lo = (mac_addr +
3999 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4000 dmae->src_addr_hi = 0;
4001 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4002 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 4003 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4004 offsetof(struct bmac_stats, rx_stat_gr64_lo));
4005 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
4006 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
4007 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4008 dmae->comp_addr_hi = 0;
4009 dmae->comp_val = 1;
4010
c18487ee 4011 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
4012
4013 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
4014
4015 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
4016 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4017 dmae->opcode = opcode;
4018 dmae->src_addr_lo = (mac_addr +
4019 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
4020 dmae->src_addr_hi = 0;
4021 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
4022 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
4023 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
4024 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4025 dmae->comp_addr_hi = 0;
4026 dmae->comp_val = 1;
4027
4028 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
4029 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4030 dmae->opcode = opcode;
4031 dmae->src_addr_lo = (mac_addr +
4032 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
4033 dmae->src_addr_hi = 0;
4034 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4035 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 4036 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4037 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
4038 dmae->len = 1;
4039 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4040 dmae->comp_addr_hi = 0;
4041 dmae->comp_val = 1;
4042
4043 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
4044 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4045 dmae->opcode = opcode;
4046 dmae->src_addr_lo = (mac_addr +
4047 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
4048 dmae->src_addr_hi = 0;
4049 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4050 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 4051 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 4052 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
4053 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
4054 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4055 dmae->comp_addr_hi = 0;
4056 dmae->comp_val = 1;
4057 }
4058
4059 /* NIG */
4060 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4061 dmae->opcode = opcode;
4062 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
4063 NIG_REG_STAT0_BRB_DISCARD) >> 2;
4064 dmae->src_addr_hi = 0;
4065 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
4066 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
4067 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
4068 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4069 dmae->comp_addr_hi = 0;
4070 dmae->comp_val = 1;
4071
4072 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4073 dmae->opcode = opcode;
4074 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
4075 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
4076 dmae->src_addr_hi = 0;
4077 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4078 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4079 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4080 offsetof(struct nig_stats, egress_mac_pkt0_lo));
4081 dmae->len = (2*sizeof(u32)) >> 2;
4082 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4083 dmae->comp_addr_hi = 0;
4084 dmae->comp_val = 1;
4085
4086 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4087 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4088 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4089 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4090#ifdef __BIG_ENDIAN
4091 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4092#else
4093 DMAE_CMD_ENDIANITY_DW_SWAP |
4094#endif
4095 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4096 (vn << DMAE_CMD_E1HVN_SHIFT));
4097 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
4098 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 4099 dmae->src_addr_hi = 0;
4100 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
4101 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4102 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
4103 offsetof(struct nig_stats, egress_mac_pkt1_lo));
4104 dmae->len = (2*sizeof(u32)) >> 2;
4105 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4106 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4107 dmae->comp_val = DMAE_COMP_VAL;
4108
4109 *stats_comp = 0;
4110}
4111
bb2a0f7a 4112static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 4113{
4114 struct dmae_command *dmae = &bp->stats_dmae;
4115 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4116
4117 /* sanity */
4118 if (!bp->func_stx) {
4119 BNX2X_ERR("BUG!\n");
4120 return;
4121 }
a2fbb9ea 4122
4123 bp->executer_idx = 0;
4124 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 4125
4126 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4127 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4128 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4129#ifdef __BIG_ENDIAN
4130 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4131#else
4132 DMAE_CMD_ENDIANITY_DW_SWAP |
4133#endif
4134 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4135 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4136 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4137 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4138 dmae->dst_addr_lo = bp->func_stx >> 2;
4139 dmae->dst_addr_hi = 0;
4140 dmae->len = sizeof(struct host_func_stats) >> 2;
4141 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4142 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4143 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4144
4145 *stats_comp = 0;
4146}
a2fbb9ea 4147
4148static void bnx2x_stats_start(struct bnx2x *bp)
4149{
4150 if (bp->port.pmf)
4151 bnx2x_port_stats_init(bp);
4152
4153 else if (bp->func_stx)
4154 bnx2x_func_stats_init(bp);
4155
4156 bnx2x_hw_stats_post(bp);
4157 bnx2x_storm_stats_post(bp);
4158}
4159
4160static void bnx2x_stats_pmf_start(struct bnx2x *bp)
4161{
4162 bnx2x_stats_comp(bp);
4163 bnx2x_stats_pmf_update(bp);
4164 bnx2x_stats_start(bp);
4165}
4166
4167static void bnx2x_stats_restart(struct bnx2x *bp)
4168{
4169 bnx2x_stats_comp(bp);
4170 bnx2x_stats_start(bp);
4171}
4172
4173static void bnx2x_bmac_stats_update(struct bnx2x *bp)
4174{
4175 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
4176 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4177 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4178 struct {
4179 u32 lo;
4180 u32 hi;
4181 } diff;
4182
4183 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
4184 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
4185 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
4186 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
4187 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
4188 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 4189 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 4190 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 4191 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
4192 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
4193 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
4194 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
4195 UPDATE_STAT64(tx_stat_gt127,
4196 tx_stat_etherstatspkts65octetsto127octets);
4197 UPDATE_STAT64(tx_stat_gt255,
4198 tx_stat_etherstatspkts128octetsto255octets);
4199 UPDATE_STAT64(tx_stat_gt511,
4200 tx_stat_etherstatspkts256octetsto511octets);
4201 UPDATE_STAT64(tx_stat_gt1023,
4202 tx_stat_etherstatspkts512octetsto1023octets);
4203 UPDATE_STAT64(tx_stat_gt1518,
4204 tx_stat_etherstatspkts1024octetsto1522octets);
4205 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
4206 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
4207 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
4208 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
4209 UPDATE_STAT64(tx_stat_gterr,
4210 tx_stat_dot3statsinternalmactransmiterrors);
4211 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
4212
4213 estats->pause_frames_received_hi =
4214 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
4215 estats->pause_frames_received_lo =
4216 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
4217
4218 estats->pause_frames_sent_hi =
4219 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
4220 estats->pause_frames_sent_lo =
4221 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
4222}
4223
4224static void bnx2x_emac_stats_update(struct bnx2x *bp)
4225{
4226 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
4227 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 4228 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4229
4230 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
4231 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
4232 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
4233 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
4234 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
4235 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
4236 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
4237 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
4238 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
4239 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
4240 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
4241 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
4242 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
4243 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
4244 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
4245 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
4246 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
4247 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
4248 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
4249 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
4250 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
4251 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
4252 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
4253 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
4254 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
4255 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
4256 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
4257 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
4258 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
4259 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
4260 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
4261
4262 estats->pause_frames_received_hi =
4263 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
4264 estats->pause_frames_received_lo =
4265 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
4266 ADD_64(estats->pause_frames_received_hi,
4267 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
4268 estats->pause_frames_received_lo,
4269 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
4270
4271 estats->pause_frames_sent_hi =
4272 pstats->mac_stx[1].tx_stat_outxonsent_hi;
4273 estats->pause_frames_sent_lo =
4274 pstats->mac_stx[1].tx_stat_outxonsent_lo;
4275 ADD_64(estats->pause_frames_sent_hi,
4276 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
4277 estats->pause_frames_sent_lo,
4278 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
4279}
4280
4281static int bnx2x_hw_stats_update(struct bnx2x *bp)
4282{
4283 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
4284 struct nig_stats *old = &(bp->port.old_nig_stats);
4285 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
4286 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4287 struct {
4288 u32 lo;
4289 u32 hi;
4290 } diff;
de832a55 4291 u32 nig_timer_max;
4292
4293 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
4294 bnx2x_bmac_stats_update(bp);
4295
4296 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
4297 bnx2x_emac_stats_update(bp);
4298
4299 else { /* unreached */
c3eefaf6 4300 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
4301 return -1;
4302 }
a2fbb9ea 4303
4304 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
4305 new->brb_discard - old->brb_discard);
4306 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
4307 new->brb_truncate - old->brb_truncate);
a2fbb9ea 4308
4309 UPDATE_STAT64_NIG(egress_mac_pkt0,
4310 etherstatspkts1024octetsto1522octets);
4311 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 4312
bb2a0f7a 4313 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 4314
4315 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
4316 sizeof(struct mac_stx));
4317 estats->brb_drop_hi = pstats->brb_drop_hi;
4318 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 4319
bb2a0f7a 4320 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 4321
4322 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
4323 if (nig_timer_max != estats->nig_timer_max) {
4324 estats->nig_timer_max = nig_timer_max;
4325 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
4326 }
4327
bb2a0f7a 4328 return 0;
4329}
4330
bb2a0f7a 4331static int bnx2x_storm_stats_update(struct bnx2x *bp)
4332{
4333 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 4334 struct tstorm_per_port_stats *tport =
de832a55 4335 &stats->tstorm_common.port_statistics;
4336 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
4337 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4338 int i;
4339
4340 memcpy(&(fstats->total_bytes_received_hi),
4341 &(bnx2x_sp(bp, func_stats_base)->total_bytes_received_hi),
4342 sizeof(struct host_func_stats) - 2*sizeof(u32));
4343 estats->error_bytes_received_hi = 0;
4344 estats->error_bytes_received_lo = 0;
4345 estats->etherstatsoverrsizepkts_hi = 0;
4346 estats->etherstatsoverrsizepkts_lo = 0;
4347 estats->no_buff_discard_hi = 0;
4348 estats->no_buff_discard_lo = 0;
a2fbb9ea 4349
54b9ddaa 4350 for_each_queue(bp, i) {
4351 struct bnx2x_fastpath *fp = &bp->fp[i];
4352 int cl_id = fp->cl_id;
4353 struct tstorm_per_client_stats *tclient =
4354 &stats->tstorm_common.client_statistics[cl_id];
4355 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
4356 struct ustorm_per_client_stats *uclient =
4357 &stats->ustorm_common.client_statistics[cl_id];
4358 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
4359 struct xstorm_per_client_stats *xclient =
4360 &stats->xstorm_common.client_statistics[cl_id];
4361 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
4362 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
4363 u32 diff;
4364
4365 /* are storm stats valid? */
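		/* bp->stats_counter was post-incremented when the query
		 * ramrod was submitted, so a client block is fresh only
		 * when its snapshot plus one equals the driver counter;
		 * the u16 cast keeps the test correct across wrap-around.
		 */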
4366 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 4367 bp->stats_counter) {
4368 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
4369 " xstorm counter (%d) != stats_counter (%d)\n",
4370 i, xclient->stats_counter, bp->stats_counter);
4371 return -1;
4372 }
4373 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 4374 bp->stats_counter) {
4375 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
4376 " tstorm counter (%d) != stats_counter (%d)\n",
4377 i, tclient->stats_counter, bp->stats_counter);
4378 return -2;
4379 }
4380 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
4381 bp->stats_counter) {
4382 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
4383 " ustorm counter (%d) != stats_counter (%d)\n",
4384 i, uclient->stats_counter, bp->stats_counter);
4385 return -4;
4386 }
a2fbb9ea 4387
de832a55 4388 qstats->total_bytes_received_hi =
ca00392c 4389 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
de832a55 4390 qstats->total_bytes_received_lo =
4391 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
4392
4393 ADD_64(qstats->total_bytes_received_hi,
4394 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
4395 qstats->total_bytes_received_lo,
4396 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
4397
4398 ADD_64(qstats->total_bytes_received_hi,
4399 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
4400 qstats->total_bytes_received_lo,
4401 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
4402
4403 qstats->valid_bytes_received_hi =
4404 qstats->total_bytes_received_hi;
de832a55 4405 qstats->valid_bytes_received_lo =
ca00392c 4406 qstats->total_bytes_received_lo;
bb2a0f7a 4407
de832a55 4408 qstats->error_bytes_received_hi =
bb2a0f7a 4409 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 4410 qstats->error_bytes_received_lo =
bb2a0f7a 4411 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 4412
4413 ADD_64(qstats->total_bytes_received_hi,
4414 qstats->error_bytes_received_hi,
4415 qstats->total_bytes_received_lo,
4416 qstats->error_bytes_received_lo);
4417
4418 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
4419 total_unicast_packets_received);
4420 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
4421 total_multicast_packets_received);
4422 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
4423 total_broadcast_packets_received);
4424 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
4425 etherstatsoverrsizepkts);
4426 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
4427
4428 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
4429 total_unicast_packets_received);
4430 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
4431 total_multicast_packets_received);
4432 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
4433 total_broadcast_packets_received);
4434 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
4435 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
4436 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
4437
4438 qstats->total_bytes_transmitted_hi =
ca00392c 4439 le32_to_cpu(xclient->unicast_bytes_sent.hi);
de832a55 4440 qstats->total_bytes_transmitted_lo =
4441 le32_to_cpu(xclient->unicast_bytes_sent.lo);
4442
4443 ADD_64(qstats->total_bytes_transmitted_hi,
4444 le32_to_cpu(xclient->multicast_bytes_sent.hi),
4445 qstats->total_bytes_transmitted_lo,
4446 le32_to_cpu(xclient->multicast_bytes_sent.lo));
4447
4448 ADD_64(qstats->total_bytes_transmitted_hi,
4449 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
4450 qstats->total_bytes_transmitted_lo,
4451 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
bb2a0f7a 4452
4453 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
4454 total_unicast_packets_transmitted);
4455 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
4456 total_multicast_packets_transmitted);
4457 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
4458 total_broadcast_packets_transmitted);
4459
4460 old_tclient->checksum_discard = tclient->checksum_discard;
4461 old_tclient->ttl0_discard = tclient->ttl0_discard;
4462
4463 ADD_64(fstats->total_bytes_received_hi,
4464 qstats->total_bytes_received_hi,
4465 fstats->total_bytes_received_lo,
4466 qstats->total_bytes_received_lo);
4467 ADD_64(fstats->total_bytes_transmitted_hi,
4468 qstats->total_bytes_transmitted_hi,
4469 fstats->total_bytes_transmitted_lo,
4470 qstats->total_bytes_transmitted_lo);
4471 ADD_64(fstats->total_unicast_packets_received_hi,
4472 qstats->total_unicast_packets_received_hi,
4473 fstats->total_unicast_packets_received_lo,
4474 qstats->total_unicast_packets_received_lo);
4475 ADD_64(fstats->total_multicast_packets_received_hi,
4476 qstats->total_multicast_packets_received_hi,
4477 fstats->total_multicast_packets_received_lo,
4478 qstats->total_multicast_packets_received_lo);
4479 ADD_64(fstats->total_broadcast_packets_received_hi,
4480 qstats->total_broadcast_packets_received_hi,
4481 fstats->total_broadcast_packets_received_lo,
4482 qstats->total_broadcast_packets_received_lo);
4483 ADD_64(fstats->total_unicast_packets_transmitted_hi,
4484 qstats->total_unicast_packets_transmitted_hi,
4485 fstats->total_unicast_packets_transmitted_lo,
4486 qstats->total_unicast_packets_transmitted_lo);
4487 ADD_64(fstats->total_multicast_packets_transmitted_hi,
4488 qstats->total_multicast_packets_transmitted_hi,
4489 fstats->total_multicast_packets_transmitted_lo,
4490 qstats->total_multicast_packets_transmitted_lo);
4491 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
4492 qstats->total_broadcast_packets_transmitted_hi,
4493 fstats->total_broadcast_packets_transmitted_lo,
4494 qstats->total_broadcast_packets_transmitted_lo);
4495 ADD_64(fstats->valid_bytes_received_hi,
4496 qstats->valid_bytes_received_hi,
4497 fstats->valid_bytes_received_lo,
4498 qstats->valid_bytes_received_lo);
4499
4500 ADD_64(estats->error_bytes_received_hi,
4501 qstats->error_bytes_received_hi,
4502 estats->error_bytes_received_lo,
4503 qstats->error_bytes_received_lo);
4504 ADD_64(estats->etherstatsoverrsizepkts_hi,
4505 qstats->etherstatsoverrsizepkts_hi,
4506 estats->etherstatsoverrsizepkts_lo,
4507 qstats->etherstatsoverrsizepkts_lo);
4508 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
4509 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
4510 }
4511
4512 ADD_64(fstats->total_bytes_received_hi,
4513 estats->rx_stat_ifhcinbadoctets_hi,
4514 fstats->total_bytes_received_lo,
4515 estats->rx_stat_ifhcinbadoctets_lo);
4516
4517 memcpy(estats, &(fstats->total_bytes_received_hi),
4518 sizeof(struct host_func_stats) - 2*sizeof(u32));
4519
4520 ADD_64(estats->etherstatsoverrsizepkts_hi,
4521 estats->rx_stat_dot3statsframestoolong_hi,
4522 estats->etherstatsoverrsizepkts_lo,
4523 estats->rx_stat_dot3statsframestoolong_lo);
4524 ADD_64(estats->error_bytes_received_hi,
4525 estats->rx_stat_ifhcinbadoctets_hi,
4526 estats->error_bytes_received_lo,
4527 estats->rx_stat_ifhcinbadoctets_lo);
4528
4529 if (bp->port.pmf) {
4530 estats->mac_filter_discard =
4531 le32_to_cpu(tport->mac_filter_discard);
4532 estats->xxoverflow_discard =
4533 le32_to_cpu(tport->xxoverflow_discard);
4534 estats->brb_truncate_discard =
bb2a0f7a 4535 le32_to_cpu(tport->brb_truncate_discard);
4536 estats->mac_discard = le32_to_cpu(tport->mac_discard);
4537 }
4538
4539 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 4540
4541 bp->stats_pending = 0;
4542
4543 return 0;
4544}
4545
bb2a0f7a 4546static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 4547{
bb2a0f7a 4548 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4549 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 4550 int i;
4551
4552 nstats->rx_packets =
4553 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
4554 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
4555 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
4556
4557 nstats->tx_packets =
4558 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
4559 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
4560 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
4561
de832a55 4562 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 4563
0e39e645 4564 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 4565
de832a55 4566 nstats->rx_dropped = estats->mac_discard;
54b9ddaa 4567 for_each_queue(bp, i)
4568 nstats->rx_dropped +=
4569 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4570
4571 nstats->tx_dropped = 0;
4572
4573 nstats->multicast =
de832a55 4574 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 4575
bb2a0f7a 4576 nstats->collisions =
de832a55 4577 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4578
4579 nstats->rx_length_errors =
4580 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4581 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4582 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4583 bnx2x_hilo(&estats->brb_truncate_hi);
4584 nstats->rx_crc_errors =
4585 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4586 nstats->rx_frame_errors =
4587 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4588 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4589 nstats->rx_missed_errors = estats->xxoverflow_discard;
4590
4591 nstats->rx_errors = nstats->rx_length_errors +
4592 nstats->rx_over_errors +
4593 nstats->rx_crc_errors +
4594 nstats->rx_frame_errors +
4595 nstats->rx_fifo_errors +
4596 nstats->rx_missed_errors;
a2fbb9ea 4597
bb2a0f7a 4598 nstats->tx_aborted_errors =
4599 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4600 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4601 nstats->tx_carrier_errors =
4602 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4603 nstats->tx_fifo_errors = 0;
4604 nstats->tx_heartbeat_errors = 0;
4605 nstats->tx_window_errors = 0;
4606
4607 nstats->tx_errors = nstats->tx_aborted_errors +
4608 nstats->tx_carrier_errors +
4609 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4610}
4611
4612static void bnx2x_drv_stats_update(struct bnx2x *bp)
4613{
4614 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4615 int i;
4616
4617 estats->driver_xoff = 0;
4618 estats->rx_err_discard_pkt = 0;
4619 estats->rx_skb_alloc_failed = 0;
4620 estats->hw_csum_err = 0;
54b9ddaa 4621 for_each_queue(bp, i) {
4622 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4623
4624 estats->driver_xoff += qstats->driver_xoff;
4625 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4626 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4627 estats->hw_csum_err += qstats->hw_csum_err;
4628 }
4629}
4630
bb2a0f7a 4631static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 4632{
bb2a0f7a 4633 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4634
4635 if (*stats_comp != DMAE_COMP_VAL)
4636 return;
4637
4638 if (bp->port.pmf)
de832a55 4639 bnx2x_hw_stats_update(bp);
a2fbb9ea 4640
4641 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
 4642 		BNX2X_ERR("storm stats not updated for 3 consecutive polls\n");
4643 bnx2x_panic();
4644 return;
4645 }
4646
4647 bnx2x_net_stats_update(bp);
4648 bnx2x_drv_stats_update(bp);
4649
7995c64e 4650 if (netif_msg_timer(bp)) {
ca00392c 4651 struct bnx2x_fastpath *fp0_rx = bp->fp;
54b9ddaa 4652 struct bnx2x_fastpath *fp0_tx = bp->fp;
4653 struct tstorm_per_client_stats *old_tclient =
4654 &bp->fp->old_tclient;
4655 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 4656 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 4657 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 4658 int i;
a2fbb9ea 4659
7995c64e 4660 netdev_printk(KERN_DEBUG, bp->dev, "\n");
4661 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4662 " tx pkt (%lx)\n",
4663 bnx2x_tx_avail(fp0_tx),
4664 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4665 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4666 " rx pkt (%lx)\n",
4667 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4668 fp0_rx->rx_comp_cons),
4669 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4670 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4671 "brb truncate %u\n",
4672 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4673 qstats->driver_xoff,
4674 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 4675 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 4676 "packets_too_big_discard %lu no_buff_discard %lu "
4677 "mac_discard %u mac_filter_discard %u "
4678 "xxovrflow_discard %u brb_truncate_discard %u "
4679 "ttl0_discard %u\n",
4781bfad 4680 le32_to_cpu(old_tclient->checksum_discard),
4681 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4682 bnx2x_hilo(&qstats->no_buff_discard_hi),
4683 estats->mac_discard, estats->mac_filter_discard,
4684 estats->xxoverflow_discard, estats->brb_truncate_discard,
4781bfad 4685 le32_to_cpu(old_tclient->ttl0_discard));
4686
4687 for_each_queue(bp, i) {
4688 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4689 bnx2x_fp(bp, i, tx_pkt),
4690 bnx2x_fp(bp, i, rx_pkt),
4691 bnx2x_fp(bp, i, rx_calls));
4692 }
4693 }
4694
4695 bnx2x_hw_stats_post(bp);
4696 bnx2x_storm_stats_post(bp);
4697}
a2fbb9ea 4698
4699static void bnx2x_port_stats_stop(struct bnx2x *bp)
4700{
4701 struct dmae_command *dmae;
4702 u32 opcode;
4703 int loader_idx = PMF_DMAE_C(bp);
4704 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 4705
bb2a0f7a 4706 bp->executer_idx = 0;
a2fbb9ea 4707
4708 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4709 DMAE_CMD_C_ENABLE |
4710 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 4711#ifdef __BIG_ENDIAN
bb2a0f7a 4712 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 4713#else
bb2a0f7a 4714 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 4715#endif
4716 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4717 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4718
4719 if (bp->port.port_stx) {
4720
4721 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4722 if (bp->func_stx)
4723 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4724 else
4725 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4726 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4727 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4728 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 4729 dmae->dst_addr_hi = 0;
4730 dmae->len = sizeof(struct host_port_stats) >> 2;
4731 if (bp->func_stx) {
4732 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4733 dmae->comp_addr_hi = 0;
4734 dmae->comp_val = 1;
4735 } else {
4736 dmae->comp_addr_lo =
4737 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4738 dmae->comp_addr_hi =
4739 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4740 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 4741
4742 *stats_comp = 0;
4743 }
4744 }
4745
4746 if (bp->func_stx) {
4747
4748 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4749 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4750 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4751 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4752 dmae->dst_addr_lo = bp->func_stx >> 2;
4753 dmae->dst_addr_hi = 0;
4754 dmae->len = sizeof(struct host_func_stats) >> 2;
4755 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4756 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4757 dmae->comp_val = DMAE_COMP_VAL;
4758
4759 *stats_comp = 0;
a2fbb9ea 4760 }
4761}
4762
4763static void bnx2x_stats_stop(struct bnx2x *bp)
4764{
4765 int update = 0;
4766
4767 bnx2x_stats_comp(bp);
4768
4769 if (bp->port.pmf)
4770 update = (bnx2x_hw_stats_update(bp) == 0);
4771
4772 update |= (bnx2x_storm_stats_update(bp) == 0);
4773
4774 if (update) {
4775 bnx2x_net_stats_update(bp);
a2fbb9ea 4776
4777 if (bp->port.pmf)
4778 bnx2x_port_stats_stop(bp);
4779
4780 bnx2x_hw_stats_post(bp);
4781 bnx2x_stats_comp(bp);
4782 }
4783}
4784
4785static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4786{
4787}
4788
4789static const struct {
4790 void (*action)(struct bnx2x *bp);
4791 enum bnx2x_stats_state next_state;
4792} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4793/* state event */
4794{
4795/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4796/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4797/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4798/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4799},
4800{
4801/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4802/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4803/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4804/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4805}
4806};
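/* Example walk through the table: with statistics disabled, a LINK_UP
 * event runs bnx2x_stats_start() and moves the machine to
 * STATS_STATE_ENABLED; a subsequent UPDATE event then runs
 * bnx2x_stats_update() and stays in ENABLED, while STOP runs
 * bnx2x_stats_stop() and returns to DISABLED.
 */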
4807
4808static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4809{
4810 enum bnx2x_stats_state state = bp->stats_state;
4811
4812 bnx2x_stats_stm[state][event].action(bp);
4813 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4814
4815 /* Make sure the state has been "changed" */
4816 smp_wmb();
4817
7995c64e 4818 if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
4819 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4820 state, event, bp->stats_state);
4821}
4822
4823static void bnx2x_port_stats_base_init(struct bnx2x *bp)
4824{
4825 struct dmae_command *dmae;
4826 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4827
4828 /* sanity */
4829 if (!bp->port.pmf || !bp->port.port_stx) {
4830 BNX2X_ERR("BUG!\n");
4831 return;
4832 }
4833
4834 bp->executer_idx = 0;
4835
4836 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4837 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4838 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4839 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4840#ifdef __BIG_ENDIAN
4841 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4842#else
4843 DMAE_CMD_ENDIANITY_DW_SWAP |
4844#endif
4845 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4846 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4847 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4848 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4849 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4850 dmae->dst_addr_hi = 0;
4851 dmae->len = sizeof(struct host_port_stats) >> 2;
4852 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4853 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4854 dmae->comp_val = DMAE_COMP_VAL;
4855
4856 *stats_comp = 0;
4857 bnx2x_hw_stats_post(bp);
4858 bnx2x_stats_comp(bp);
4859}
4860
4861static void bnx2x_func_stats_base_init(struct bnx2x *bp)
4862{
4863 int vn, vn_max = IS_E1HMF(bp) ? E1HVN_MAX : E1VN_MAX;
4864 int port = BP_PORT(bp);
4865 int func;
4866 u32 func_stx;
4867
4868 /* sanity */
4869 if (!bp->port.pmf || !bp->func_stx) {
4870 BNX2X_ERR("BUG!\n");
4871 return;
4872 }
4873
4874 /* save our func_stx */
4875 func_stx = bp->func_stx;
4876
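	/* One PCI function per virtual engine on this port: with
	 * func = 2*vn + port, port 0 hosts the even function numbers
	 * and port 1 the odd ones.
	 */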
4877 for (vn = VN_0; vn < vn_max; vn++) {
4878 func = 2*vn + port;
4879
4880 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4881 bnx2x_func_stats_init(bp);
4882 bnx2x_hw_stats_post(bp);
4883 bnx2x_stats_comp(bp);
4884 }
4885
4886 /* restore our func_stx */
4887 bp->func_stx = func_stx;
4888}
4889
4890static void bnx2x_func_stats_base_update(struct bnx2x *bp)
4891{
4892 struct dmae_command *dmae = &bp->stats_dmae;
4893 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4894
4895 /* sanity */
4896 if (!bp->func_stx) {
4897 BNX2X_ERR("BUG!\n");
4898 return;
4899 }
4900
4901 bp->executer_idx = 0;
4902 memset(dmae, 0, sizeof(struct dmae_command));
4903
4904 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
4905 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
4906 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4907#ifdef __BIG_ENDIAN
4908 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4909#else
4910 DMAE_CMD_ENDIANITY_DW_SWAP |
4911#endif
4912 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4913 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4914 dmae->src_addr_lo = bp->func_stx >> 2;
4915 dmae->src_addr_hi = 0;
4916 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats_base));
4917 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats_base));
4918 dmae->len = sizeof(struct host_func_stats) >> 2;
4919 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4920 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4921 dmae->comp_val = DMAE_COMP_VAL;
4922
4923 *stats_comp = 0;
4924 bnx2x_hw_stats_post(bp);
4925 bnx2x_stats_comp(bp);
4926}
4927
4928static void bnx2x_stats_init(struct bnx2x *bp)
4929{
4930 int port = BP_PORT(bp);
4931 int func = BP_FUNC(bp);
4932 int i;
4933
4934 bp->stats_pending = 0;
4935 bp->executer_idx = 0;
4936 bp->stats_counter = 0;
4937
4938 /* port and func stats for management */
4939 if (!BP_NOMCP(bp)) {
4940 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
4941 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
4942
4943 } else {
4944 bp->port.port_stx = 0;
4945 bp->func_stx = 0;
4946 }
4947 DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n",
4948 bp->port.port_stx, bp->func_stx);
4949
4950 /* port stats */
4951 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
4952 bp->port.old_nig_stats.brb_discard =
4953 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
4954 bp->port.old_nig_stats.brb_truncate =
4955 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
4956 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
4957 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
4958 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
4959 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
4960
4961 /* function stats */
4962 for_each_queue(bp, i) {
4963 struct bnx2x_fastpath *fp = &bp->fp[i];
4964
4965 memset(&fp->old_tclient, 0,
4966 sizeof(struct tstorm_per_client_stats));
4967 memset(&fp->old_uclient, 0,
4968 sizeof(struct ustorm_per_client_stats));
4969 memset(&fp->old_xclient, 0,
4970 sizeof(struct xstorm_per_client_stats));
4971 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
4972 }
4973
4974 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
4975 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
4976
4977 bp->stats_state = STATS_STATE_DISABLED;
4978
4979 if (bp->port.pmf) {
4980 if (bp->port.port_stx)
4981 bnx2x_port_stats_base_init(bp);
4982
4983 if (bp->func_stx)
4984 bnx2x_func_stats_base_init(bp);
4985
4986 } else if (bp->func_stx)
4987 bnx2x_func_stats_base_update(bp);
4988}
4989
4990static void bnx2x_timer(unsigned long data)
4991{
4992 struct bnx2x *bp = (struct bnx2x *) data;
4993
4994 if (!netif_running(bp->dev))
4995 return;
4996
4997 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4998 goto timer_restart;
4999
5000 if (poll) {
5001 struct bnx2x_fastpath *fp = &bp->fp[0];
5002 int rc;
5003
7961f791 5004 bnx2x_tx_int(fp);
5005 rc = bnx2x_rx_int(fp, 1000);
5006 }
5007
5008 if (!BP_NOMCP(bp)) {
5009 int func = BP_FUNC(bp);
5010 u32 drv_pulse;
5011 u32 mcp_pulse;
5012
5013 ++bp->fw_drv_pulse_wr_seq;
5014 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5015 /* TBD - add SYSTEM_TIME */
5016 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 5017 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 5018
34f80b04 5019 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
5020 MCP_PULSE_SEQ_MASK);
5021 /* The delta between driver pulse and mcp response
5022 * should be 1 (before mcp response) or 0 (after mcp response)
5023 */
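		/* e.g. drv_pulse 0x005 vs mcp_pulse 0x004 (MCP one
		 * behind) or 0x005 vs 0x005 are both healthy; any
		 * other delta triggers the error below.
		 */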
5024 if ((drv_pulse != mcp_pulse) &&
5025 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5026 /* someone lost a heartbeat... */
5027 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5028 drv_pulse, mcp_pulse);
5029 }
5030 }
5031
f34d28ea 5032 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5033 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5034
f1410647 5035timer_restart:
a2fbb9ea
ET
5036 mod_timer(&bp->timer, jiffies + bp->current_interval);
5037}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	/* "CSTORM" */
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
			CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
}

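/*
 * Program a fastpath status block: point the CSTORM at its host DMA
 * address, bind it to this function, and start with host coalescing
 * disabled on every index until bnx2x_update_coalesce() programs the
 * actual timeouts.
 */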
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_u)/4);
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block_c)/4);
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
}

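/*
 * Program the default (slowpath) status block: latch the four AEU
 * attention group masks, register the attention message address with
 * the HC, and point each storm at its section of the host buffer with
 * host coalescing initially disabled on all indices.
 */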
static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

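/*
 * Convert the configured interrupt coalescing values into HC timeout
 * ticks (units of 4*BNX2X_BTR) for the Rx and Tx completion indices of
 * every queue; a timeout of zero disables host coalescing for that
 * index.
 */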
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
						      U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
						       U_SB_ETH_RX_CQ_INDEX),
			 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
						      C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/(4 * BNX2X_BTR));
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
						       C_SB_ETH_TX_CQ_INDEX),
			 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

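/*
 * Set up all Rx rings: pre-allocate the per-queue TPA skb pool (falling
 * back to TPA-disabled on allocation failure), chain the "next page"
 * elements of the SGE, BD and CQE rings, fill the rings with buffers,
 * and publish the initial producers to the chip.
 */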
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				/* clear this queue's pool entry (was
				   mistakenly bp->fp->tpa_pool, i.e. queue 0) */
				dma_unmap_addr_set(&fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_next_bd *tx_next_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;

			tx_next_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_next_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
		fp->tx_db.data.zero_fill1 = 0;
		fp->tx_db.data.prod = 0;

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

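/*
 * Fill in the per-connection ETH context: the Rx (USTORM) side gets the
 * BD and SGE page addresses, buffer sizes and TPA settings; the Tx side
 * gets the CSTORM completion index and the XSTORM BD page base.
 */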
static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	/* Rx */
	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = cl_id;
		context->ustorm_st_context.common.status_block_id = fp->sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);

			context->ustorm_st_context.common.max_sges_for_packet =
				SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
			context->ustorm_st_context.common.max_sges_for_packet =
				((context->ustorm_st_context.common.
				  max_sges_for_packet + PAGES_PER_SGE - 1) &
				 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_context *context =
			bnx2x_sp(bp, context[i].eth);

		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = fp->sb_id;

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
	}
}

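/*
 * Program the TSTORM RSS indirection table, spreading the client IDs of
 * the active queues round-robin across the table entries.
 */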
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

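/*
 * Translate the driver rx_mode into the TSTORM per-client drop/accept
 * masks and the NIG LLH filter: NONE drops everything, NORMAL accepts
 * broadcast, ALLMULTI adds all multicast, and PROMISC accepts
 * everything including management unicast frames.
 */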
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = bp->rx_mode_cl_mask;
	int func = BP_FUNC(bp);
	int port = BP_PORT(bp);
	int i;
	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;

	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;

	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
	       llh_mask);

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
	REG_WR(bp,
	       BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

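/*
 * Per-function internal memory init: RSS/TPA configuration, initial rx
 * mode, per-client statistics reset, statistics DMA addresses, E1H
 * multi-function flags, per-client CQE page bases and aggregation
 * limits, dropless flow control thresholds, and the rate shaping/
 * fairness (cmng) context.
 */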
static void bnx2x_init_internal_func(struct bnx2x *bp)
{
	struct tstorm_eth_function_common_config tstorm_config = {0};
	struct stats_indication_flags stats_flags = {0};
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i, j;
	u32 offset;
	u16 max_agg_size;

	if (is_multi(bp)) {
		tstorm_config.config_flags = MULTI_FLAGS(bp);
		tstorm_config.rss_result_mask = MULTI_MASK;
	}

	/* Enable TPA if needed */
	if (bp->flags & TPA_ENABLE_FLAG)
		tstorm_config.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	if (IS_E1HMF(bp))
		tstorm_config.config_flags |=
				TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;

	tstorm_config.leading_client_id = BP_L_ID(bp);

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
	       (*(u32 *)&tstorm_config));

	bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
	bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
	bnx2x_set_storm_rx_mode(bp);

	for_each_queue(bp, i) {
		u8 cl_id = bp->fp[i].cl_id;

		/* reset xstorm per client statistics */
		offset = BAR_XSTRORM_INTMEM +
			 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct xstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset tstorm per client statistics */
		offset = BAR_TSTRORM_INTMEM +
			 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct tstorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);

		/* reset ustorm per client statistics */
		offset = BAR_USTRORM_INTMEM +
			 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
		for (j = 0;
		     j < sizeof(struct ustorm_per_client_stats) / 4; j++)
			REG_WR(bp, offset + j*4, 0);
	}

	/* Init statistics related context */
	stats_flags.collect_eth = 1;

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
	       ((u32 *)&stats_flags)[0]);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
	       ((u32 *)&stats_flags)[1]);

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
	       U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
	       U64_HI(bnx2x_sp_mapping(bp, fw_stats)));

	if (CHIP_IS_E1H(bp)) {
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			IS_E1HMF(bp));

		REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
			 bp->e1hov);
	}

	/* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
	max_agg_size =
		min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
			  SGE_PAGE_SIZE * PAGES_PER_SGE),
		    (u32)0xffff);
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping));

		/* Next page */
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
		       U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
		       U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));

		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
			 max_agg_size);
	}

	/* dropless flow control */
	if (CHIP_IS_E1H(bp)) {
		struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};

		rx_pause.bd_thr_low = 250;
		rx_pause.cqe_thr_low = 250;
		rx_pause.cos = 1;
		rx_pause.sge_thr_low = 0;
		rx_pause.bd_thr_high = 350;
		rx_pause.cqe_thr_high = 350;
		rx_pause.sge_thr_high = 0;

		for_each_queue(bp, i) {
			struct bnx2x_fastpath *fp = &bp->fp[i];

			if (!fp->disable_tpa) {
				rx_pause.sge_thr_low = 150;
				rx_pause.sge_thr_high = 250;
			}

			offset = BAR_USTRORM_INTMEM +
				 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
								   fp->cl_id);
			for (j = 0;
			     j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
			     j++)
				REG_WR(bp, offset + j*4,
				       ((u32 *)&rx_pause)[j]);
		}
	}

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/* Init rate shaping and fairness contexts */
	if (IS_E1HMF(bp)) {
		int vn;

		/* During init there is no active link;
		   until link is up, set link rate to 10Gbps */
		bp->link_vars.line_speed = SPEED_10000;
		bnx2x_init_port_minmax(bp);

		if (!BP_NOMCP(bp))
			bp->mf_config =
			      SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		bnx2x_calc_vn_weight_sum(bp);

		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, 2*vn + port);

		/* Enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;

	} else {
		/* rate shaping and fairness are disabled */
		DP(NETIF_MSG_IFUP,
		   "single function mode  minmax will be disabled\n");
	}

	/* Store it to internal memory */
	if (bp->port.pmf)
		for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
			REG_WR(bp, BAR_XSTRORM_INTMEM +
			       XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
			       ((u32 *)(&bp->cmng))[i]);
}

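/*
 * The switch below intentionally falls through: a COMMON load also
 * performs the PORT and FUNCTION init, and a PORT load also performs
 * the FUNCTION init.
 */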
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bnx2x_init_internal_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}

static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->bp = bp;
		fp->state = BNX2X_FP_STATE_CLOSED;
		fp->index = i;
		fp->cl_id = BP_L_ID(bp) + i;
#ifdef BCM_CNIC
		fp->sb_id = fp->cl_id + 1;
#else
		fp->sb_id = fp->cl_id;
#endif
		DP(NETIF_MSG_IFUP,
		   "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
		   i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
		bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
			      fp->sb_id);
		bnx2x_update_fpsb_idx(fp);
	}

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
			  DEF_SB_ID);
	bnx2x_update_dsb_idx(bp);
	bnx2x_update_coalesce(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_ring(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_context(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);

	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

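/*
 * Inflate a gzip-wrapped firmware blob into the pre-allocated gunzip
 * buffer: validate the magic bytes, skip the fixed 10-byte header plus
 * the optional NUL-terminated FNAME field, then run a raw zlib inflate
 * (negative window bits, i.e. no zlib header).
 */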
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME	0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	DP(NETIF_MSG_HW, "start part1\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: should the NIG statistics be reset here? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

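/*
 * Unmask the attention interrupts of the HW blocks (a zero mask enables
 * all sources); a few blocks keep specific bits masked, e.g. PBF bits
 * 3-4 and part of the PXP2 sources.
 */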
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

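/*
 * Per-block parity attention masks written at init time.  A zero mask
 * leaves all parity sources of a block enabled, 0xffffffff appears to
 * mask the block entirely, and the annotated bit values mask individual
 * known sources (see the "bit N" comments).
 */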
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

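/*
 * Derive the PXP read/write ordering from the PCIe Device Control
 * register: write order from Max_Payload_Size, read order from
 * Max_Read_Request_Size unless overridden by the mrrs module parameter.
 */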
static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	u32 val;
	u8 port;
	u8 is_required = 0;

	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			u32 phy_type =
				SHMEM_RD(bp, dev_info.port_hw_config[port].
					 external_phy_config) &
				PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			is_required |=
				((phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
				 (phy_type ==
				  PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

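/*
 * One-time (per chip) COMMON init: bring the blocks out of reset in
 * order, program PXP/DMAE/QM/CDU/CFC and friends, run the internal
 * memory self test on the first bring-up since power-up, and
 * initialize the common PHY.
 */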
34f80b04 6431static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 6432{
a2fbb9ea 6433 u32 val, i;
37b091ba
MC
6434#ifdef BCM_CNIC
6435 u32 wb_write[2];
6436#endif
a2fbb9ea 6437
34f80b04 6438 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 6439
81f75bbf 6440 bnx2x_reset_common(bp);
34f80b04
EG
6441 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
6442 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 6443
94a78b79 6444 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
34f80b04
EG
6445 if (CHIP_IS_E1H(bp))
6446 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 6447
34f80b04
EG
6448 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
6449 msleep(30);
6450 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 6451
94a78b79 6452 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
6453 if (CHIP_IS_E1(bp)) {
6454 /* enable HW interrupt from PXP on USDM overflow
6455 bit 16 on INT_MASK_0 */
6456 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6457 }
a2fbb9ea 6458
94a78b79 6459 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 6460 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6461
6462#ifdef __BIG_ENDIAN
34f80b04
EG
6463 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6464 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6465 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6466 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6467 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6468 /* make sure this value is 0 */
6469 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6470
6471/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6472 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6473 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6474 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6475 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6476#endif
6477
34f80b04 6478 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
37b091ba 6479#ifdef BCM_CNIC
34f80b04
EG
6480 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6481 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6482 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea
ET
6483#endif
6484
34f80b04
EG
6485 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6486 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6487
34f80b04
EG
6488 /* let the HW do it's magic ... */
6489 msleep(100);
6490 /* finish PXP init */
6491 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6492 if (val != 1) {
6493 BNX2X_ERR("PXP2 CFG failed\n");
6494 return -EBUSY;
6495 }
6496 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6497 if (val != 1) {
6498 BNX2X_ERR("PXP2 RD_INIT failed\n");
6499 return -EBUSY;
6500 }
a2fbb9ea 6501
34f80b04
EG
6502 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6503 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6504
94a78b79 6505 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
a2fbb9ea 6506
34f80b04
EG
6507 /* clean the DMAE memory */
6508 bp->dmae_ready = 1;
6509 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 6510
94a78b79
VZ
6511 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
6512 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
6513 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
6514 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
a2fbb9ea 6515
34f80b04
EG
6516 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6517 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6518 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6519 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6520
94a78b79 6521 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
37b091ba
MC
6522
6523#ifdef BCM_CNIC
6524 wb_write[0] = 0;
6525 wb_write[1] = 0;
6526 for (i = 0; i < 64; i++) {
6527 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6528 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6529
6530 if (CHIP_IS_E1H(bp)) {
6531 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6532 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6533 wb_write, 2);
6534 }
6535 }
6536#endif
34f80b04
EG
6537 /* soft reset pulse */
6538 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6539 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6540
37b091ba 6541#ifdef BCM_CNIC
94a78b79 6542 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
a2fbb9ea 6543#endif
a2fbb9ea 6544
94a78b79 6545 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
34f80b04
EG
6546 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
6547 if (!CHIP_REV_IS_SLOW(bp)) {
6548 /* enable hw interrupt from doorbell Q */
6549 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6550 }
a2fbb9ea 6551
94a78b79
VZ
6552 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6553 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
26c8fa4d 6554 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
37b091ba 6555#ifndef BCM_CNIC
3196a88a
EG
6556 /* set NIC mode */
6557 REG_WR(bp, PRS_REG_NIC_MODE, 1);
37b091ba 6558#endif
34f80b04
EG
6559 if (CHIP_IS_E1H(bp))
6560 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 6561
94a78b79
VZ
6562 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
6563 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
6564 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
6565 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
a2fbb9ea 6566
ca00392c
EG
6567 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6568 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6569 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
6570 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
a2fbb9ea 6571
94a78b79
VZ
6572 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
6573 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
6574 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
6575 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
a2fbb9ea 6576
34f80b04
EG
6577 /* sync semi rtc */
6578 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6579 0x80000000);
6580 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6581 0x80000000);
a2fbb9ea 6582
94a78b79
VZ
6583 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
6584 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
6585 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
a2fbb9ea 6586
34f80b04
EG
6587 REG_WR(bp, SRC_REG_SOFT_RST, 1);
6588 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
6589 REG_WR(bp, i, 0xc0cac01a);
6590 /* TODO: replace with something meaningful */
6591 }
94a78b79 6592 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
37b091ba
MC
6593#ifdef BCM_CNIC
6594 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6595 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6596 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6597 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6598 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6599 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6600 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6601 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6602 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6603 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6604#endif
34f80b04 6605 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6606
34f80b04
EG
6607 if (sizeof(union cdu_context) != 1024)
6608 /* we currently assume that a context is 1024 bytes */
7995c64e
JP
6609 pr_alert("please adjust the size of cdu_context(%ld)\n",
6610 (long)sizeof(union cdu_context));
a2fbb9ea 6611
94a78b79 6612 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
34f80b04
EG
6613 val = (4 << 24) + (0 << 12) + 1024;
6614 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	/* read NIG statistic
	   to see if this is our first up since powerup */
	bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
	val = *bnx2x_sp(bp, wb_data[0]);

	/* do internal memory self test */
	if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
		BNX2X_ERR("internal mem self test failed\n");
		return -EBUSY;
	}

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		bp->port.need_hw_lock = 1;
		break;

	default:
		break;
	}

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_common_init_phy(bp, bp->common.shmem_base);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}

static int bnx2x_init_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %x\n", port);

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

#ifdef BCM_CNIC
	REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);

	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif
	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		if (IS_E1HMF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
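	/*
	 * Editor's note: per the inline comments, the pause thresholds are
	 * expressed in 256-byte BRB blocks -- for a large MTU,
	 * low = (24*1024 + 4*mtu)/256 rounded up, and high always sits
	 * 14KB (56 blocks) above low.  E.g. for mtu = 9000:
	 * low = 96 + 140 + 1 = 237 blocks (60672 bytes) and
	 * high = 237 + 56 = 293 blocks (75008 bytes).
	 */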
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
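	/*
	 * Editor's note (assumption): PBF thresholds/credits appear to be in
	 * 16-byte units, so 9040/16 = 565 units covers a 9000-byte MTU frame
	 * plus overhead; the "+ 553 - 22" correction in the init credit looks
	 * like a hardware-calibrated constant from the spec rather than
	 * something derivable from the MTU alone.
	 */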

	/* probe changes */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_E1HMF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable e1hov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_E1HMF(bp) ? 0x1 : 0x2));

		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);

	switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		{
		u32 swap_val, swap_override, aeu_gpio_mask, offset;

		bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
			       MISC_REGISTERS_GPIO_INPUT_HI_Z, port);

		/* The GPIO should be swapped if the swap register is
		   set and active */
		swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
		swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);

		/* Select function upon port-swap configuration */
		if (port == 0) {
			offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
		} else {
			offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
			aeu_gpio_mask = (swap_val && swap_override) ?
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
				AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
		}
		val = REG_RD(bp, offset);
		/* add GPIO3 to group */
		val |= aeu_gpio_mask;
		REG_WR(bp, offset, val);
		}
		break;

	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
	case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
		/* add SPIO 5 to group 0 */
		{
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
		}
		break;

	default:
		break;
	}

	bnx2x__link_reset(bp);

	return 0;
}

#define ILT_PER_FUNC		(768/2)
#define FUNC_ILT_BASE(func)	(func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and has an added
   1=valid bit added to the 53rd bit
   then since this is a wide register(TM)
   we split it into two 32 bit writes
 */
#define ONCHIP_ADDR1(x)		((u32)(((u64)x >> 12) & 0xFFFFFFFF))
#define ONCHIP_ADDR2(x)		((u32)((1 << 20) | ((u64)x >> 44)))
#define PXP_ONE_ILT(x)		(((x) << 10) | x)
#define PXP_ILT_RANGE(f, l)	(((l) << 10) | f)
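/*
 * Editor's worked example: for a DMA address x = 0x123456789000,
 * x >> 12 = 0x123456789, so
 *
 *	ONCHIP_ADDR1(x) = 0x23456789	(low 32 bits of the shifted address)
 *	ONCHIP_ADDR2(x) = 0x100001	(valid bit 20 | bits 63:44 of x)
 *
 * and bit 20 of the second write lands on the 53rd bit of the combined
 * wide register, matching the comment above.
 */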

#ifdef BCM_CNIC
#define CNIC_ILT_LINES		127
#define CNIC_CTX_PER_ILT	16
#else
#define CNIC_ILT_LINES		0
#endif

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1H(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
	else /* E1 */
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static int bnx2x_init_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init  func %x\n", func);

	/* set MSI reconfigure capability */
	addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
	val = REG_RD(bp, addr);
	val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
	REG_WR(bp, addr, val);

	i = FUNC_ILT_BASE(func);

	bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
	} else /* E1 */
		REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
		       PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
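	/*
	 * Editor's note: each function owns ILT_PER_FUNC (768/2 = 384) lines
	 * starting at FUNC_ILT_BASE(func); line i maps the CDU context, and
	 * with BCM_CNIC the following lines (below) map the timers block,
	 * the QM queues and the searcher T1 table in turn.
	 */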

#ifdef BCM_CNIC
	i += 1 + CNIC_ILT_LINES;
	bnx2x_ilt_wr(bp, i, bp->timers_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->qm_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
	}

	i++;
	bnx2x_ilt_wr(bp, i, bp->t1_mapping);
	if (CHIP_IS_E1(bp))
		REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
	else {
		REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
		REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
	}

	/* tell the searcher where the T2 table is */
	REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);

	bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
		    U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));

	bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
		    U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
		    U64_HI((u64)bp->t2_mapping + 16*1024 - 64));

	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
#endif

	if (CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
		bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
	}

	/* HC init per function */
	if (CHIP_IS_E1H(bp)) {
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	return 0;
}

static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bp->dmae_ready = 1;
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		bp->dmae_ready = 1;
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#ifdef BCM_CNIC
	bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
#endif

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {

		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
	BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
		       sizeof(struct host_status_block));
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)
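	/*
	 * Editor's note: two allocators by design -- BNX2X_PCI_ALLOC returns
	 * coherent DMA memory for rings and status blocks the hardware
	 * accesses, while BNX2X_ALLOC uses plain vmalloc() for the sw_*
	 * shadow rings that only the driver itself touches.
	 */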

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
				sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
				sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
				sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	/* Initialize T2 (for 1024 connections) */
	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
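	/*
	 * Editor's note: the loop above chains the 16KB T2 area into a free
	 * list for the searcher -- the last 8 bytes of each 64-byte entry
	 * hold the DMA address of the next entry, which is why the
	 * SRC_REG_FIRSTFREE0/LASTFREE0 writes in bnx2x_init_func() point at
	 * the first and last entries of this table.
	 */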

	/* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);

	BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_status_block));
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			dma_unmap_single(&bp->pdev->dev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, DMA_FROM_DEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}

static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp, bool disable_only)
{
	if (bp->flags & USING_MSIX_FLAG) {
		if (!disable_only)
			bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		if (!disable_only)
			free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else if (!disable_only)
		free_irq(bp->pdev->irq, bp->dev);
}

static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

#ifdef BCM_CNIC
	igu_vec = BP_L_ID(bp) + offset;
	bp->msix_table[1].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
	offset++;
#endif
	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
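/*
 * Editor's note on the MSI-X vector layout set up above and consumed by
 * bnx2x_req_msix_irqs() below: entry 0 is the slowpath interrupt, an
 * optional entry 1 serves CNIC when BCM_CNIC is defined, and the remaining
 * BNX2X_NUM_QUEUES(bp) entries carry one vector per fastpath queue.
 */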

static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

#ifdef BCM_CNIC
	offset++;
#endif
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
			 bp->dev->name, i);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	netdev_info(bp->dev, "using MSI-X  IRQs: sp %d  fp[%d] %d ... fp[%d] %d\n",
		    bp->msix_table[0].vector,
		    0, bp->msix_table[offset].vector,
		    i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}

static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}

static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
}

/*
 * Init service functions
 */

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param with_bcast set broadcast MAC as well
 */
static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
				      u32 cl_bit_vec, u8 cam_offset,
				      u8 with_bcast)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 1 + (with_bcast ? 1 : 0);
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
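	/*
	 * Editor's worked example: swab16() stores the MAC in the CAM in
	 * network byte order.  For mac = 00:11:22:33:44:55 on a little-endian
	 * host, *(u16 *)&mac[0] reads as 0x1100, so:
	 *
	 *	msb_mac_addr    = swab16(0x1100) = 0x0011
	 *	middle_mac_addr = swab16(0x3322) = 0x2233
	 *	lsb_mac_addr    = swab16(0x5544) = 0x4455
	 */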
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	if (with_bcast) {
		config->config_table[1].cam_entry.msb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.middle_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.lsb_mac_addr =
			cpu_to_le16(0xffff);
		config->config_table[1].cam_entry.flags = cpu_to_le16(port);
		if (set)
			config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
		else
			CAM_INVALIDATE(config->config_table[1]);
		config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(cl_bit_vec);
		config->config_table[1].target_table_entry.vlan_id = 0;
	}

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 */
static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
				       u32 cl_bit_vec, u8 cam_offset)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
				   (1 << bp->fp->cl_id), BP_FUNC(bp));

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
{
	bp->set_mac_pending++;
	smp_wmb();

	bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
				  (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
				  1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
}

#ifdef BCM_CNIC
/**
 * Sets the iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);

	bp->set_mac_pending++;
	smp_wmb();

	/* Send a SET_MAC ramrod */
	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
					  cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
					  1);
	else
		/* CAM allocation for E1H
		 * unicasts: by func number
		 * multicast: 20+FUNC*20, 20 each
		 */
		bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
					   cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));

	/* Wait for a completion when setting */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);

	return 0;
}
#endif

static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_num_queues_msix(struct bnx2x *bp)
{

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		bp->num_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_queues)
			bp->num_queues = min_t(u32, num_queues,
						  BNX2X_MAX_QUEUES(bp));
		else
			bp->num_queues = min_t(u32, num_online_cpus(),
						  BNX2X_MAX_QUEUES(bp));
		break;

	default:
		bp->num_queues = 1;
		break;
	}
}

static int bnx2x_set_num_queues(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	case INT_MODE_MSIX:
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues_msix(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc)
			/* failed to enable MSI-X */
			bp->num_queues = 1;
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_queues;
	return rc;
}

#ifdef BCM_CNIC
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
#endif

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_num_queues(bp);

	if (bnx2x_alloc_mem(bp)) {
		bnx2x_free_irq(bp, true);
		return -ENOMEM;
	}

	for_each_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
	} else {
		/* Fall back to INTx if we failed to enable MSI-X due to lack
		   of memory (in bnx2x_set_num_queues()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			bnx2x_free_irq(bp, true);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			netdev_info(bp->dev, "using MSI  IRQ %d\n",
				    bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}
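	/*
	 * Editor's note: without an MCP, the driver arbitrates by itself via
	 * load_count[]: [0] counts loaded functions device-wide and
	 * [1 + port] counts them per port, so the first function overall
	 * performs the COMMON init and the first one on each port performs
	 * the PORT init.
	 */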

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) &&
	    (bp->common.shmem2_base))
		SHMEM2_WR(bp, dcc_support,
			  (SHMEM_DCC_SUPPORT_DISABLE_ENABLE_PF_TLV |
			   SHMEM_DCC_SUPPORT_BANDWIDTH_ALLOCATION_TLV));

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
#ifndef BNX2X_STOP_ON_ERROR
		goto load_error3;
#else
		bp->panic = 1;
		return -EBUSY;
#endif
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
#ifdef BCM_CNIC
		/* Enable Timer scan */
		REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
#endif
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
#ifdef BCM_CNIC
				goto load_error4;
#else
				goto load_error3;
#endif
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
#ifdef BCM_CNIC
		/* Set iSCSI L2 MAC */
		mutex_lock(&bp->cnic_mutex);
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
			bnx2x_set_iscsi_eth_mac_addr(bp, 1);
			bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
			bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping,
				      CNIC_SB_ID(bp));
		}
		mutex_unlock(&bp->cnic_mutex);
#endif
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queues should only be re-enabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

#ifdef BCM_CNIC
	bnx2x_setup_cnic_irq_info(bp);
	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
#endif
	bnx2x_inc_load_cnt(bp);

	return 0;

#ifdef BCM_CNIC
load_error4:
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
#endif
load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp, false);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

static void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	/* Wait until tx fastpath tasks complete */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_eth_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bp->set_mac_pending++;
		smp_wmb();

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_eth_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}
#ifdef BCM_CNIC
	/* Clear iSCSI L2 MAC */
	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
	}
	mutex_unlock(&bp->cnic_mutex);
#endif

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
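		/*
		 * Editor's worked example: for dev_addr 00:11:22:33:44:55 the
		 * two writes above program the MAC-match pair as
		 *
		 *	MAC_MATCH[entry]     = 0x00000011  (bytes 0-1)
		 *	MAC_MATCH[entry + 4] = 0x22334455  (bytes 2-5)
		 */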

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

}
8381
static inline void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int i;

	if (bp->state == BNX2X_STATE_CLOSED) {
		/* Interface has been removed - nothing to recover */
		bp->recovery_state = BNX2X_RECOVERY_DONE;
		bp->is_leader = 0;
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
		smp_wmb();

		return -EINVAL;
	}

#ifdef BCM_CNIC
	bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
#endif
	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	/* Set "drop all" */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* Disable HW interrupts, NAPI and Tx */
	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	/* Cleanup the chip if needed */
	if (unload_mode != UNLOAD_RECOVERY)
		bnx2x_chip_cleanup(bp, unload_mode);

	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	/* The last driver must disable "close the gate" if there is no
	 * parity attention or "process kill" pending.
	 */
	if ((!bnx2x_dec_load_cnt(bp)) && (!bnx2x_chk_parity_attn(bp)) &&
	    bnx2x_reset_is_done(bp))
		bnx2x_disable_close_the_gate(bp);

	/* Reset the MCP mailbox sequence if there is an ongoing recovery */
	if (unload_mode == UNLOAD_RECOVERY)
		bp->fw_seq = 0;

	return 0;
}

/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
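
/*
 * Editor's sketch (not driver code): the bit-0 toggling used above.  Note
 * the inverted polarity for gate #3 - in HC_REG_CONFIG_x bit 0 appears to
 * *enable* the block, so "closing" the gate means clearing the bit.
 */
static inline u32 bnx2x_example_gate_bit(u32 val, bool set)
{
	return set ? (val | 0x1) : (val & ~(u32)1);
}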

#define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */

static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Save the current value of the `magic' bit, then set it */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}

/* Restore the value of the `magic' bit.
 *
 * @param bp		Driver handle.
 * @param magic_val	Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}

/* Prepares for MCP reset: takes care of CLP configurations.
 *
 * @param bp
 * @param magic_val	Old value of 'magic' bit.
 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}

#define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
#define MCP_ONE_TIMEOUT  100    /* 100 ms */

/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
 * depending on the HW type.
 *
 * @param bp
 */
static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
{
	/* special handling for emulation and FPGA,
	   wait 10 times longer */
	if (CHIP_REV_IS_SLOW(bp))
		msleep(MCP_ONE_TIMEOUT*10);
	else
		msleep(MCP_ONE_TIMEOUT);
}

static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: it's best to check the validity map of the last port.
		 * Currently this checks port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
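
/*
 * Editor's sketch (not driver code): the validity predicate polled above.
 * Shared memory is considered valid only when both signature bits are set.
 */
static inline bool bnx2x_example_shmem_valid(u32 val)
{
	u32 sig = SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB;

	return (val & sig) == sig;
}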

static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}

/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}

static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;

	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare for chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
	 * reset state, re-enable attentions. */

	return 0;
}
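
/*
 * Editor's sketch (not driver code): the idle condition polled at the top
 * of bnx2x_process_kill().  The 0x7e/0xa0 constants appear to be the
 * expected counter values when the PSWRD "Tetris" buffer is empty; that
 * reading is an assumption, not stated in the source.
 */
static inline bool bnx2x_example_pxp_idle(u32 sr_cnt, u32 blk_cnt,
					  u32 idle0, u32 idle1, u32 exp_rom2)
{
	return (sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
	       ((idle0 & 0x1) == 0x1) && ((idle1 & 0x1) == 0x1) &&
	       (exp_rom2 == 0xffffffff);
}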

static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;

	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	smp_wmb();
	return rc;
}

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state);

/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensures that it
 * will never be called when netif_running(bp->dev) is false.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
						  HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 * update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR "%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all functions
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock,
					 * since a former leader may have
					 * been unloaded by the user or
					 * released leadership for some
					 * other reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart
						 * the switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
							      HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
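
/*
 * Editor's summary (not driver code) of the recovery state machine above,
 * paraphrased from the handlers:
 *
 *   RECOVERY_INIT -> RECOVERY_WAIT : driver unloaded via UNLOAD_RECOVERY;
 *                                    the first function to take the HW
 *                                    lock becomes the leader.
 *   RECOVERY_WAIT -> RECOVERY_DONE : leader: once the chip-wide load count
 *                                    reaches zero, run "process kill" and
 *                                    reload; non-leader: reload once the
 *                                    leader has completed the reset.
 */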

/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 * scheduled on a general queue in order to prevent a deadlock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
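
/*
 * Editor's sketch (not driver code): the general "pretend" pattern used
 * above - temporarily tell the PCI glue that this function is function 0,
 * do the work, then restore the real identity.  The callback parameter is
 * hypothetical.
 */
static inline void bnx2x_example_pretend_as_f0(struct bnx2x *bp, int func,
					       void (*work)(struct bnx2x *))
{
	u32 reg = bnx2x_get_pretend_reg(bp, func);

	REG_WR(bp, reg, 0);	/* pretend to be function 0 */
	REG_RD(bp, reg);	/* read back to flush the GRC write */
	work(bp);		/* run in "like-E1" mode */
	REG_WR(bp, reg, func);	/* restore our function id */
	REG_RD(bp, reg);	/* flush again */
}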

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
	    != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	pr_info("part number %X-%X-%X-%X\n", val, val2, val3, val4);
}
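
/*
 * Editor's sketch (not driver code): the chip_id field packing assembled
 * register by register above - num:16-31, rev:12-15, metal:4-11,
 * bond_id:0-3.
 */
static inline u32 bnx2x_example_chip_id(u32 num, u32 rev, u32 metal, u32 bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}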

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_Autoneg |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
	      PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  " speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
		       " advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}
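
/*
 * Editor's note (not driver code): a usage sketch of the helper above.
 * The values are hypothetical: shmem words mac_upper=0x0011 and
 * mac_lower=0x22334455 yield the station address 00:11:22:33:44:55,
 * since the big-endian conversions lay the bytes out in wire order.
 */
static void __devinit bnx2x_example_mac_from_shmem(u8 mac_buf[6])
{
	u32 mac_lo = 0x22334455;	/* as read from ...mac_lower */
	u16 mac_hi = 0x0011;		/* as read from ...mac_upper */

	bnx2x_set_mac_buf(mac_buf, mac_lo, mac_hi);
}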

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;
	u32 ext_phy_type;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over-current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
		       " speed_cap_mask 0x%08x link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->link_params.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->e1hmf = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_E1HMF(bp) ? "multi" : "single");

		if (IS_E1HMF(bp)) {
			val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
								e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->e1hov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->e1hov, bp->e1hov);
			} else {
				BNX2X_ERR("!!! No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!! VN %d in single function mode,"
					  " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
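
/*
 * Editor's note (not driver code): the PCI VPD layout walked above, as
 * this editor understands the pci_vpd_* helpers:
 *
 *   [LRDT tag byte][length lo][length hi]        <- PCI_VPD_LRDT_TAG_SIZE
 *   then a sequence of info fields, each:
 *   [keyword 0][keyword 1][field length][data]   <- PCI_VPD_INFO_FLD_HDR_SIZE
 *
 * pci_vpd_find_tag() locates the read-only LRDT block, and
 * pci_vpd_find_info_keyword() scans it for keyword records such as the
 * manufacturer-id and vendor-specific entries used here.
 */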

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		pr_err("FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		pr_err("MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		pr_err("Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
	bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
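
/*
 * Editor's sketch (not driver code): the coalescing-tick rounding used in
 * bnx2x_init_bp() - values are snapped down to multiples of 4*BNX2X_BTR.
 * For instance, if BNX2X_BTR were 2 (an assumed value) the step is 8, so
 * 50 -> 48 and 25 -> 24.
 */
static inline int bnx2x_example_round_ticks(int ticks)
{
	int step = 4 * BNX2X_BTR;	/* basic timer resolution multiple */

	return (ticks / step) * step;	/* integer division truncates */
}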

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if ((bp->state == BNX2X_STATE_OPEN) &&
	    !(bp->flags & MF_FUNC_DIS) &&
	    (bp->link_vars.link_up)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
		if (IS_E1HMF(bp)) {
			u16 vn_max_rate;

			vn_max_rate =
				((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
			if (vn_max_rate < cmd->speed)
				cmd->speed = vn_max_rate;
		}
	} else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->mdio.prtad;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
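
/*
 * Editor's sketch (not driver code): the E1HMF per-VN rate cap applied in
 * bnx2x_get_settings().  The mf_config bandwidth field is in units of
 * 100 Mbps, so a field value of 25 caps the reported speed at 2500 Mbps.
 */
static inline u16 bnx2x_example_vn_max_rate(u32 mf_config)
{
	return ((mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
		FUNC_MF_CFG_MAX_BW_SHIFT) * 100;	/* Mbps */
}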

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
	   DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
	   DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported &
			      SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL " req_duplex %d advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define IS_E1_ONLINE(info)	(((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
#define IS_E1H_ONLINE(info)	(((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)

static int bnx2x_get_regs_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int regdump_len = 0;
	int i;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1; i++)
			if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
				regdump_len += wreg_addrs_e1[i].size *
					(1 + wreg_addrs_e1[i].read_regs_count);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				regdump_len += reg_addrs[i].size;

		for (i = 0; i < WREGS_COUNT_E1H; i++)
			if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
				regdump_len += wreg_addrs_e1h[i].size *
					(1 + wreg_addrs_e1h[i].read_regs_count);
	}
	regdump_len *= 4;
	regdump_len += sizeof(struct dump_hdr);

	return regdump_len;
}
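
/* A worked example of the sizing above (numbers are illustrative only):
 * a single online block of 0x20 registers and no wide-bus entries gives
 * regdump_len = 0x20 dwords * 4 = 128 bytes, plus sizeof(struct dump_hdr)
 * for the header that bnx2x_get_regs() prepends below.
 */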

static void bnx2x_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, j;
	struct bnx2x *bp = netdev_priv(dev);
	struct dump_hdr dump_hdr = {0};

	regs->version = 0;
	memset(p, 0, regs->len);

	if (!netif_running(bp->dev))
		return;

	dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
	dump_hdr.dump_sign = dump_sign_all;
	dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
	dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
	dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
	dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
	dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;

	memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
	p += dump_hdr.hdr_size + 1;

	if (CHIP_IS_E1(bp)) {
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);

	} else { /* E1H */
		for (i = 0; i < REGS_COUNT; i++)
			if (IS_E1H_ONLINE(reg_addrs[i].info))
				for (j = 0; j < reg_addrs[i].size; j++)
					*p++ = REG_RD(bp,
						      reg_addrs[i].addr + j*4);
	}
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	strncpy(info->fw_version, bp->fw_ver, 32);
	snprintf(info->fw_version + strlen(bp->fw_ver), 32 - strlen(bp->fw_ver),
		 "bc %d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " phy " : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = bnx2x_get_regs_len(dev);
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msg_enable = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static u32 bnx2x_get_link(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & MF_FUNC_DIS)
		return 0;

	return bp->link_vars.link_up;
}

static int bnx2x_get_eeprom_len(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->common.flash_size;
}

static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* request access to nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
			break;

		udelay(5);
	}

	if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
		DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static int bnx2x_release_nvram_lock(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int count, i;
	u32 val = 0;

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* relinquish nvram interface */
	REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
	       (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));

	for (i = 0; i < count*10; i++) {
		val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
		if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
			break;

		udelay(5);
	}

	if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
		DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
		return -EBUSY;
	}

	return 0;
}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}
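
/* All NVRAM accessors below follow the same bracket sequence: take the
 * per-port SW arbitration lock, enable the interface, issue dword-at-a-time
 * commands through the MCP_REG_MCPR_NVM_* registers, then disable and
 * release in reverse order.
 */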

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			*ret_val = cpu_to_be32(val);
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	__be32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
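
/* For example, a dword-aligned 8-byte read issues exactly two commands:
 * the first dword with MCPR_NVM_COMMAND_FIRST set and the second with
 * MCPR_NVM_COMMAND_LAST set, matching the while loop and the tail read
 * above.
 */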

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	__be32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
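
/* Worked example of the read-modify-write above: writing one byte at
 * offset 0x102 aligns to dword 0x100, BYTE_OFFSET(0x102) = 16, so only
 * bits 23:16 of that dword are cleared and replaced with the new byte.
 */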

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int rc = 0;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* PHY eeprom can be accessed only by the PMF */
	if ((eeprom->magic >= 0x50485900) && (eeprom->magic <= 0x504859FF) &&
	    !bp->port.pmf)
		return -EINVAL;

	if (eeprom->magic == 0x50485950) {
		/* 'PHYP' (0x50485950): prepare phy for FW upgrade */
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

		bnx2x_acquire_phy_lock(bp);
		rc |= bnx2x_link_reset(&bp->link_params,
				       &bp->link_vars, 0);
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101)
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_HIGH, port);
		bnx2x_release_phy_lock(bp);
		bnx2x_link_report(bp);

	} else if (eeprom->magic == 0x50485952) {
		/* 'PHYR' (0x50485952): re-init link after FW upgrade */
		if (bp->state == BNX2X_STATE_OPEN) {
			bnx2x_acquire_phy_lock(bp);
			rc |= bnx2x_link_reset(&bp->link_params,
					       &bp->link_vars, 1);

			rc |= bnx2x_phy_init(&bp->link_params,
					     &bp->link_vars);
			bnx2x_release_phy_lock(bp);
			bnx2x_calc_fc_adv(bp);
		}
	} else if (eeprom->magic == 0x53985943) {
		/* 'PHYC' (0x53985943): PHY FW upgrade completed */
		if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
				       PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) {
			u8 ext_phy_addr =
			     XGXS_EXT_PHY_ADDR(bp->link_params.ext_phy_config);

			/* DSP Remove Download Mode */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_0,
				       MISC_REGISTERS_GPIO_LOW, port);

			bnx2x_acquire_phy_lock(bp);

			bnx2x_sfx7101_sp_sw_reset(bp, port, ext_phy_addr);

			/* wait 0.5 sec to allow it to run */
			msleep(500);
			bnx2x_ext_phy_hw_reset(bp, port);
			msleep(500);
			bnx2x_release_phy_lock(bp);
		}
	} else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
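
/* The magic values above decode as ASCII tags: 0x50485950 is "PHYP" and
 * 0x50485952 is "PHYR". Note that the completion magic 0x53985943 does
 * not decode to printable "PHYC" despite its comment; it is presumably
 * kept as-is because user-space upgrade tools rely on the exact value.
 */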

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

#define BNX2X_MAX_COALES_TOUT  (0xf0*12) /* Maximal coalescing timeout in us */
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->rx_ticks = BNX2X_MAX_COALES_TOUT;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
		bp->tx_ticks = BNX2X_MAX_COALES_TOUT;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}
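
/* BNX2X_MAX_COALES_TOUT is 0xf0 * 12 = 2880us, so e.g. a request for
 * rx_coalesce_usecs = 5000 is silently clamped to 2880 above.
 */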

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
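
/* Example of the resolution above: rx_pause = tx_pause = 0 with autoneg
 * off yields BNX2X_FLOW_CTRL_NONE, while requesting autoneg when the
 * line speed is auto-negotiated restores BNX2X_FLOW_CTRL_AUTO.
 */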

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!disable_tpa) {
			if (!(dev->features & NETIF_F_LRO)) {
				dev->features |= NETIF_F_LRO;
				bp->flags |= TPA_ENABLE_FLAG;
				changed = 1;
			}
		} else
			rc = -EINVAL;
	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
		{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
/* 20 */	{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
		{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
/* 30 */	{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
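
/* The test above relies on each entry's mask to ignore read-only or
 * reserved bits: e.g. for PBF_REG_MAC_IF0_ENABLE (mask 0x00000001) only
 * bit 0 must follow the written 0x0/0xffffffff pattern, all other bits
 * are don't-care.
 */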

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp_rx = &bp->fp[0];
	struct bnx2x_fastpath *fp_tx = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod, bd_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	/* check the loopback mode */
	switch (loopback_mode) {
	case BNX2X_PHY_LOOPBACK:
		if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
			return -EINVAL;
		break;
	case BNX2X_MAC_LOOPBACK:
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		break;
	default:
		return -EINVAL;
	}

	/* prepare the loopback packet */
	pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
		     bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, ETH_ALEN);
	memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	/* send the loopback packet */
	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);

	pkt_prod = fp_tx->tx_pkt_prod++;
	tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp_tx->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	bd_prod = TX_BD(fp_tx->tx_bd_prod);
	tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_start_bd->vlan = cpu_to_le16(pkt_prod);
	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	wmb();

	fp_tx->tx_db.data.prod += 2;
	barrier();
	DOORBELL(bp, fp_tx->index, fp_tx->tx_db.raw);

	mmiowb();

	num_pkts++;
	fp_tx->tx_bd_prod += 2; /* start + pbd */

	udelay(100);

	tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
	fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
	fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
	fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
			     fp_rx->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
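
/* The loopback frame built above is self-describing: the destination MAC
 * is the device's own address, the source MAC is zero, the rest of the
 * Ethernet header is 0x77 filler and payload byte i holds (i & 0xff),
 * which is exactly what the receive-side check re-validates.
 */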

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0, res;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
	if (res) {
		DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3
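
/* 0xdebb20e3 is the standard little-endian CRC32 residual: running
 * ether_crc_le() over a block that (by convention) carries its own CRC
 * in its last dword always yields this constant, so each nvram_tbl
 * region below can be validated without parsing its layout.
 */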

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	__be32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, crc;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc %d)\n", i, rc);
			goto test_nvram_exit;
		}

		crc = ether_crc_le(nvram_tbl[i].size, data);
		if (crc != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] crc value (0x%08x)\n", i, crc);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		/* use last unicast entries */
		config->hdr.offset = (BP_PORT(bp) ? 63 : 31);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	bp->set_mac_pending++;
	smp_wmb();
	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			smp_rmb();
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int port = BP_PORT(bp);
		u32 val;
		u8 link_up;

		/* save current value of input enable for TX port IF */
		val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
		/* disable input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);

		link_up = (bnx2x_link_test(bp) == 0);
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);

		/* restore input for TX port IF */
		REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);

		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msg_enable & BNX2X_MSG_STATS))

static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			num_stats = BNX2X_NUM_Q_STATS * bp->num_queues;
			if (!IS_E1HMF_MODE_STAT(bp))
				num_stats += BNX2X_NUM_STATS;
		} else {
			if (IS_E1HMF_MODE_STAT(bp)) {
				num_stats = 0;
				for (i = 0; i < BNX2X_NUM_STATS; i++)
					if (IS_FUNC_STAT(i))
						num_stats++;
			} else
				num_stats = BNX2X_NUM_STATS;
		}
		return num_stats;

	case ETH_SS_TEST:
		return BNX2X_NUM_TESTS;

	default:
		return -EINVAL;
	}
}
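
/* For example, a non-MF setup with 8 queues reports
 * 8 * BNX2X_NUM_Q_STATS + BNX2X_NUM_STATS counters here, which is the
 * exact layout that bnx2x_get_strings()/bnx2x_get_ethtool_stats()
 * produce below.
 */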

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
				      SPEED_1000);
		else
			bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
			      bp->link_vars.line_speed);

	return 0;
}

static const struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_regs_len		= bnx2x_get_regs_len,
	.get_regs		= bnx2x_get_regs,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= bnx2x_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test		= bnx2x_self_test,
	.get_sset_count		= bnx2x_get_sset_count,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/
11832
11833static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
11834{
11835 u16 pmcsr;
11836
11837 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
11838
11839 switch (state) {
11840 case PCI_D0:
34f80b04 11841 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
a2fbb9ea
ET
11842 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
11843 PCI_PM_CTRL_PME_STATUS));
11844
11845 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 11846 /* delay required during transition out of D3hot */
a2fbb9ea 11847 msleep(20);
34f80b04 11848 break;
a2fbb9ea 11849
34f80b04
EG
11850 case PCI_D3hot:
11851 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
11852 pmcsr |= 3;
a2fbb9ea 11853
34f80b04
EG
11854 if (bp->wol)
11855 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 11856
34f80b04
EG
11857 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
11858 pmcsr);
a2fbb9ea 11859
34f80b04
EG
11860 /* No more memory access after this point until
11861 * device is brought back to D0.
11862 */
11863 break;
11864
11865 default:
11866 return -EINVAL;
11867 }
11868 return 0;
a2fbb9ea
ET
11869}
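/*
 * The PowerState field in PMCSR is the low two bits covered by
 * PCI_PM_CTRL_STATE_MASK; per the PCI PM spec 0 selects D0 and 3
 * selects D3hot, which is what the bare "pmcsr |= 3" above encodes.
 * Writing PCI_PM_CTRL_PME_STATUS back in the D0 case clears any
 * latched PME event, and PCI_PM_CTRL_PME_ENABLE arms wake-on-LAN.
 */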

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
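/*
 * The increment above compensates for the ring layout: the last slot
 * of each RCQ page holds a next-page pointer rather than a real
 * completion, so when the status-block consumer lands on that slot
 * ((index & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT) it is bumped past
 * it before being compared with the driver's rx_comp_cons.
 */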

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;

	while (1) {
#ifdef BNX2X_STOP_ON_ERROR
		if (unlikely(bp->panic)) {
			napi_complete(napi);
			return 0;
		}
#endif

		if (bnx2x_has_tx_work(fp))
			bnx2x_tx_int(fp);

		if (bnx2x_has_rx_work(fp)) {
			work_done += bnx2x_rx_int(fp, budget - work_done);

			/* must not complete if we consumed full budget */
			if (work_done >= budget)
				break;
		}

		/* Fall out from the NAPI loop if needed */
		if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
			bnx2x_update_fpsb_idx(fp);
			/* bnx2x_has_rx_work() reads the status block, thus
			 * we need to ensure that status block indices have
			 * been actually read (bnx2x_update_fpsb_idx) prior
			 * to this check (bnx2x_has_rx_work) so that we won't
			 * write the "newer" value of the status block to IGU
			 * (if there was a DMA right after bnx2x_has_rx_work
			 * and if there is no rmb, the memory reading
			 * (bnx2x_update_fpsb_idx) may be postponed to right
			 * before bnx2x_ack_sb). In this case there will
			 * never be another interrupt until there is another
			 * update of the status block, while there is still
			 * unhandled work.
			 */
			rmb();

			if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
				napi_complete(napi);
				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_ENABLE, 1);
				break;
			}
		}
	}

	return work_done;
}

/* We split the first BD into a header BD and a data BD
 * to ease the pain of our fellow microcode engineers;
 * we use one DMA mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct sw_tx_bd *tx_buf,
				   struct eth_tx_start_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_start_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);

	/* this marks the BD as one that has no individual mapping */
	tx_buf->flags |= BNX2X_TSO_SPLIT_BD;

	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd */
	*tx_bd = (struct eth_tx_start_bd *)d_tx_bd;

	return bd_prod;
}
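/*
 * After the split the single mapping of length old_len is described
 * by two BDs:
 *
 *   start BD: [mapping,        mapping + hlen)     - headers only
 *   data  BD: [mapping + hlen, mapping + old_len)  - payload
 *
 * Only the start BD's mapping is unmapped on completion;
 * BNX2X_TSO_SPLIT_BD tells the Tx cleanup path that the data BD
 * shares it.
 */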

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
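/*
 * "fix" is the signed distance between where the stack started its
 * partial checksum and the transport header the firmware expects it
 * to start at: a positive fix means fix extra bytes before t_header
 * were summed, so their contribution is subtracted out; a negative
 * fix means the sum started late and the missing bytes are added
 * back. The result is folded, complemented and byte-swapped into the
 * ordering the parsing BD expects (matching the swab16() used for
 * the plain TCP case elsewhere in this file).
 */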

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == htons(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented);
   no need to check fragmentation if page size > 8K (there will be no
   violation of FW restrictions) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB */
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}
		} else {
			/* a non-LSO packet that is too fragmented should
			   always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d hlen %d first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
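/*
 * The sliding-window check above enforces a firmware rule: every run
 * of wnd_size = MAX_FETCH_BD - 3 consecutive BDs must together carry
 * at least one MSS worth of data, or the chip cannot assemble a
 * segment from the BDs it can fetch at once. As an illustrative
 * (hypothetical) example, if MAX_FETCH_BD were 13 the window would be
 * 10 BDs, so an skb with 16 tiny frags where some 10-frag run sums to
 * less than gso_size gets skb_linearize()d by the caller instead.
 */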

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int i;
	u8 hlen = 0;
	__le16 pkt_size = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
	   " gso type %x xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb (due to FW
	   restrictions). No need to check fragmentation if page size > 8K
	   (there will be no violation of FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	/*
	   Please read carefully. First we use one BD which we mark as start,
	   then we have a parsing info BD (used for TSO or xsum),
	   and only then we have the rest of the TSO BDs.
	   (don't forget to mark the last one as last,
	   and to unmap only AFTER you write to the BD ...)
	   And above all, all pbd sizes are in words - NOT DWORDS!
	 */

	pkt_prod = fp->tx_pkt_prod++;
	bd_prod = TX_BD(fp->tx_bd_prod);

	/* get a tx_buf and first BD */
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;

	tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
	tx_start_bd->general_data = (UNICAST_ADDRESS <<
				     ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
	/* header nbd */
	tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	/* remember the first BD of the packet */
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;
	tx_buf->flags = 0;

	DP(NETIF_MSG_TX_QUEUED,
	   "sending pkt %u @%p next_idx %u bd %u @%p\n",
	   pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);

#ifdef BCM_VLAN
	if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
	    (bp->flags & HW_VLAN_TX_FLAG)) {
		tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
	} else
#endif
		tx_start_bd->vlan = cpu_to_le16(pkt_prod);

	/* turn on parsing and get a BD */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	pbd = &fp->tx_desc_ring[bd_prod].parse_bd;

	memset(pbd, 0, sizeof(struct eth_tx_parse_bd));

	if (xmit_type & XMIT_CSUM) {
		hlen = (skb_network_header(skb) - skb->data) / 2;

		/* for now NS flag is not used in Linux */
		pbd->global_data =
			(hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
				 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));

		pbd->ip_hlen = (skb_transport_header(skb) -
				skb_network_header(skb)) / 2;

		hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;

		pbd->total_hlen = cpu_to_le16(hlen);
		hlen = hlen*2;

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;

		if (xmit_type & XMIT_CSUM_V4)
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IP_CSUM;
		else
			tx_start_bd->bd_flags.as_bitfield |=
						ETH_TX_BD_FLAGS_IPV6;

		if (xmit_type & XMIT_CSUM_TCP) {
			pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);

		} else {
			s8 fix = SKB_CS_OFF(skb); /* signed! */

			pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;

			DP(NETIF_MSG_TX_QUEUED,
			   "hlen %d fix %d csum before fix %x\n",
			   le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));

			/* HW bug: fixup the CSUM */
			pbd->tcp_pseudo_csum =
				bnx2x_csum_fix(skb_transport_header(skb),
					       SKB_CS(skb), fix);

			DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
			   pbd->tcp_pseudo_csum);
		}
	}
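	/*
	 * Header lengths in the parsing BD are kept in 16-bit words,
	 * which is why the offsets above are divided by 2 and why hlen
	 * is scaled back to bytes with "hlen = hlen*2" before being
	 * reused for the TSO header split below.
	 */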

	mapping = dma_map_single(&bp->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);

	tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
	tx_start_bd->nbd = cpu_to_le16(nbd);
	tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	pkt_size = tx_start_bd->nbytes;

	DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
	   " nbytes %d flags %x vlan %x\n",
	   tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
	   le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
	   tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));

	if (xmit_type & XMIT_GSO) {

		DP(NETIF_MSG_TX_QUEUED,
		   "TSO packet len %d hlen %d total len %d tso size %d\n",
		   skb->len, hlen, skb_headlen(skb),
		   skb_shinfo(skb)->gso_size);

		tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;

		if (unlikely(skb_headlen(skb) > hlen))
			bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
						 hlen, bd_prod, ++nbd);

		pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
		pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
		pbd->tcp_flags = pbd_tcp_flags(skb);

		if (xmit_type & XMIT_GSO_V4) {
			pbd->ip_id = swab16(ip_hdr(skb)->id);
			pbd->tcp_pseudo_csum =
				swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
							  ip_hdr(skb)->daddr,
							  0, IPPROTO_TCP, 0));

		} else
			pbd->tcp_pseudo_csum =
				swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
							&ipv6_hdr(skb)->daddr,
							0, IPPROTO_TCP, 0));

		pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
	}
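	/*
	 * For LSO the firmware rewrites the TCP length per generated
	 * segment, so the pseudo-header checksum seeded above is
	 * computed with a length of 0 (the last-but-one argument to
	 * csum_tcpudp_magic/csum_ipv6_magic) and
	 * ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN tells the chip to fold
	 * in the real length itself.
	 */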
	tx_data_bd = (struct eth_tx_bd *)tx_start_bd;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
		if (total_pkt_bd == NULL)
			total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;

		mapping = dma_map_page(&bp->pdev->dev, frag->page,
				       frag->page_offset,
				       frag->size, DMA_TO_DEVICE);

		tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_data_bd->nbytes = cpu_to_le16(frag->size);
		le16_add_cpu(&pkt_size, frag->size);

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d\n",
		   i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
		   le16_to_cpu(tx_data_bd->nbytes));
	}

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (total_pkt_bd != NULL)
		total_pkt_bd->total_pkt_bytes = pkt_size;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->tx_db.data.prod += nbd;
	barrier();
	DOORBELL(bp, fp->index, fp->tx_db.raw);

	mmiowb();

	fp->tx_bd_prod += nbd;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		netif_tx_stop_queue(txq);

		/* paired memory barrier is in bnx2x_tx_int(), we have to keep
		 * ordering of set_bit() in netif_tx_stop_queue() and read of
		 * fp->bd_tx_cons */
		smp_mb();

		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
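/*
 * The MAX_SKB_FRAGS + 3 headroom used when stopping/waking the queue
 * above is roughly the worst case a single packet can consume: one BD
 * per fragment plus the start BD (linear data), the parsing BD and a
 * possible TSO header-split BD.
 */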

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mailbox sequence if there is ongoing
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, an unfinished recovery may
			 * still be pending. We don't check the attention
			 * state here because it may have already been
			 * cleared by a "common" reset, but we shall proceed
			 * with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			       " completed yet. Try again later. If you still"
			       " see this message after a few retries then"
			       " power cycle is required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct netdev_hw_addr *ha;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			i = 0;
			netdev_for_each_mc_addr(ha, dev) {
				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&ha->addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&ha->addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&ha->addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
				i++;
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bp->set_mac_pending++;
			smp_wmb();

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   ha->addr);

				crc = crc32c_le(0, ha->addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
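/*
 * The E1H multicast path above implements a 256-bit hash filter: the
 * top 8 bits of the CRC32c of the MAC address select one bit out of
 * MC_HASH_SIZE 32-bit registers (8 * 32 = 256 bits). For example, if
 * crc bits [31:24] come out as 0x47, bit 7 (0x47 & 0x1f) of register
 * 2 (0x47 >> 5) is set.
 */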

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_eth_mac_addr_e1(bp, 1);
		else
			bnx2x_set_eth_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;
	u32 phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_read(bp, BP_PORT(bp), phy_type, prtad,
			     devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	if (prtad != bp->mdio.prtad) {
		DP(NETIF_MSG_LINK, "prtad mismatch (cmd:0x%x != bp:0x%x)\n",
		   prtad, bp->mdio.prtad);
		return -EINVAL;
	}

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_cl45_write(bp, BP_PORT(bp), ext_phy_type, prtad,
			      devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_delayed_work(&bp->reset_task, 0);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		pr_err("Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		pr_err("Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		pr_err("Cannot find second PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			pr_err("Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		pr_err("Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		pr_err("Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			pr_err("dma_set_coherent_mask failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		pr_err("System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}
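	/*
	 * DMA mask policy above: prefer a 64-bit streaming mask (and
	 * require a matching 64-bit coherent mask, flagging the device
	 * as DAC-capable), otherwise fall back to 32-bit DMA; if both
	 * fail the platform cannot address our rings at all.
	 */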

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		pr_err("Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		pr_err("Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			pr_err("Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			pr_err("Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		pr_err("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j+1]);
	}
}
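/*
 * Example decode for one 8-byte op record: if the first big-endian
 * word is 0x02012345 and the second is 0xdeadbeef, the unpacked
 * raw_op is { .op = 0x02, .offset = 0x012345, .raw_data = 0xdeadbeef }
 * (the values here are made up purely to show the bit split).
 */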

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else
		fw_file_name = FW_FILE_NAME_E1H;

	pr_info("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		pr_err("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		pr_err("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}


static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		pr_err("Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		pr_err("Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq, dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		pr_err("BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return -EAGAIN;
	}

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp, false);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
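/*
 * Flow between the two queues above: CNIC kernel work queue entries
 * (KWQEs) are staged in the driver's cnic_kwq ring and drained onto
 * the hardware slow-path queue (SPQ) only while fewer than
 * max_kwqe_pending of them are outstanding; each completion reported
 * via "count" frees that many SPQ slots and lets the drain continue.
 */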
13539
static int bnx2x_cnic_sp_queue(struct net_device *dev,
                               struct kwqe_16 *kwqes[], u32 count)
{
        struct bnx2x *bp = netdev_priv(dev);
        int i;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        for (i = 0; i < count; i++) {
                struct eth_spe *spe = (struct eth_spe *)kwqes[i];

                if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
                        break;

                *bp->cnic_kwq_prod = *spe;

                bp->cnic_kwq_pending++;

                DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
                   spe->hdr.conn_and_cmd_data, spe->hdr.type,
                   spe->data.mac_config_addr.hi,
                   spe->data.mac_config_addr.lo,
                   bp->cnic_kwq_pending);

                if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
                        bp->cnic_kwq_prod = bp->cnic_kwq;
                else
                        bp->cnic_kwq_prod++;
        }

        spin_unlock_bh(&bp->spq_lock);

        if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
                bnx2x_cnic_sp_post(bp, 0);

        return i;
}

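/* Deliver a control event to cnic; cnic_mutex protects against a
 * concurrent unregister.  May sleep, so process context only. */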
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        mutex_lock(&bp->cnic_mutex);
        c_ops = bp->cnic_ops;
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        mutex_unlock(&bp->cnic_mutex);

        return rc;
}

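/* Same as bnx2x_cnic_ctl_send() but safe in bottom-half context:
 * cnic_ops is sampled under RCU instead of taking the mutex. */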
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
        struct cnic_ops *c_ops;
        int rc = 0;

        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
        rcu_read_unlock();

        return rc;
}

/*
 * Helper to notify cnic of events whose control command carries no
 * data payload.
 */
static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
        struct cnic_ctl_info ctl = {0};

        ctl.cmd = cmd;

        return bnx2x_cnic_ctl_send(bp, &ctl);
}

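/* Completion of a cnic-owned CFC event for connection 'cid': notify
 * cnic first, then return one credit to the SPQ accounting. */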
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
        struct cnic_ctl_info ctl;

        /* first we tell CNIC and only then we count this as a completion */
        ctl.cmd = CNIC_CTL_COMPLETION_CMD;
        ctl.data.comp.cid = cid;

        bnx2x_cnic_ctl_send_bh(bp, &ctl);
        bnx2x_cnic_sp_post(bp, 1);
}

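/*
 * Control requests from cnic to the L2 driver: context-table (ILT)
 * writes, SPQ completion credits, and enabling/disabling the RX mode
 * of an L2 client ring.
 */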
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
        struct bnx2x *bp = netdev_priv(dev);
        int rc = 0;

        switch (ctl->cmd) {
        case DRV_CTL_CTXTBL_WR_CMD: {
                u32 index = ctl->data.io.offset;
                dma_addr_t addr = ctl->data.io.dma_addr;

                bnx2x_ilt_wr(bp, index, addr);
                break;
        }

        case DRV_CTL_COMPLETION_CMD: {
                int count = ctl->data.comp.comp_count;

                bnx2x_cnic_sp_post(bp, count);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_START_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask |= (1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        /* rtnl_lock is held. */
        case DRV_CTL_STOP_L2_CMD: {
                u32 cli = ctl->data.ring.client_id;

                bp->rx_mode_cl_mask &= ~(1 << cli);
                bnx2x_set_storm_rx_mode(bp);
                break;
        }

        default:
                BNX2X_ERR("unknown command %x\n", ctl->cmd);
                rc = -EINVAL;
        }

        return rc;
}

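/* Describe our interrupt resources to cnic: entry 0 carries the CNIC
 * status block (on MSI-X vector 1 when MSI-X is in use), entry 1 the
 * default status block. */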
static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (bp->flags & USING_MSIX_FLAG) {
                cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
                cp->irq_arr[0].vector = bp->msix_table[1].vector;
        } else {
                cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
                cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
        }
        cp->irq_arr[0].status_blk = bp->cnic_sb;
        cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
        cp->irq_arr[1].status_blk = bp->def_status_blk;
        cp->irq_arr[1].status_blk_num = DEF_SB_ID;

        cp->num_irq = 2;
}

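/*
 * Called by cnic when it attaches to this device: allocate the KWQE
 * staging ring (one page), initialize the CNIC status block and IRQ
 * info, program the iSCSI MAC, and finally publish 'ops' so the fast
 * paths (which read cnic_ops under RCU) start calling into cnic.
 */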
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
                               void *data)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        if (ops == NULL)
                return -EINVAL;

        if (atomic_read(&bp->intr_sem) != 0)
                return -EBUSY;

        bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bp->cnic_kwq)
                return -ENOMEM;

        bp->cnic_kwq_cons = bp->cnic_kwq;
        bp->cnic_kwq_prod = bp->cnic_kwq;
        bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

        bp->cnic_spq_pending = 0;
        bp->cnic_kwq_pending = 0;

        bp->cnic_data = data;

        cp->num_irq = 0;
        cp->drv_state = CNIC_DRV_STATE_REGD;

        bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));

        bnx2x_setup_cnic_irq_info(bp);
        bnx2x_set_iscsi_eth_mac_addr(bp, 1);
        bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
        rcu_assign_pointer(bp->cnic_ops, ops);

        return 0;
}

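/* Detach cnic: clear the iSCSI MAC, NULL out cnic_ops and wait for all
 * RCU readers to drain before freeing the KWQE staging ring. */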
static int bnx2x_unregister_cnic(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        mutex_lock(&bp->cnic_mutex);
        if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
                bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
                bnx2x_set_iscsi_eth_mac_addr(bp, 0);
        }
        cp->drv_state = 0;
        rcu_assign_pointer(bp->cnic_ops, NULL);
        mutex_unlock(&bp->cnic_mutex);
        synchronize_rcu();
        kfree(bp->cnic_kwq);
        bp->cnic_kwq = NULL;

        return 0;
}

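/*
 * Exported probe hook: fills in the cnic_eth_dev descriptor (register
 * windows, context-table geometry, callback pointers) that cnic uses
 * to drive this device.
 */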
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
        struct bnx2x *bp = netdev_priv(dev);
        struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

        cp->drv_owner = THIS_MODULE;
        cp->chip_id = CHIP_ID(bp);
        cp->pdev = bp->pdev;
        cp->io_base = bp->regview;
        cp->io_base2 = bp->doorbells;
        cp->max_kwqe_pending = 8;
        cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
        cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
        cp->ctx_tbl_len = CNIC_ILT_LINES;
        cp->starting_cid = BCM_CNIC_CID_START;
        cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
        cp->drv_ctl = bnx2x_drv_ctl;
        cp->drv_register_cnic = bnx2x_register_cnic;
        cp->drv_unregister_cnic = bnx2x_unregister_cnic;

        return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */