/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"
#include "bnx2x_dcb.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
				" (default is as a number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
				"(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

#ifdef BCM_CNIC
static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

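/* The storm_memset_*() helpers below program the firmware's view of the
 * driver state by writing directly into the STORM processors' internal
 * memory windows (BAR_[XTCU]STRORM_INTMEM); 64-bit DMA addresses are
 * written as two 32-bit halves.
 */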
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

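/* Counterpart of bnx2x_reg_wr_ind(): read a GRC register through the same
 * PCI config-space window; like the write side, intended for init time.
 */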
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

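/* DMAE channel "go" registers, one per command slot; writing 1 to one of
 * them kicks off the command previously copied into that slot's command
 * memory (see bnx2x_post_dmae() below).
 */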
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			   DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	spin_lock_bh(&bp->dmae_lock);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	spin_unlock_bh(&bp->dmae_lock);
	return rc;
}

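/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr,
 * falling back to indirect register writes while DMAE is not yet ready.
 */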
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

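/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer, reading registers one by one while DMAE is not yet ready.
 */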
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

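/* Write a buffer that may exceed the per-command DMAE length limit by
 * splitting it into DMAE_LEN32_WR_MAX(bp)-dword chunks.
 */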
static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

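/* Scan the four STORM assert lists and print any firmware asserts found;
 * returns the number of asserts seen.
 */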
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

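/* Print the MCP firmware trace buffer (located in shmem) to the log. */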
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

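/* Dump driver state (indices, status block data and, under
 * BNX2X_STOP_ON_ERROR, the rx/tx rings) together with the firmware trace
 * and STORM asserts for post-mortem analysis.
 */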
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

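/* Disable interrupts and make sure no ISR and no slowpath task is still
 * running before returning.
 */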
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if succeeded to acquire the lock */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
		    union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] terminate ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_TERMINATED;
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d)  "
			  "fp[%d] state is %x\n",
			  command, fp->index, fp->state);
		break;
	}

	smp_mb__before_atomic_inc();
	atomic_inc(&bp->cq_spq_left);
	/* push the change in fp->state towards the memory */
	smp_wmb();

	return;
}

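/* Legacy INTx/MSI interrupt handler: ack the status, schedule NAPI for
 * every fastpath whose status bit is set, pass CNIC bits to the CNIC
 * driver and kick the slowpath task for the remaining bit.
 */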
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

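/* Release a HW resource lock previously taken with bnx2x_acquire_hw_lock() */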
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

a22f0788
YR
1735int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1736{
1737 u32 sel_phy_idx = 0;
1738 if (bp->link_vars.link_up) {
1739 sel_phy_idx = EXT_PHY1;
1740 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1741 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1742 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1743 sel_phy_idx = EXT_PHY2;
1744 } else {
1745
1746 switch (bnx2x_phy_selection(&bp->link_params)) {
1747 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1748 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1749 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1750 sel_phy_idx = EXT_PHY1;
1751 break;
1752 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1753 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1754 sel_phy_idx = EXT_PHY2;
1755 break;
1756 }
1757 }
1758 /*
1759 * The selected active PHY is always after swapping (in case PHY
1760 * swapping is enabled). So when swapping is enabled, we need to reverse
1761 * the configuration.
1762 */
1763
1764 if (bp->link_params.multi_phy_config &
1765 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1766 if (sel_phy_idx == EXT_PHY1)
1767 sel_phy_idx = EXT_PHY2;
1768 else if (sel_phy_idx == EXT_PHY2)
1769 sel_phy_idx = EXT_PHY1;
1770 }
1771 return LINK_CONFIG_IDX(sel_phy_idx);
1772}
1773
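/*
 * The swap reversal at the end of bnx2x_get_link_cfg_idx() in
 * isolation - an illustrative sketch only. With PHY swapping enabled
 * the selected index refers to the swapped position, so it has to be
 * mirrored before it is used as a configuration index. The numeric
 * encodings are assumed; the driver's EXT_PHY1/EXT_PHY2 values are
 * authoritative.
 */
enum { SKETCH_EXT_PHY1 = 1, SKETCH_EXT_PHY2 = 2 };	/* assumed */

static int reverse_if_swapped(int sel, int swapped)
{
	if (!swapped)
		return sel;
	return (sel == SKETCH_EXT_PHY1) ? SKETCH_EXT_PHY2 : SKETCH_EXT_PHY1;
}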
9f6c9258 1774void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1775{
a22f0788 1776 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
1777 switch (bp->link_vars.ieee_fc &
1778 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1779 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 1780 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1781 ADVERTISED_Pause);
c18487ee 1782 break;
356e2385 1783
c18487ee 1784 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 1785 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 1786 ADVERTISED_Pause);
c18487ee 1787 break;
356e2385 1788
c18487ee 1789 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 1790 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 1791 break;
356e2385 1792
c18487ee 1793 default:
a22f0788 1794 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 1795 ADVERTISED_Pause);
c18487ee
YR
1796 break;
1797 }
1798}
f1410647 1799
9f6c9258 1800u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
c18487ee 1801{
19680c48
EG
1802 if (!BP_NOMCP(bp)) {
1803 u8 rc;
a22f0788
YR
1804 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1805 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
19680c48 1806 /* Initialize link parameters structure variables */
8c99e7b0
YR
1807 /* It is recommended to turn off RX FC for jumbo frames
1808 for better performance */
f2e0899f 1809 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
c0700f90 1810 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 1811 else
c0700f90 1812 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 1813
4a37fb66 1814 bnx2x_acquire_phy_lock(bp);
b5bf9068 1815
a22f0788 1816 if (load_mode == LOAD_DIAG) {
de6eae1f 1817 bp->link_params.loopback_mode = LOOPBACK_XGXS;
a22f0788
YR
1818 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1819 }
b5bf9068 1820
19680c48 1821 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 1822
4a37fb66 1823 bnx2x_release_phy_lock(bp);
a2fbb9ea 1824
3c96c68b
EG
1825 bnx2x_calc_fc_adv(bp);
1826
b5bf9068
EG
1827 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1828 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 1829 bnx2x_link_report(bp);
b5bf9068 1830 }
a22f0788 1831 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
1832 return rc;
1833 }
f5372251 1834 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
19680c48 1835 return -EINVAL;
a2fbb9ea
ET
1836}
1837
9f6c9258 1838void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 1839{
19680c48 1840 if (!BP_NOMCP(bp)) {
4a37fb66 1841 bnx2x_acquire_phy_lock(bp);
54c2fb78 1842 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
19680c48 1843 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 1844 bnx2x_release_phy_lock(bp);
a2fbb9ea 1845
19680c48
EG
1846 bnx2x_calc_fc_adv(bp);
1847 } else
f5372251 1848 BNX2X_ERR("Bootcode is missing - cannot set link\n");
c18487ee 1849}
a2fbb9ea 1850
c18487ee
YR
1851static void bnx2x__link_reset(struct bnx2x *bp)
1852{
19680c48 1853 if (!BP_NOMCP(bp)) {
4a37fb66 1854 bnx2x_acquire_phy_lock(bp);
589abe3a 1855 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
4a37fb66 1856 bnx2x_release_phy_lock(bp);
19680c48 1857 } else
f5372251 1858 BNX2X_ERR("Bootcode is missing - cannot reset link\n");
c18487ee 1859}
a2fbb9ea 1860
a22f0788 1861u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 1862{
2145a920 1863 u8 rc = 0;
a2fbb9ea 1864
2145a920
VZ
1865 if (!BP_NOMCP(bp)) {
1866 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
1867 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1868 is_serdes);
2145a920
VZ
1869 bnx2x_release_phy_lock(bp);
1870 } else
1871 BNX2X_ERR("Bootcode is missing - cannot test link\n");
a2fbb9ea 1872
c18487ee
YR
1873 return rc;
1874}
a2fbb9ea 1875
8a1c38d1 1876static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 1877{
8a1c38d1
EG
1878 u32 r_param = bp->link_vars.line_speed / 8;
1879 u32 fair_periodic_timeout_usec;
1880 u32 t_fair;
34f80b04 1881
8a1c38d1
EG
1882 memset(&(bp->cmng.rs_vars), 0,
1883 sizeof(struct rate_shaping_vars_per_port));
1884 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 1885
8a1c38d1
EG
1886 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1887 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 1888
8a1c38d1
EG
1889 /* this is the threshold below which no timer arming will occur;
1890 the 1.25 coefficient makes the threshold a little bigger
1891 than the real time, to compensate for timer inaccuracy */
1892 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
1893 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1894
8a1c38d1
EG
1895 /* resolution of fairness timer */
1896 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1897 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1898 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 1899
8a1c38d1
EG
1900 /* this is the threshold below which we won't arm the timer anymore */
1901 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 1902
8a1c38d1
EG
1903 /* we multiply by 1e3/8 to get bytes/msec.
1904 We don't want the credits to exceed
1905 t_fair*FAIR_MEM (the algorithm resolution) */
1906 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1907 /* since each tick is 4 usec */
1908 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
1909}
1910
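/*
 * A worked example of the arithmetic above for a 10G link. The
 * constants RS_PERIODIC_TIMEOUT_USEC = 100 and T_FAIR_COEF =
 * 10,000,000 are inferred from the comments ("100 usec", "for 10G it
 * is 1000usec") and should be checked against the driver headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int line_speed = 10000;	/* Mbps */
	unsigned int r_param = line_speed / 8;	/* 1250 bytes per usec */
	unsigned int rs_timeout_usec = 100;	/* assumed */
	unsigned int t_fair_coef = 10000000;	/* assumed */

	/* 1.25 * timeout * byte rate = 156250 bytes */
	printf("rs_threshold = %u bytes\n",
	       (rs_timeout_usec * r_param * 5) / 4);
	/* 10,000,000 / 10,000 = 1000 usec fairness period */
	printf("t_fair = %u usec\n", t_fair_coef / line_speed);
	return 0;
}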
2691d51d
EG
1911/* Calculates the sum of vn_min_rates.
1912 It's needed for further normalizing of the min_rates.
1913 Returns:
1914 sum of vn_min_rates.
1915 or
1916 0 - if all the min_rates are 0.
1917 In the latter case the fairness algorithm should be deactivated.
1918 If not all min_rates are zero then those that are zeroes will be set to 1.
1919 */
1920static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1921{
1922 int all_zero = 1;
2691d51d
EG
1923 int vn;
1924
1925 bp->vn_weight_sum = 0;
1926 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
f2e0899f 1927 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
1928 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1929 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1930
1931 /* Skip hidden vns */
1932 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1933 continue;
1934
1935 /* If min rate is zero - set it to 1 */
1936 if (!vn_min_rate)
1937 vn_min_rate = DEF_MIN_RATE;
1938 else
1939 all_zero = 0;
1940
1941 bp->vn_weight_sum += vn_min_rate;
1942 }
1943
1944 /* ... only if all min rates are zeros - disable fairness */
b015e3d1
EG
1945 if (all_zero) {
1946 bp->cmng.flags.cmng_enables &=
1947 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1948 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1949 " fairness will be disabled\n");
1950 } else
1951 bp->cmng.flags.cmng_enables |=
1952 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
1953}
1954
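/*
 * The normalization rule of bnx2x_calc_vn_weight_sum() on plain
 * arrays - a self-contained sketch, not driver code. Hidden vns are
 * skipped, zero min rates are bumped to a default so they still get a
 * share, and the all_zero flag signals that fairness should be
 * disabled. The default of 100 stands in for DEF_MIN_RATE and is an
 * assumption.
 */
#define SKETCH_DEF_MIN_RATE 100	/* assumed */

static unsigned int vn_weight_sum(const unsigned int *min_rate,
				  const int *hidden, int nvn, int *all_zero)
{
	unsigned int sum = 0;
	int vn;

	*all_zero = 1;
	for (vn = 0; vn < nvn; vn++) {
		unsigned int rate = min_rate[vn];

		if (hidden[vn])
			continue;
		if (!rate)
			rate = SKETCH_DEF_MIN_RATE;
		else
			*all_zero = 0;
		sum += rate;
	}
	return sum;
}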
f2e0899f 1955static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
34f80b04
EG
1956{
1957 struct rate_shaping_vars_per_vn m_rs_vn;
1958 struct fairness_vars_per_vn m_fair_vn;
f2e0899f
DK
1959 u32 vn_cfg = bp->mf_config[vn];
1960 int func = 2*vn + BP_PORT(bp);
34f80b04
EG
1961 u16 vn_min_rate, vn_max_rate;
1962 int i;
1963
1964 /* If function is hidden - set min and max to zeroes */
1965 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1966 vn_min_rate = 0;
1967 vn_max_rate = 0;
1968
1969 } else {
faa6fcbb
DK
1970 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
1971
34f80b04
EG
1972 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1973 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
faa6fcbb
DK
1974 /* If fairness is enabled (not all min rates are zeroes) and
1975 if the current min rate is zero, set it to 1.
1976 This is a requirement of the algorithm. */
f2e0899f 1977 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04 1978 vn_min_rate = DEF_MIN_RATE;
faa6fcbb
DK
1979
1980 if (IS_MF_SI(bp))
1981 /* maxCfg in percents of linkspeed */
1982 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
1983 else
1984 /* maxCfg is absolute in 100Mb units */
1985 vn_max_rate = maxCfg * 100;
34f80b04 1986 }
f85582f8 1987
8a1c38d1 1988 DP(NETIF_MSG_IFUP,
b015e3d1 1989 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
8a1c38d1 1990 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
1991
1992 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1993 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1994
1995 /* global vn counter - maximal Mbps for this vn */
1996 m_rs_vn.vn_counter.rate = vn_max_rate;
1997
1998 /* quota - number of bytes transmitted in this period */
1999 m_rs_vn.vn_counter.quota =
2000 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2001
8a1c38d1 2002 if (bp->vn_weight_sum) {
34f80b04
EG
2003 /* credit for each period of the fairness algorithm:
2004 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2005 vn_weight_sum should not be larger than 10000, thus
2006 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2007 than zero */
34f80b04 2008 m_fair_vn.vn_credit_delta =
cdaa7cb8
VZ
2009 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
2010 (8 * bp->vn_weight_sum))),
ff80ee02
DK
2011 (bp->cmng.fair_vars.fair_threshold +
2012 MIN_ABOVE_THRESH));
cdaa7cb8 2013 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
34f80b04
EG
2014 m_fair_vn.vn_credit_delta);
2015 }
2016
34f80b04
EG
2017 /* Store it to internal memory */
2018 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2019 REG_WR(bp, BAR_XSTRORM_INTMEM +
2020 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2021 ((u32 *)(&m_rs_vn))[i]);
2022
2023 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2024 REG_WR(bp, BAR_XSTRORM_INTMEM +
2025 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2026 ((u32 *)(&m_fair_vn))[i]);
2027}
f85582f8 2028
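/*
 * The two maxCfg interpretations above, side by side - illustrative
 * numbers only. In SI mode maxCfg is a percentage of the current link
 * speed; otherwise it is an absolute value in 100 Mb units, so the
 * same configured number can mean very different rates.
 */
static unsigned int vn_max_rate_mbps(unsigned int max_cfg,
				     unsigned int line_speed_mbps, int is_si)
{
	/* e.g. max_cfg = 40 on a 1000 Mbps link:
	 *   SI: 1000 * 40 / 100 =  400 Mbps
	 *   SD: 40 * 100        = 4000 Mbps
	 */
	return is_si ? (line_speed_mbps * max_cfg) / 100 : max_cfg * 100;
}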
523224a3
DK
2029static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2030{
2031 if (CHIP_REV_IS_SLOW(bp))
2032 return CMNG_FNS_NONE;
fb3bff17 2033 if (IS_MF(bp))
523224a3
DK
2034 return CMNG_FNS_MINMAX;
2035
2036 return CMNG_FNS_NONE;
2037}
2038
2039static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2040{
0793f83f 2041 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2042
2043 if (BP_NOMCP(bp))
2044 return; /* what should be the default value in this case */
2045
0793f83f
DK
2046 /* For 2 port configuration the absolute function number formula
2047 * is:
2048 * abs_func = 2 * vn + BP_PORT + BP_PATH
2049 *
2050 * and there are 4 functions per port
2051 *
2052 * For 4 port configuration it is
2053 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2054 *
2055 * and there are 2 functions per port
2056 */
523224a3 2057 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
0793f83f
DK
2058 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2059
2060 if (func >= E1H_FUNC_MAX)
2061 break;
2062
f2e0899f 2063 bp->mf_config[vn] =
523224a3
DK
2064 MF_CFG_RD(bp, func_mf_config[func].config);
2065 }
2066}
2067
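/*
 * The absolute-function formula from the comment above, evaluated for
 * both board layouts (sketch only). With vn = 1, port = 0, path = 0 a
 * 2-port chip yields function 2 while a 4-port chip yields function 4;
 * results >= E1H_FUNC_MAX terminate the loop above.
 */
static int abs_func(int vn, int port, int path, int four_port)
{
	int n = four_port ? 2 : 1;

	/* 2-port: 2*vn + port + path; 4-port: 4*vn + 2*port + path */
	return n * (2 * vn + port) + path;
}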
2068static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2069{
2070
2071 if (cmng_type == CMNG_FNS_MINMAX) {
2072 int vn;
2073
2074 /* clear cmng_enables */
2075 bp->cmng.flags.cmng_enables = 0;
2076
2077 /* read mf conf from shmem */
2078 if (read_cfg)
2079 bnx2x_read_mf_cfg(bp);
2080
2081 /* Init rate shaping and fairness contexts */
2082 bnx2x_init_port_minmax(bp);
2083
2084 /* vn_weight_sum and enable fairness if not 0 */
2085 bnx2x_calc_vn_weight_sum(bp);
2086
2087 /* calculate and set min-max rate for each vn */
c4154f25
DK
2088 if (bp->port.pmf)
2089 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2090 bnx2x_init_vn_minmax(bp, vn);
523224a3
DK
2091
2092 /* always enable rate shaping and fairness */
2093 bp->cmng.flags.cmng_enables |=
2094 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2095 if (!bp->vn_weight_sum)
2096 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2097 " fairness will be disabled\n");
2098 return;
2099 }
2100
2101 /* rate shaping and fairness are disabled */
2102 DP(NETIF_MSG_IFUP,
2103 "rate shaping and fairness are disabled\n");
2104}
34f80b04 2105
523224a3
DK
2106static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2107{
2108 int port = BP_PORT(bp);
2109 int func;
2110 int vn;
2111
2112 /* Set the attention towards other drivers on the same port */
2113 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2114 if (vn == BP_E1HVN(bp))
2115 continue;
2116
2117 func = ((vn << 1) | port);
2118 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2119 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2120 }
2121}
8a1c38d1 2122
c18487ee
YR
2123/* This function is called upon link interrupt */
2124static void bnx2x_link_attn(struct bnx2x *bp)
2125{
d9e8b185 2126 u32 prev_link_status = bp->link_vars.link_status;
bb2a0f7a
YG
2127 /* Make sure that we are synced with the current statistics */
2128 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2129
c18487ee 2130 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2131
bb2a0f7a
YG
2132 if (bp->link_vars.link_up) {
2133
1c06328c 2134 /* dropless flow control */
f2e0899f 2135 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2136 int port = BP_PORT(bp);
2137 u32 pause_enabled = 0;
2138
2139 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2140 pause_enabled = 1;
2141
2142 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2143 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2144 pause_enabled);
2145 }
2146
bb2a0f7a
YG
2147 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2148 struct host_port_stats *pstats;
2149
2150 pstats = bnx2x_sp(bp, port_stats);
2151 /* reset old bmac stats */
2152 memset(&(pstats->mac_stx[0]), 0,
2153 sizeof(struct mac_stx));
2154 }
f34d28ea 2155 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2156 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2157 }
2158
f2e0899f
DK
2159 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2160 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2161
f2e0899f
DK
2162 if (cmng_fns != CMNG_FNS_NONE) {
2163 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2164 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2165 } else
2166 /* rate shaping and fairness are disabled */
2167 DP(NETIF_MSG_IFUP,
2168 "single function mode without fairness\n");
34f80b04 2169 }
9fdc3e95
DK
2170
2171 if (IS_MF(bp))
2172 bnx2x_link_sync_notify(bp);
2173
2174 /* indicate link status only if link status actually changed */
2175 if (prev_link_status != bp->link_vars.link_status)
2176 bnx2x_link_report(bp);
c18487ee 2177}
a2fbb9ea 2178
9f6c9258 2179void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2180{
f34d28ea 2181 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
c18487ee 2182 return;
a2fbb9ea 2183
c18487ee 2184 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2185
bb2a0f7a
YG
2186 if (bp->link_vars.link_up)
2187 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2188 else
2189 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2190
f2e0899f
DK
2191 /* the link status update could be the result of a DCC event
2192 hence re-read the shmem mf configuration */
2193 bnx2x_read_mf_cfg(bp);
2691d51d 2194
c18487ee
YR
2195 /* indicate link status */
2196 bnx2x_link_report(bp);
a2fbb9ea 2197}
a2fbb9ea 2198
34f80b04
EG
2199static void bnx2x_pmf_update(struct bnx2x *bp)
2200{
2201 int port = BP_PORT(bp);
2202 u32 val;
2203
2204 bp->port.pmf = 1;
2205 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2206
2207 /* enable nig attention */
2208 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
f2e0899f
DK
2209 if (bp->common.int_block == INT_BLOCK_HC) {
2210 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2211 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2212 } else if (CHIP_IS_E2(bp)) {
2213 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2214 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2215 }
bb2a0f7a
YG
2216
2217 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2218}
2219
c18487ee 2220/* end of Link */
a2fbb9ea
ET
2221
2222/* slow path */
2223
2224/*
2225 * General service functions
2226 */
2227
2691d51d 2228/* send the MCP a request, block until there is a reply */
a22f0788 2229u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2230{
f2e0899f 2231 int mb_idx = BP_FW_MB_IDX(bp);
2691d51d
EG
2232 u32 seq = ++bp->fw_seq;
2233 u32 rc = 0;
2234 u32 cnt = 1;
2235 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2236
c4ff7cbf 2237 mutex_lock(&bp->fw_mb_mutex);
f2e0899f
DK
2238 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2239 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
2240
2691d51d
EG
2241 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2242
2243 do {
2244 /* let the FW do its magic ... */
2245 msleep(delay);
2246
f2e0899f 2247 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2248
c4ff7cbf
EG
2249 /* Give the FW up to 5 seconds (500*10ms) */
2250 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2251
2252 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2253 cnt*delay, rc, seq);
2254
2255 /* is this a reply to our command? */
2256 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2257 rc &= FW_MSG_CODE_MASK;
2258 else {
2259 /* FW BUG! */
2260 BNX2X_ERR("FW failed to respond!\n");
2261 bnx2x_fw_dump(bp);
2262 rc = 0;
2263 }
c4ff7cbf 2264 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2265
2266 return rc;
2267}
2268
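/*
 * The mailbox handshake in bnx2x_fw_command(), reduced to its core.
 * This sketch assumes the sequence number occupies the low 16 bits of
 * the header word and the response code the remaining bits; the real
 * split is defined by FW_MSG_SEQ_NUMBER_MASK/FW_MSG_CODE_MASK in the
 * firmware headers. Matching the echoed sequence number is what lets
 * the driver tell a fresh reply from a stale one.
 */
#include <stdint.h>

#define SKETCH_SEQ_MASK 0x0000ffffu	/* assumed */

static int reply_matches(uint32_t fw_mb, uint32_t seq, uint32_t *code)
{
	if ((fw_mb & SKETCH_SEQ_MASK) != seq)
		return 0;		/* not (yet) our reply */
	*code = fw_mb & ~SKETCH_SEQ_MASK;
	return 1;
}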
ec6ba945
VZ
2269static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2270{
2271#ifdef BCM_CNIC
2272 if (IS_FCOE_FP(fp) && IS_MF(bp))
2273 return false;
2274#endif
2275 return true;
2276}
2277
523224a3 2278/* must be called under rtnl_lock */
8d96286a 2279static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2691d51d 2280{
523224a3 2281 u32 mask = (1 << cl_id);
2691d51d 2282
523224a3
DK
2283 /* initial setting is BNX2X_ACCEPT_NONE */
2284 u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
2285 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2286 u8 unmatched_unicast = 0;
2691d51d 2287
0793f83f
DK
2288 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2289 unmatched_unicast = 1;
2290
523224a3
DK
2291 if (filters & BNX2X_PROMISCUOUS_MODE) {
2292 /* promiscuous - accept all, drop none */
2293 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2294 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
0793f83f
DK
2295 if (IS_MF_SI(bp)) {
2296 /*
2297 * In SI mode, promiscuous mode accepts
2298 * only unmatched packets
2299 */
2300 unmatched_unicast = 1;
2301 accp_all_ucast = 0;
2302 }
523224a3
DK
2303 }
2304 if (filters & BNX2X_ACCEPT_UNICAST) {
2305 /* accept matched ucast */
2306 drop_all_ucast = 0;
2307 }
d9c8f498 2308 if (filters & BNX2X_ACCEPT_MULTICAST)
523224a3
DK
2309 /* accept matched mcast */
2310 drop_all_mcast = 0;
d9c8f498 2311
523224a3
DK
2312 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2313 /* accept all ucast */
2314 drop_all_ucast = 0;
2315 accp_all_ucast = 1;
2316 }
2317 if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
2318 /* accept all mcast */
2319 drop_all_mcast = 0;
2320 accp_all_mcast = 1;
2321 }
2322 if (filters & BNX2X_ACCEPT_BROADCAST) {
2323 /* accept (all) bcast */
2324 drop_all_bcast = 0;
2325 accp_all_bcast = 1;
2326 }
2691d51d 2327
523224a3
DK
2328 bp->mac_filters.ucast_drop_all = drop_all_ucast ?
2329 bp->mac_filters.ucast_drop_all | mask :
2330 bp->mac_filters.ucast_drop_all & ~mask;
2691d51d 2331
523224a3
DK
2332 bp->mac_filters.mcast_drop_all = drop_all_mcast ?
2333 bp->mac_filters.mcast_drop_all | mask :
2334 bp->mac_filters.mcast_drop_all & ~mask;
2691d51d 2335
523224a3
DK
2336 bp->mac_filters.bcast_drop_all = drop_all_bcast ?
2337 bp->mac_filters.bcast_drop_all | mask :
2338 bp->mac_filters.bcast_drop_all & ~mask;
2691d51d 2339
523224a3
DK
2340 bp->mac_filters.ucast_accept_all = accp_all_ucast ?
2341 bp->mac_filters.ucast_accept_all | mask :
2342 bp->mac_filters.ucast_accept_all & ~mask;
2691d51d 2343
523224a3
DK
2344 bp->mac_filters.mcast_accept_all = accp_all_mcast ?
2345 bp->mac_filters.mcast_accept_all | mask :
2346 bp->mac_filters.mcast_accept_all & ~mask;
2347
2348 bp->mac_filters.bcast_accept_all = accp_all_bcast ?
2349 bp->mac_filters.bcast_accept_all | mask :
2350 bp->mac_filters.bcast_accept_all & ~mask;
2351
2352 bp->mac_filters.unmatched_unicast = unmatched_unicast ?
2353 bp->mac_filters.unmatched_unicast | mask :
2354 bp->mac_filters.unmatched_unicast & ~mask;
2691d51d
EG
2355}
2356
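/*
 * Every filter word above is updated with the same
 * "cond ? word | mask : word & ~mask" idiom. A small helper makes the
 * pattern explicit; this is a sketch of the idiom, not a proposed
 * driver change.
 */
#include <stdint.h>

static inline uint32_t mask_update(uint32_t word, uint32_t mask, int set)
{
	return set ? (word | mask) : (word & ~mask);
}

/* e.g.:
 *	bp->mac_filters.ucast_drop_all = mask_update(
 *		bp->mac_filters.ucast_drop_all, mask, drop_all_ucast);
 */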
8d96286a 2357static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2691d51d 2358{
030f3356
DK
2359 struct tstorm_eth_function_common_config tcfg = {0};
2360 u16 rss_flgs;
2691d51d 2361
030f3356
DK
2362 /* tpa */
2363 if (p->func_flgs & FUNC_FLG_TPA)
2364 tcfg.config_flags |=
2365 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
2691d51d 2366
030f3356
DK
2367 /* set rss flags */
2368 rss_flgs = (p->rss->mode <<
2369 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
2370
2371 if (p->rss->cap & RSS_IPV4_CAP)
2372 rss_flgs |= RSS_IPV4_CAP_MASK;
2373 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2374 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2375 if (p->rss->cap & RSS_IPV6_CAP)
2376 rss_flgs |= RSS_IPV6_CAP_MASK;
2377 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2378 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
2379
2380 tcfg.config_flags |= rss_flgs;
2381 tcfg.rss_result_mask = p->rss->result_mask;
2382
2383 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2691d51d 2384
523224a3
DK
2385 /* Enable the function in the FW */
2386 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2387 storm_memset_func_en(bp, p->func_id, 1);
2691d51d 2388
523224a3
DK
2389 /* statistics */
2390 if (p->func_flgs & FUNC_FLG_STATS) {
2391 struct stats_indication_flags stats_flags = {0};
2392 stats_flags.collect_eth = 1;
2691d51d 2393
523224a3
DK
2394 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2395 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2396
523224a3
DK
2397 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2398 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2399
523224a3
DK
2400 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2401 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d 2402
523224a3
DK
2403 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2404 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2691d51d
EG
2405 }
2406
523224a3
DK
2407 /* spq */
2408 if (p->func_flgs & FUNC_FLG_SPQ) {
2409 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2410 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2411 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2412 }
2691d51d
EG
2413}
2414
523224a3
DK
2415static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2416 struct bnx2x_fastpath *fp)
28912902 2417{
523224a3 2418 u16 flags = 0;
28912902 2419
523224a3
DK
2420 /* calculate queue flags */
2421 flags |= QUEUE_FLG_CACHE_ALIGN;
2422 flags |= QUEUE_FLG_HC;
0793f83f 2423 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
28912902 2424
523224a3
DK
2425 flags |= QUEUE_FLG_VLAN;
2426 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
523224a3
DK
2427
2428 if (!fp->disable_tpa)
2429 flags |= QUEUE_FLG_TPA;
2430
ec6ba945
VZ
2431 flags = stat_counter_valid(bp, fp) ?
2432 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
523224a3
DK
2433
2434 return flags;
2435}
2436
2437static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2438 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2439 struct bnx2x_rxq_init_params *rxq_init)
2440{
2441 u16 max_sge = 0;
2442 u16 sge_sz = 0;
2443 u16 tpa_agg_size = 0;
2444
2445 /* calculate queue flags */
2446 u16 flags = bnx2x_get_cl_flags(bp, fp);
2447
2448 if (!fp->disable_tpa) {
2449 pause->sge_th_hi = 250;
2450 pause->sge_th_lo = 150;
2451 tpa_agg_size = min_t(u32,
2452 (min_t(u32, 8, MAX_SKB_FRAGS) *
2453 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2454 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2455 SGE_PAGE_SHIFT;
2456 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2457 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2458 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2459 0xffff);
2460 }
2461
2462 /* pause - not for e1 */
2463 if (!CHIP_IS_E1(bp)) {
2464 pause->bd_th_hi = 350;
2465 pause->bd_th_lo = 250;
2466 pause->rcq_th_hi = 350;
2467 pause->rcq_th_lo = 250;
2468 pause->sge_th_hi = 0;
2469 pause->sge_th_lo = 0;
2470 pause->pri_map = 1;
2471 }
2472
2473 /* rxq setup */
2474 rxq_init->flags = flags;
2475 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2476 rxq_init->dscr_map = fp->rx_desc_mapping;
2477 rxq_init->sge_map = fp->rx_sge_mapping;
2478 rxq_init->rcq_map = fp->rx_comp_mapping;
2479 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91
VZ
2480
2481 /* Always use mini-jumbo MTU for FCoE L2 ring */
2482 if (IS_FCOE_FP(fp))
2483 rxq_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
2484 else
2485 rxq_init->mtu = bp->dev->mtu;
2486
2487 rxq_init->buf_sz = fp->rx_buf_size;
523224a3
DK
2488 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2489 rxq_init->cl_id = fp->cl_id;
2490 rxq_init->spcl_id = fp->cl_id;
2491 rxq_init->stat_id = fp->cl_id;
2492 rxq_init->tpa_agg_sz = tpa_agg_size;
2493 rxq_init->sge_buf_sz = sge_sz;
2494 rxq_init->max_sges_pkt = max_sge;
2495 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2496 rxq_init->fw_sb_id = fp->fw_sb_id;
2497
ec6ba945
VZ
2498 if (IS_FCOE_FP(fp))
2499 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2500 else
2501 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
523224a3
DK
2502
2503 rxq_init->cid = HW_CID(bp, fp->cid);
2504
2505 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2506}
2507
2508static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2509 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2510{
2511 u16 flags = bnx2x_get_cl_flags(bp, fp);
2512
2513 txq_init->flags = flags;
2514 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2515 txq_init->dscr_map = fp->tx_desc_mapping;
2516 txq_init->stat_id = fp->cl_id;
2517 txq_init->cid = HW_CID(bp, fp->cid);
2518 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2519 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2520 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945
VZ
2521
2522 if (IS_FCOE_FP(fp)) {
2523 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2524 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2525 }
2526
523224a3
DK
2527 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2528}
2529
8d96286a 2530static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
2531{
2532 struct bnx2x_func_init_params func_init = {0};
2533 struct bnx2x_rss_params rss = {0};
2534 struct event_ring_data eq_data = { {0} };
2535 u16 flags;
2536
2537 /* pf specific setups */
2538 if (!CHIP_IS_E1(bp))
fb3bff17 2539 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
523224a3 2540
f2e0899f
DK
2541 if (CHIP_IS_E2(bp)) {
2542 /* reset IGU PF statistics: MSIX + ATTN */
2543 /* PF */
2544 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2545 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2546 (CHIP_MODE_IS_4_PORT(bp) ?
2547 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2548 /* ATTN */
2549 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2550 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2551 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2552 (CHIP_MODE_IS_4_PORT(bp) ?
2553 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2554 }
2555
523224a3
DK
2556 /* function setup flags */
2557 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2558
f2e0899f
DK
2559 if (CHIP_IS_E1x(bp))
2560 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2561 else
2562 flags |= FUNC_FLG_TPA;
523224a3 2563
030f3356
DK
2564 /* function setup */
2565
523224a3
DK
2566 /**
2567 * Although RSS is meaningless when there is a single HW queue, we
2568 * still need it enabled in order to have HW Rx hash generated.
523224a3 2569 */
030f3356
DK
2570 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2571 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2572 rss.mode = bp->multi_mode;
2573 rss.result_mask = MULTI_MASK;
2574 func_init.rss = &rss;
523224a3
DK
2575
2576 func_init.func_flgs = flags;
2577 func_init.pf_id = BP_FUNC(bp);
2578 func_init.func_id = BP_FUNC(bp);
2579 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2580 func_init.spq_map = bp->spq_mapping;
2581 func_init.spq_prod = bp->spq_prod_idx;
2582
2583 bnx2x_func_init(bp, &func_init);
2584
2585 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2586
2587 /*
2588 Congestion management values depend on the link rate.
2589 There is no active link, so the initial link rate is set to 10 Gbps.
2590 When the link comes up, the congestion management values are
2591 re-calculated according to the actual link rate.
2592 */
2593 bp->link_vars.line_speed = SPEED_10000;
2594 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2595
2596 /* Only the PMF sets the HW */
2597 if (bp->port.pmf)
2598 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2599
2600 /* no rx until link is up */
2601 bp->rx_mode = BNX2X_RX_MODE_NONE;
2602 bnx2x_set_storm_rx_mode(bp);
2603
2604 /* init Event Queue */
2605 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2606 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2607 eq_data.producer = bp->eq_prod;
2608 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2609 eq_data.sb_id = DEF_SB_ID;
2610 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2611}
2612
2613
2614static void bnx2x_e1h_disable(struct bnx2x *bp)
2615{
2616 int port = BP_PORT(bp);
2617
2618 netif_tx_disable(bp->dev);
2619
2620 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2621
2622 netif_carrier_off(bp->dev);
2623}
2624
2625static void bnx2x_e1h_enable(struct bnx2x *bp)
2626{
2627 int port = BP_PORT(bp);
2628
2629 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2630
2631 /* Tx queues should only be re-enabled */
2632 netif_tx_wake_all_queues(bp->dev);
2633
2634 /*
2635 * Should not call netif_carrier_on since it will be called if the link
2636 * is up when checking for link state
2637 */
2638}
2639
0793f83f
DK
2640/* called due to MCP event (on pmf):
2641 * reread new bandwidth configuration
2642 * configure FW
2643 * notify others function about the change
2644 */
2645static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2646{
2647 if (bp->link_vars.link_up) {
2648 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2649 bnx2x_link_sync_notify(bp);
2650 }
2651 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2652}
2653
2654static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2655{
2656 bnx2x_config_mf_bw(bp);
2657 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2658}
2659
523224a3
DK
2660static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2661{
2662 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2663
2664 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2665
2666 /*
2667 * This is the only place besides the function initialization
2668 * where the bp->flags can change so it is done without any
2669 * locks
2670 */
f2e0899f 2671 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
523224a3
DK
2672 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2673 bp->flags |= MF_FUNC_DIS;
2674
2675 bnx2x_e1h_disable(bp);
2676 } else {
2677 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2678 bp->flags &= ~MF_FUNC_DIS;
2679
2680 bnx2x_e1h_enable(bp);
2681 }
2682 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2683 }
2684 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 2685 bnx2x_config_mf_bw(bp);
523224a3
DK
2686 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2687 }
2688
2689 /* Report results to MCP */
2690 if (dcc_event)
2691 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
2692 else
2693 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
2694}
2695
2696/* must be called under the spq lock */
2697static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2698{
2699 struct eth_spe *next_spe = bp->spq_prod_bd;
2700
2701 if (bp->spq_prod_bd == bp->spq_last_bd) {
2702 bp->spq_prod_bd = bp->spq;
2703 bp->spq_prod_idx = 0;
2704 DP(NETIF_MSG_TIMER, "end of spq\n");
2705 } else {
2706 bp->spq_prod_bd++;
2707 bp->spq_prod_idx++;
2708 }
2709 return next_spe;
2710}
2711
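/*
 * The producer advance in bnx2x_sp_get_next() is a plain circular
 * buffer walk: hand out the current slot, then either wrap to the
 * base or step forward. The same pattern in generic form (sketch):
 */
#include <stddef.h>

struct ring_sketch {
	void *base, *last, *prod;	/* first, final and current slot */
	unsigned int prod_idx;
	size_t slot_sz;
};

static void *ring_next(struct ring_sketch *r)
{
	void *slot = r->prod;

	if (r->prod == r->last) {
		r->prod = r->base;	/* wrap around */
		r->prod_idx = 0;
	} else {
		r->prod = (char *)r->prod + r->slot_sz;
		r->prod_idx++;
	}
	return slot;
}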
2712/* must be called under the spq lock */
28912902
MC
2713static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2714{
2715 int func = BP_FUNC(bp);
2716
2717 /* Make sure that BD data is updated before writing the producer */
2718 wmb();
2719
523224a3 2720 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 2721 bp->spq_prod_idx);
28912902
MC
2722 mmiowb();
2723}
2724
a2fbb9ea 2725/* the slow path queue is odd since completions arrive on the fastpath ring */
9f6c9258 2726int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
f85582f8 2727 u32 data_hi, u32 data_lo, int common)
a2fbb9ea 2728{
28912902 2729 struct eth_spe *spe;
523224a3 2730 u16 type;
a2fbb9ea 2731
a2fbb9ea
ET
2732#ifdef BNX2X_STOP_ON_ERROR
2733 if (unlikely(bp->panic))
2734 return -EIO;
2735#endif
2736
34f80b04 2737 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 2738
6e30dd4e
VZ
2739 if (common) {
2740 if (!atomic_read(&bp->eq_spq_left)) {
2741 BNX2X_ERR("BUG! EQ ring full!\n");
2742 spin_unlock_bh(&bp->spq_lock);
2743 bnx2x_panic();
2744 return -EBUSY;
2745 }
2746 } else if (!atomic_read(&bp->cq_spq_left)) {
2747 BNX2X_ERR("BUG! SPQ ring full!\n");
2748 spin_unlock_bh(&bp->spq_lock);
2749 bnx2x_panic();
2750 return -EBUSY;
a2fbb9ea 2751 }
f1410647 2752
28912902
MC
2753 spe = bnx2x_sp_get_next(bp);
2754
a2fbb9ea 2755 /* CID needs port number to be encoded in it */
28912902 2756 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
2757 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2758 HW_CID(bp, cid));
523224a3 2759
a2fbb9ea 2760 if (common)
523224a3
DK
2761 /* Common ramrods:
2762 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2763 * TRAFFIC_STOP, TRAFFIC_START
2764 */
2765 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2766 & SPE_HDR_CONN_TYPE;
2767 else
2768 /* ETH ramrods: SETUP, HALT */
2769 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2770 & SPE_HDR_CONN_TYPE;
a2fbb9ea 2771
523224a3
DK
2772 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2773 SPE_HDR_FUNCTION_ID);
a2fbb9ea 2774
523224a3
DK
2775 spe->hdr.type = cpu_to_le16(type);
2776
2777 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2778 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2779
2780 /* stats ramrod has its own slot on the spq */
6e30dd4e 2781 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY) {
523224a3
DK
2782 /* It's ok if the actual decrement is issued towards the memory
2783 * somewhere between the spin_lock and spin_unlock. Thus no
2784 * more explicit memory barrier is needed.
2785 */
6e30dd4e
VZ
2786 if (common)
2787 atomic_dec(&bp->eq_spq_left);
2788 else
2789 atomic_dec(&bp->cq_spq_left);
2790 }
2791
a2fbb9ea 2792
cdaa7cb8 2793 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
523224a3 2794 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
6e30dd4e 2795 "type(0x%x) left (ETH, COMMON) (%x,%x)\n",
cdaa7cb8
VZ
2796 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2797 (u32)(U64_LO(bp->spq_mapping) +
2798 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
6e30dd4e
VZ
2799 HW_CID(bp, cid), data_hi, data_lo, type,
2800 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 2801
28912902 2802 bnx2x_sp_prod_update(bp);
34f80b04 2803 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2804 return 0;
2805}
2806
2807/* acquire split MCP access lock register */
4a37fb66 2808static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2809{
72fd0718 2810 u32 j, val;
34f80b04 2811 int rc = 0;
a2fbb9ea
ET
2812
2813 might_sleep();
72fd0718 2814 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
2815 val = (1UL << 31);
2816 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2817 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2818 if (val & (1L << 31))
2819 break;
2820
2821 msleep(5);
2822 }
a2fbb9ea 2823 if (!(val & (1L << 31))) {
19680c48 2824 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2825 rc = -EBUSY;
2826 }
2827
2828 return rc;
2829}
2830
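/*
 * bnx2x_acquire_alr() above is a "write the request bit, poll until
 * the hardware reflects it back" loop with a bounded retry count.
 * Stripped of the register specifics (sketch; reg_read/reg_write are
 * hypothetical accessors):
 */
#include <stdint.h>

#define ALR_REQ_BIT (1u << 31)

static int acquire_bit(uint32_t (*reg_read)(void),
		       void (*reg_write)(uint32_t), int max_tries)
{
	int i;

	for (i = 0; i < max_tries; i++) {
		reg_write(ALR_REQ_BIT);
		if (reg_read() & ALR_REQ_BIT)
			return 0;	/* lock granted */
		/* the real code sleeps 5 ms between attempts */
	}
	return -1;			/* -EBUSY in the driver */
}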
4a37fb66
YG
2831/* release split MCP access lock register */
2832static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 2833{
72fd0718 2834 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
2835}
2836
523224a3
DK
2837#define BNX2X_DEF_SB_ATT_IDX 0x0001
2838#define BNX2X_DEF_SB_IDX 0x0002
2839
a2fbb9ea
ET
2840static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2841{
523224a3 2842 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
2843 u16 rc = 0;
2844
2845 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2846 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2847 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 2848 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 2849 }
523224a3
DK
2850
2851 if (bp->def_idx != def_sb->sp_sb.running_index) {
2852 bp->def_idx = def_sb->sp_sb.running_index;
2853 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 2854 }
523224a3
DK
2855
2856 /* Do not reorder: indices reading should complete before handling */
2857 barrier();
a2fbb9ea
ET
2858 return rc;
2859}
2860
2861/*
2862 * slow path service functions
2863 */
2864
2865static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2866{
34f80b04 2867 int port = BP_PORT(bp);
a2fbb9ea
ET
2868 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2869 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2870 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2871 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2872 u32 aeu_mask;
87942b46 2873 u32 nig_mask = 0;
f2e0899f 2874 u32 reg_addr;
a2fbb9ea 2875
a2fbb9ea
ET
2876 if (bp->attn_state & asserted)
2877 BNX2X_ERR("IGU ERROR\n");
2878
3fcaf2e5
EG
2879 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2880 aeu_mask = REG_RD(bp, aeu_addr);
2881
a2fbb9ea 2882 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 2883 aeu_mask, asserted);
72fd0718 2884 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 2885 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2886
3fcaf2e5
EG
2887 REG_WR(bp, aeu_addr, aeu_mask);
2888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2889
3fcaf2e5 2890 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2891 bp->attn_state |= asserted;
3fcaf2e5 2892 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2893
2894 if (asserted & ATTN_HARD_WIRED_MASK) {
2895 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2896
a5e9a7cf
EG
2897 bnx2x_acquire_phy_lock(bp);
2898
877e9aa4 2899 /* save nig interrupt mask */
87942b46 2900 nig_mask = REG_RD(bp, nig_int_mask_addr);
877e9aa4 2901 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2902
c18487ee 2903 bnx2x_link_attn(bp);
a2fbb9ea
ET
2904
2905 /* handle unicore attn? */
2906 }
2907 if (asserted & ATTN_SW_TIMER_4_FUNC)
2908 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2909
2910 if (asserted & GPIO_2_FUNC)
2911 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2912
2913 if (asserted & GPIO_3_FUNC)
2914 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2915
2916 if (asserted & GPIO_4_FUNC)
2917 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2918
2919 if (port == 0) {
2920 if (asserted & ATTN_GENERAL_ATTN_1) {
2921 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2922 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2923 }
2924 if (asserted & ATTN_GENERAL_ATTN_2) {
2925 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2926 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2927 }
2928 if (asserted & ATTN_GENERAL_ATTN_3) {
2929 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2930 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2931 }
2932 } else {
2933 if (asserted & ATTN_GENERAL_ATTN_4) {
2934 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2935 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2936 }
2937 if (asserted & ATTN_GENERAL_ATTN_5) {
2938 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2939 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2940 }
2941 if (asserted & ATTN_GENERAL_ATTN_6) {
2942 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2943 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2944 }
2945 }
2946
2947 } /* if hardwired */
2948
f2e0899f
DK
2949 if (bp->common.int_block == INT_BLOCK_HC)
2950 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2951 COMMAND_REG_ATTN_BITS_SET);
2952 else
2953 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2954
2955 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2956 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2957 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
2958
2959 /* now set back the mask */
a5e9a7cf 2960 if (asserted & ATTN_NIG_FOR_FUNC) {
87942b46 2961 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
2962 bnx2x_release_phy_lock(bp);
2963 }
a2fbb9ea
ET
2964}
2965
fd4ef40d
EG
2966static inline void bnx2x_fan_failure(struct bnx2x *bp)
2967{
2968 int port = BP_PORT(bp);
b7737c9b 2969 u32 ext_phy_config;
fd4ef40d 2970 /* mark the failure */
b7737c9b
YR
2971 ext_phy_config =
2972 SHMEM_RD(bp,
2973 dev_info.port_hw_config[port].external_phy_config);
2974
2975 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2976 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 2977 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 2978 ext_phy_config);
fd4ef40d
EG
2979
2980 /* log the failure */
cdaa7cb8
VZ
2981 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2982 " the driver to shutdown the card to prevent permanent"
2983 " damage. Please contact OEM Support for assistance\n");
fd4ef40d 2984}
ab6ad5a4 2985
877e9aa4 2986static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2987{
34f80b04 2988 int port = BP_PORT(bp);
877e9aa4 2989 int reg_offset;
d90d96ba 2990 u32 val;
877e9aa4 2991
34f80b04
EG
2992 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2993 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2994
34f80b04 2995 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2996
2997 val = REG_RD(bp, reg_offset);
2998 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2999 REG_WR(bp, reg_offset, val);
3000
3001 BNX2X_ERR("SPIO5 hw attention\n");
3002
fd4ef40d 3003 /* Fan failure attention */
d90d96ba 3004 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 3005 bnx2x_fan_failure(bp);
877e9aa4 3006 }
34f80b04 3007
589abe3a
EG
3008 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
3009 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
3010 bnx2x_acquire_phy_lock(bp);
3011 bnx2x_handle_module_detect_int(&bp->link_params);
3012 bnx2x_release_phy_lock(bp);
3013 }
3014
34f80b04
EG
3015 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3016
3017 val = REG_RD(bp, reg_offset);
3018 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3019 REG_WR(bp, reg_offset, val);
3020
3021 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3022 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3023 bnx2x_panic();
3024 }
877e9aa4
ET
3025}
3026
3027static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
3028{
3029 u32 val;
3030
0626b899 3031 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3032
3033 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3034 BNX2X_ERR("DB hw attention 0x%x\n", val);
3035 /* DORQ discard attention */
3036 if (val & 0x2)
3037 BNX2X_ERR("FATAL error from DORQ\n");
3038 }
34f80b04
EG
3039
3040 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3041
3042 int port = BP_PORT(bp);
3043 int reg_offset;
3044
3045 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3046 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3047
3048 val = REG_RD(bp, reg_offset);
3049 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3050 REG_WR(bp, reg_offset, val);
3051
3052 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3053 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3054 bnx2x_panic();
3055 }
877e9aa4
ET
3056}
3057
3058static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
3059{
3060 u32 val;
3061
3062 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3063
3064 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3065 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3066 /* CFC error attention */
3067 if (val & 0x2)
3068 BNX2X_ERR("FATAL error from CFC\n");
3069 }
3070
3071 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
3072
3073 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
3074 BNX2X_ERR("PXP hw attention 0x%x\n", val);
3075 /* RQ_USDMDP_FIFO_OVERFLOW */
3076 if (val & 0x18000)
3077 BNX2X_ERR("FATAL error from PXP\n");
f2e0899f
DK
3078 if (CHIP_IS_E2(bp)) {
3079 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3080 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3081 }
877e9aa4 3082 }
34f80b04
EG
3083
3084 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3085
3086 int port = BP_PORT(bp);
3087 int reg_offset;
3088
3089 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3090 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3091
3092 val = REG_RD(bp, reg_offset);
3093 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3094 REG_WR(bp, reg_offset, val);
3095
3096 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3097 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3098 bnx2x_panic();
3099 }
877e9aa4
ET
3100}
3101
3102static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3103{
34f80b04
EG
3104 u32 val;
3105
877e9aa4
ET
3106 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3107
34f80b04
EG
3108 if (attn & BNX2X_PMF_LINK_ASSERT) {
3109 int func = BP_FUNC(bp);
3110
3111 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
f2e0899f
DK
3112 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3113 func_mf_config[BP_ABS_FUNC(bp)].config);
3114 val = SHMEM_RD(bp,
3115 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3116 if (val & DRV_STATUS_DCC_EVENT_MASK)
3117 bnx2x_dcc_event(bp,
3118 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3119
3120 if (val & DRV_STATUS_SET_MF_BW)
3121 bnx2x_set_mf_bw(bp);
3122
34f80b04 3123 bnx2x__link_status_update(bp);
2691d51d 3124 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3125 bnx2x_pmf_update(bp);
3126
e4901dde 3127 if (bp->port.pmf &&
785b9b1a
SR
3128 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3129 bp->dcbx_enabled > 0)
e4901dde
VZ
3130 /* start dcbx state machine */
3131 bnx2x_dcbx_set_params(bp,
3132 BNX2X_DCBX_STATE_NEG_RECEIVED);
34f80b04 3133 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
3134
3135 BNX2X_ERR("MC assert!\n");
3136 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3137 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3138 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3139 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3140 bnx2x_panic();
3141
3142 } else if (attn & BNX2X_MCP_ASSERT) {
3143
3144 BNX2X_ERR("MCP assert!\n");
3145 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 3146 bnx2x_fw_dump(bp);
877e9aa4
ET
3147
3148 } else
3149 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3150 }
3151
3152 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
3153 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3154 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
3155 val = CHIP_IS_E1(bp) ? 0 :
3156 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
3157 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3158 }
3159 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
3160 val = CHIP_IS_E1(bp) ? 0 :
3161 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
3162 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3163 }
877e9aa4 3164 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
3165 }
3166}
3167
72fd0718
VZ
3168#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3169#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3170#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3171#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3172#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
f85582f8 3173
72fd0718
VZ
3174/*
3175 * should be run under rtnl lock
3176 */
3177static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3178{
3179 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3180 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3181 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3182 barrier();
3183 mmiowb();
3184}
3185
3186/*
3187 * should be run under rtnl lock
3188 */
3189static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3190{
3191 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3192 val |= (1 << RESET_DONE_FLAG_SHIFT);
3193 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3194 barrier();
3195 mmiowb();
3196}
3197
3198/*
3199 * should be run under rtnl lock
3200 */
9f6c9258 3201bool bnx2x_reset_is_done(struct bnx2x *bp)
72fd0718
VZ
3202{
3203 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3204 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3205 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3206}
3207
3208/*
3209 * should be run under rtnl lock
3210 */
9f6c9258 3211inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3212{
3213 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3214
3215 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3216
3217 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3218 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3219 barrier();
3220 mmiowb();
3221}
3222
3223/*
3224 * should be run under rtnl lock
3225 */
9f6c9258 3226u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
72fd0718
VZ
3227{
3228 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3229
3230 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3231
3232 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3233 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3234 barrier();
3235 mmiowb();
3236
3237 return val1;
3238}
3239
3240/*
3241 * should be run under rtnl lock
3242 */
3243static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3244{
3245 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3246}
3247
3248static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3249{
3250 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3251 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3252}
3253
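/*
 * The register layout behind the load counter helpers above, using the
 * constants from the #defines at the top of this block: the low 16
 * bits count loaded driver instances and the bits above them carry the
 * reset state. A sketch of the same packing:
 */
#include <stdint.h>

#define LC_BITS 16
#define LC_MASK ((1u << LC_BITS) - 1)

static uint32_t load_cnt_inc(uint32_t reg)
{
	uint32_t cnt = ((reg & LC_MASK) + 1) & LC_MASK;	/* wraps at 2^16 */

	return (reg & ~LC_MASK) | cnt;	/* keep the flag bits untouched */
}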
3254static inline void _print_next_block(int idx, const char *blk)
3255{
3256 if (idx)
3257 pr_cont(", ");
3258 pr_cont("%s", blk);
3259}
3260
3261static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3262{
3263 int i = 0;
3264 u32 cur_bit = 0;
3265 for (i = 0; sig; i++) {
3266 cur_bit = ((u32)0x1 << i);
3267 if (sig & cur_bit) {
3268 switch (cur_bit) {
3269 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3270 _print_next_block(par_num++, "BRB");
3271 break;
3272 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3273 _print_next_block(par_num++, "PARSER");
3274 break;
3275 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3276 _print_next_block(par_num++, "TSDM");
3277 break;
3278 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3279 _print_next_block(par_num++, "SEARCHER");
3280 break;
3281 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3282 _print_next_block(par_num++, "TSEMI");
3283 break;
3284 }
3285
3286 /* Clear the bit */
3287 sig &= ~cur_bit;
3288 }
3289 }
3290
3291 return par_num;
3292}
3293
3294static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3295{
3296 int i = 0;
3297 u32 cur_bit = 0;
3298 for (i = 0; sig; i++) {
3299 cur_bit = ((u32)0x1 << i);
3300 if (sig & cur_bit) {
3301 switch (cur_bit) {
3302 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3303 _print_next_block(par_num++, "PBCLIENT");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3306 _print_next_block(par_num++, "QM");
3307 break;
3308 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3309 _print_next_block(par_num++, "XSDM");
3310 break;
3311 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3312 _print_next_block(par_num++, "XSEMI");
3313 break;
3314 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3315 _print_next_block(par_num++, "DOORBELLQ");
3316 break;
3317 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3318 _print_next_block(par_num++, "VAUX PCI CORE");
3319 break;
3320 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3321 _print_next_block(par_num++, "DEBUG");
3322 break;
3323 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3324 _print_next_block(par_num++, "USDM");
3325 break;
3326 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3327 _print_next_block(par_num++, "USEMI");
3328 break;
3329 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3330 _print_next_block(par_num++, "UPB");
3331 break;
3332 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3333 _print_next_block(par_num++, "CSDM");
3334 break;
3335 }
3336
3337 /* Clear the bit */
3338 sig &= ~cur_bit;
3339 }
3340 }
3341
3342 return par_num;
3343}
3344
3345static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3346{
3347 int i = 0;
3348 u32 cur_bit = 0;
3349 for (i = 0; sig; i++) {
3350 cur_bit = ((u32)0x1 << i);
3351 if (sig & cur_bit) {
3352 switch (cur_bit) {
3353 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3354 _print_next_block(par_num++, "CSEMI");
3355 break;
3356 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3357 _print_next_block(par_num++, "PXP");
3358 break;
3359 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3360 _print_next_block(par_num++,
3361 "PXPPCICLOCKCLIENT");
3362 break;
3363 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3364 _print_next_block(par_num++, "CFC");
3365 break;
3366 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3367 _print_next_block(par_num++, "CDU");
3368 break;
3369 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3370 _print_next_block(par_num++, "IGU");
3371 break;
3372 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3373 _print_next_block(par_num++, "MISC");
3374 break;
3375 }
3376
3377 /* Clear the bit */
3378 sig &= ~cur_bit;
3379 }
3380 }
3381
3382 return par_num;
3383}
3384
3385static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3386{
3387 int i = 0;
3388 u32 cur_bit = 0;
3389 for (i = 0; sig; i++) {
3390 cur_bit = ((u32)0x1 << i);
3391 if (sig & cur_bit) {
3392 switch (cur_bit) {
3393 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3394 _print_next_block(par_num++, "MCP ROM");
3395 break;
3396 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3397 _print_next_block(par_num++, "MCP UMP RX");
3398 break;
3399 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3400 _print_next_block(par_num++, "MCP UMP TX");
3401 break;
3402 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3403 _print_next_block(par_num++, "MCP SCPAD");
3404 break;
3405 }
3406
3407 /* Clear the bit */
3408 sig &= ~cur_bit;
3409 }
3410 }
3411
3412 return par_num;
3413}
3414
3415static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3416 u32 sig2, u32 sig3)
3417{
3418 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3419 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3420 int par_num = 0;
3421 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3422 "[0]:0x%08x [1]:0x%08x "
3423 "[2]:0x%08x [3]:0x%08x\n",
3424 sig0 & HW_PRTY_ASSERT_SET_0,
3425 sig1 & HW_PRTY_ASSERT_SET_1,
3426 sig2 & HW_PRTY_ASSERT_SET_2,
3427 sig3 & HW_PRTY_ASSERT_SET_3);
3428 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3429 bp->dev->name);
3430 par_num = bnx2x_print_blocks_with_parity0(
3431 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3432 par_num = bnx2x_print_blocks_with_parity1(
3433 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3434 par_num = bnx2x_print_blocks_with_parity2(
3435 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3436 par_num = bnx2x_print_blocks_with_parity3(
3437 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3438 printk("\n");
3439 return true;
3440 } else
3441 return false;
3442}
3443
9f6c9258 3444bool bnx2x_chk_parity_attn(struct bnx2x *bp)
877e9aa4 3445{
a2fbb9ea 3446 struct attn_route attn;
72fd0718
VZ
3447 int port = BP_PORT(bp);
3448
3449 attn.sig[0] = REG_RD(bp,
3450 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3451 port*4);
3452 attn.sig[1] = REG_RD(bp,
3453 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3454 port*4);
3455 attn.sig[2] = REG_RD(bp,
3456 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3457 port*4);
3458 attn.sig[3] = REG_RD(bp,
3459 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3460 port*4);
3461
3462 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3463 attn.sig[3]);
3464}
3465
f2e0899f
DK
3466
3467static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3468{
3469 u32 val;
3470 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3471
3472 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3473 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3474 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3475 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3476 "ADDRESS_ERROR\n");
3477 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3478 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3479 "INCORRECT_RCV_BEHAVIOR\n");
3480 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3481 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3482 "WAS_ERROR_ATTN\n");
3483 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3484 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3485 "VF_LENGTH_VIOLATION_ATTN\n");
3486 if (val &
3487 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3488 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3489 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3490 if (val &
3491 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3492 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3493 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3494 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3495 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3496 "TCPL_ERROR_ATTN\n");
3497 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3498 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3499 "TCPL_IN_TWO_RCBS_ATTN\n");
3500 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3501 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3502 "CSSNOOP_FIFO_OVERFLOW\n");
3503 }
3504 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3505 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3506 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3507 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3508 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3509 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3510 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3511 "_ATC_TCPL_TO_NOT_PEND\n");
3512 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3513 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3514 "ATC_GPA_MULTIPLE_HITS\n");
3515 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3516 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3517 "ATC_RCPL_TO_EMPTY_CNT\n");
3518 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3519 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3520 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3521 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3522 "ATC_IREQ_LESS_THAN_STU\n");
3523 }
3524
3525 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3526 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3527 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3528 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3529 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3530 }
3531
3532}
3533
72fd0718
VZ
3534static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3535{
3536 struct attn_route attn, *group_mask;
34f80b04 3537 int port = BP_PORT(bp);
877e9aa4 3538 int index;
a2fbb9ea
ET
3539 u32 reg_addr;
3540 u32 val;
3fcaf2e5 3541 u32 aeu_mask;
a2fbb9ea
ET
3542
3543 /* need to take HW lock because MCP or other port might also
3544 try to handle this event */
4a37fb66 3545 bnx2x_acquire_alr(bp);
a2fbb9ea 3546
4a33bc03 3547 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
72fd0718
VZ
3548 bp->recovery_state = BNX2X_RECOVERY_INIT;
3549 bnx2x_set_reset_in_progress(bp);
3550 schedule_delayed_work(&bp->reset_task, 0);
3551 /* Disable HW interrupts */
3552 bnx2x_int_disable(bp);
3553 bnx2x_release_alr(bp);
3554 /* In case of parity errors don't handle attentions so that
3555 * the other function can also "see" the parity errors.
3556 */
3557 return;
3558 }
3559
a2fbb9ea
ET
3560 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3561 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3562 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3563 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
f2e0899f
DK
3564 if (CHIP_IS_E2(bp))
3565 attn.sig[4] =
3566 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3567 else
3568 attn.sig[4] = 0;
3569
3570 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3571 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
3572
3573 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3574 if (deasserted & (1 << index)) {
72fd0718 3575 group_mask = &bp->attn_group[index];
a2fbb9ea 3576
f2e0899f
DK
3577 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3578 "%08x %08x %08x\n",
3579 index,
3580 group_mask->sig[0], group_mask->sig[1],
3581 group_mask->sig[2], group_mask->sig[3],
3582 group_mask->sig[4]);
a2fbb9ea 3583
f2e0899f
DK
3584 bnx2x_attn_int_deasserted4(bp,
3585 attn.sig[4] & group_mask->sig[4]);
877e9aa4 3586 bnx2x_attn_int_deasserted3(bp,
72fd0718 3587 attn.sig[3] & group_mask->sig[3]);
877e9aa4 3588 bnx2x_attn_int_deasserted1(bp,
72fd0718 3589 attn.sig[1] & group_mask->sig[1]);
877e9aa4 3590 bnx2x_attn_int_deasserted2(bp,
72fd0718 3591 attn.sig[2] & group_mask->sig[2]);
877e9aa4 3592 bnx2x_attn_int_deasserted0(bp,
72fd0718 3593 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
3594 }
3595 }
3596
4a37fb66 3597 bnx2x_release_alr(bp);
a2fbb9ea 3598
f2e0899f
DK
3599 if (bp->common.int_block == INT_BLOCK_HC)
3600 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3601 COMMAND_REG_ATTN_BITS_CLR);
3602 else
3603 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
3604
3605 val = ~deasserted;
f2e0899f
DK
3606 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3607 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 3608 REG_WR(bp, reg_addr, val);
a2fbb9ea 3609
a2fbb9ea 3610 if (~bp->attn_state & deasserted)
3fcaf2e5 3611 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
3612
3613 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3614 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3615
3fcaf2e5
EG
3616 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3617 aeu_mask = REG_RD(bp, reg_addr);
3618
3619 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3620 aeu_mask, deasserted);
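 /* re-arm only the lines that were deasserted; the low 10 bits (0x3ff)
 * of the AEU mask carry the attention input lines
 */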
72fd0718 3621 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 3622 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3623
3fcaf2e5
EG
3624 REG_WR(bp, reg_addr, aeu_mask);
3625 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
3626
3627 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3628 bp->attn_state &= ~deasserted;
3629 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3630}
3631
3632static void bnx2x_attn_int(struct bnx2x *bp)
3633{
3634 /* read local copy of bits */
68d59484
EG
3635 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3636 attn_bits);
3637 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3638 attn_bits_ack);
a2fbb9ea
ET
3639 u32 attn_state = bp->attn_state;
3640
3641 /* look for changed bits */
3642 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3643 u32 deasserted = ~attn_bits & attn_ack & attn_state;
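 /* a bit is "asserted" only when newly raised (set in attn_bits, clear
 * in both the ack and the cached state) and "deasserted" only when it
 * drops while still acked and cached as set; stable bits hit neither mask
 */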
3644
3645 DP(NETIF_MSG_HW,
3646 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3647 attn_bits, attn_ack, asserted, deasserted);
3648
3649 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 3650 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
3651
3652 /* handle bits that were raised */
3653 if (asserted)
3654 bnx2x_attn_int_asserted(bp, asserted);
3655
3656 if (deasserted)
3657 bnx2x_attn_int_deasserted(bp, deasserted);
3658}
3659
523224a3
DK
3660static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3661{
3662 /* No memory barriers */
3663 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3664 mmiowb(); /* keep prod updates ordered */
3665}
3666
3667#ifdef BCM_CNIC
3668static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3669 union event_ring_elem *elem)
3670{
3671 if (!bp->cnic_eth_dev.starting_cid ||
3672 cid < bp->cnic_eth_dev.starting_cid)
3673 return 1;
3674
3675 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3676
3677 if (unlikely(elem->message.data.cfc_del_event.error)) {
3678 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3679 cid);
3680 bnx2x_panic_dump(bp);
3681 }
3682 bnx2x_cnic_cfc_comp(bp, cid);
3683 return 0;
3684}
3685#endif
3686
3687static void bnx2x_eq_int(struct bnx2x *bp)
3688{
3689 u16 hw_cons, sw_cons, sw_prod;
3690 union event_ring_elem *elem;
3691 u32 cid;
3692 u8 opcode;
3693 int spqe_cnt = 0;
3694
3695 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3696
3697 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3698 * When we reach the next-page element we need to adjust so the loop
3699 * condition below will be met. The next element is the size of a
3700 * regular element and hence we increment by 1
3701 */
3702 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3703 hw_cons++;
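 /* e.g. (assuming 256 elements per page, i.e. EQ_DESC_MAX_PAGE == 255):
 * a hw_cons of 255 sits on the next-page element, so it is bumped to
 * 256, and NEXT_EQ_IDX() in the loop below then lands on the first
 * real element of the following page
 */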
3704
25985edc 3705 /* This function is never run in parallel with itself for a
523224a3
DK
3706 * specific bp, so there is no need for a "paired" read memory
3707 * barrier here.
3708 */
3709 sw_cons = bp->eq_cons;
3710 sw_prod = bp->eq_prod;
3711
6e30dd4e
VZ
3712 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %u\n",
3713 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
523224a3
DK
3714
3715 for (; sw_cons != hw_cons;
3716 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3717
3718
3719 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3720
3721 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3722 opcode = elem->message.opcode;
3723
3724
3725 /* handle eq element */
3726 switch (opcode) {
3727 case EVENT_RING_OPCODE_STAT_QUERY:
3728 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3729 /* nothing to do with stats comp */
3730 continue;
3731
3732 case EVENT_RING_OPCODE_CFC_DEL:
3733 /* handle according to cid range */
3734 /*
3735 * we may want to verify here that the bp state is
3736 * HALTING
3737 */
3738 DP(NETIF_MSG_IFDOWN,
3739 "got delete ramrod for MULTI[%d]\n", cid);
3740#ifdef BCM_CNIC
3741 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3742 goto next_spqe;
ec6ba945
VZ
3743 if (cid == BNX2X_FCOE_ETH_CID)
3744 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3745 else
523224a3 3746#endif
ec6ba945 3747 bnx2x_fp(bp, cid, state) =
523224a3
DK
3748 BNX2X_FP_STATE_CLOSED;
3749
3750 goto next_spqe;
e4901dde
VZ
3751
3752 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3753 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3754 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3755 goto next_spqe;
3756 case EVENT_RING_OPCODE_START_TRAFFIC:
3757 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3758 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3759 goto next_spqe;
523224a3
DK
3760 }
3761
3762 switch (opcode | bp->state) {
3763 case (EVENT_RING_OPCODE_FUNCTION_START |
3764 BNX2X_STATE_OPENING_WAIT4_PORT):
3765 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3766 bp->state = BNX2X_STATE_FUNC_STARTED;
3767 break;
3768
3769 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3770 BNX2X_STATE_CLOSING_WAIT4_HALT):
3771 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3772 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3773 break;
3774
3775 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3776 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3777 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
6e30dd4e
VZ
3778 if (elem->message.data.set_mac_event.echo)
3779 bp->set_mac_pending = 0;
523224a3
DK
3780 break;
3781
3782 case (EVENT_RING_OPCODE_SET_MAC |
3783 BNX2X_STATE_CLOSING_WAIT4_HALT):
3784 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
6e30dd4e
VZ
3785 if (elem->message.data.set_mac_event.echo)
3786 bp->set_mac_pending = 0;
523224a3
DK
3787 break;
3788 default:
3789 /* unknown event: log an error and continue */
3790 BNX2X_ERR("Unknown EQ event %d\n",
3791 elem->message.opcode);
3792 }
3793next_spqe:
3794 spqe_cnt++;
3795 } /* for */
3796
8fe23fbd 3797 smp_mb__before_atomic_inc();
6e30dd4e 3798 atomic_add(spqe_cnt, &bp->eq_spq_left);
523224a3
DK
3799
3800 bp->eq_cons = sw_cons;
3801 bp->eq_prod = sw_prod;
3802 /* Make sure that above mem writes were issued towards the memory */
3803 smp_wmb();
3804
3805 /* update producer */
3806 bnx2x_update_eq_prod(bp, bp->eq_prod);
3807}
3808
a2fbb9ea
ET
3809static void bnx2x_sp_task(struct work_struct *work)
3810{
1cf167f2 3811 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
3812 u16 status;
3813
3814 /* Return here if interrupt is disabled */
3815 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3816 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3817 return;
3818 }
3819
3820 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
3821/* if (status == 0) */
3822/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 3823
cdaa7cb8 3824 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
a2fbb9ea 3825
877e9aa4 3826 /* HW attentions */
523224a3 3827 if (status & BNX2X_DEF_SB_ATT_IDX) {
a2fbb9ea 3828 bnx2x_attn_int(bp);
523224a3 3829 status &= ~BNX2X_DEF_SB_ATT_IDX;
cdaa7cb8
VZ
3830 }
3831
523224a3
DK
3832 /* SP events: STAT_QUERY and others */
3833 if (status & BNX2X_DEF_SB_IDX) {
ec6ba945
VZ
3834#ifdef BCM_CNIC
3835 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 3836
ec6ba945
VZ
3837 if ((!NO_FCOE(bp)) &&
3838 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3839 napi_schedule(&bnx2x_fcoe(bp, napi));
3840#endif
523224a3
DK
3841 /* Handle EQ completions */
3842 bnx2x_eq_int(bp);
3843
3844 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3845 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3846
3847 status &= ~BNX2X_DEF_SB_IDX;
cdaa7cb8
VZ
3848 }
3849
3850 if (unlikely(status))
3851 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3852 status);
a2fbb9ea 3853
523224a3
DK
3854 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3855 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
a2fbb9ea
ET
3856}
3857
9f6c9258 3858irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
3859{
3860 struct net_device *dev = dev_instance;
3861 struct bnx2x *bp = netdev_priv(dev);
3862
3863 /* Return here if interrupt is disabled */
3864 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 3865 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
3866 return IRQ_HANDLED;
3867 }
3868
523224a3
DK
3869 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3870 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
3871
3872#ifdef BNX2X_STOP_ON_ERROR
3873 if (unlikely(bp->panic))
3874 return IRQ_HANDLED;
3875#endif
3876
993ac7b5
MC
3877#ifdef BCM_CNIC
3878 {
3879 struct cnic_ops *c_ops;
3880
3881 rcu_read_lock();
3882 c_ops = rcu_dereference(bp->cnic_ops);
3883 if (c_ops)
3884 c_ops->cnic_handler(bp->cnic_data, NULL);
3885 rcu_read_unlock();
3886 }
3887#endif
1cf167f2 3888 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
3889
3890 return IRQ_HANDLED;
3891}
3892
3893/* end of slow path */
3894
a2fbb9ea
ET
3895static void bnx2x_timer(unsigned long data)
3896{
3897 struct bnx2x *bp = (struct bnx2x *) data;
3898
3899 if (!netif_running(bp->dev))
3900 return;
3901
3902 if (atomic_read(&bp->intr_sem) != 0)
f1410647 3903 goto timer_restart;
a2fbb9ea
ET
3904
3905 if (poll) {
3906 struct bnx2x_fastpath *fp = &bp->fp[0];
a2fbb9ea 3907
7961f791 3908 bnx2x_tx_int(fp);
b8ee8328 3909 bnx2x_rx_int(fp, 1000);
a2fbb9ea
ET
3910 }
3911
34f80b04 3912 if (!BP_NOMCP(bp)) {
f2e0899f 3913 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
3914 u32 drv_pulse;
3915 u32 mcp_pulse;
3916
3917 ++bp->fw_drv_pulse_wr_seq;
3918 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3919 /* TBD - add SYSTEM_TIME */
3920 drv_pulse = bp->fw_drv_pulse_wr_seq;
f2e0899f 3921 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
a2fbb9ea 3922
f2e0899f 3923 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
3924 MCP_PULSE_SEQ_MASK);
3925 /* The delta between driver pulse and mcp response
3926 * should be 1 (before mcp response) or 0 (after mcp response)
3927 */
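 /* worked example (assuming MCP_PULSE_SEQ_MASK == 0x7fff): drv_pulse
 * 0x0000 against mcp_pulse 0x7fff is still healthy, since
 * (0x7fff + 1) & MCP_PULSE_SEQ_MASK wraps back to 0x0000
 */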
3928 if ((drv_pulse != mcp_pulse) &&
3929 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3930 /* someone lost a heartbeat... */
3931 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3932 drv_pulse, mcp_pulse);
3933 }
3934 }
3935
f34d28ea 3936 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 3937 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 3938
f1410647 3939timer_restart:
a2fbb9ea
ET
3940 mod_timer(&bp->timer, jiffies + bp->current_interval);
3941}
3942
3943/* end of Statistics */
3944
3945/* nic init */
3946
3947/*
3948 * nic init service functions
3949 */
3950
523224a3 3951static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 3952{
523224a3
DK
3953 u32 i;
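 /* use dword writes when both the address and the length are 4-byte
 * aligned; fall back to byte-wide writes otherwise
 */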
3954 if (!(len%4) && !(addr%4))
3955 for (i = 0; i < len; i += 4)
3956 REG_WR(bp, addr + i, fill);
3957 else
3958 for (i = 0; i < len; i++)
3959 REG_WR8(bp, addr + i, fill);
34f80b04 3960
34f80b04
EG
3961}
3962
523224a3
DK
3963/* helper: writes FP SP data to FW - data_size in dwords */
3964static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3965 int fw_sb_id,
3966 u32 *sb_data_p,
3967 u32 data_size)
34f80b04 3968{
a2fbb9ea 3969 int index;
523224a3
DK
3970 for (index = 0; index < data_size; index++)
3971 REG_WR(bp, BAR_CSTRORM_INTMEM +
3972 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3973 sizeof(u32)*index,
3974 *(sb_data_p + index));
3975}
a2fbb9ea 3976
523224a3
DK
3977static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3978{
3979 u32 *sb_data_p;
3980 u32 data_size = 0;
f2e0899f 3981 struct hc_status_block_data_e2 sb_data_e2;
523224a3 3982 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 3983
523224a3 3984 /* disable the function first */
f2e0899f
DK
3985 if (CHIP_IS_E2(bp)) {
3986 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3987 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3988 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3989 sb_data_e2.common.p_func.vf_valid = false;
3990 sb_data_p = (u32 *)&sb_data_e2;
3991 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3992 } else {
3993 memset(&sb_data_e1x, 0,
3994 sizeof(struct hc_status_block_data_e1x));
3995 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3996 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3997 sb_data_e1x.common.p_func.vf_valid = false;
3998 sb_data_p = (u32 *)&sb_data_e1x;
3999 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4000 }
523224a3 4001 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 4002
523224a3
DK
4003 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4004 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
4005 CSTORM_STATUS_BLOCK_SIZE);
4006 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4007 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
4008 CSTORM_SYNC_BLOCK_SIZE);
4009}
34f80b04 4010
523224a3
DK
4011/* helper: writes SP SB data to FW */
4012static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
4013 struct hc_sp_status_block_data *sp_sb_data)
4014{
4015 int func = BP_FUNC(bp);
4016 int i;
4017 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
4018 REG_WR(bp, BAR_CSTRORM_INTMEM +
4019 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
4020 i*sizeof(u32),
4021 *((u32 *)sp_sb_data + i));
34f80b04
EG
4022}
4023
523224a3 4024static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
4025{
4026 int func = BP_FUNC(bp);
523224a3
DK
4027 struct hc_sp_status_block_data sp_sb_data;
4028 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 4029
523224a3
DK
4030 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
4031 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
4032 sp_sb_data.p_func.vf_valid = false;
4033
4034 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4035
4036 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4037 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4038 CSTORM_SP_STATUS_BLOCK_SIZE);
4039 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4040 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4041 CSTORM_SP_SYNC_BLOCK_SIZE);
4042
4043}
4044
4045
4046static inline
4047void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4048 int igu_sb_id, int igu_seg_id)
4049{
4050 hc_sm->igu_sb_id = igu_sb_id;
4051 hc_sm->igu_seg_id = igu_seg_id;
4052 hc_sm->timer_value = 0xFF;
4053 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
4054}
4055
8d96286a 4056static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 4057 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 4058{
523224a3
DK
4059 int igu_seg_id;
4060
f2e0899f 4061 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
4062 struct hc_status_block_data_e1x sb_data_e1x;
4063 struct hc_status_block_sm *hc_sm_p;
523224a3
DK
4064 int data_size;
4065 u32 *sb_data_p;
4066
f2e0899f
DK
4067 if (CHIP_INT_MODE_IS_BC(bp))
4068 igu_seg_id = HC_SEG_ACCESS_NORM;
4069 else
4070 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
4071
4072 bnx2x_zero_fp_sb(bp, fw_sb_id);
4073
f2e0899f
DK
4074 if (CHIP_IS_E2(bp)) {
4075 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
4076 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
4077 sb_data_e2.common.p_func.vf_id = vfid;
4078 sb_data_e2.common.p_func.vf_valid = vf_valid;
4079 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
4080 sb_data_e2.common.same_igu_sb_1b = true;
4081 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
4082 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
4083 hc_sm_p = sb_data_e2.common.state_machine;
f2e0899f
DK
4084 sb_data_p = (u32 *)&sb_data_e2;
4085 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
4086 } else {
4087 memset(&sb_data_e1x, 0,
4088 sizeof(struct hc_status_block_data_e1x));
4089 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
4090 sb_data_e1x.common.p_func.vf_id = 0xff;
4091 sb_data_e1x.common.p_func.vf_valid = false;
4092 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
4093 sb_data_e1x.common.same_igu_sb_1b = true;
4094 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
4095 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
4096 hc_sm_p = sb_data_e1x.common.state_machine;
f2e0899f
DK
4097 sb_data_p = (u32 *)&sb_data_e1x;
4098 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
4099 }
523224a3
DK
4100
4101 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
4102 igu_sb_id, igu_seg_id);
4103 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
4104 igu_sb_id, igu_seg_id);
4105
4106 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
4107
4108 /* write indices to HW */
4109 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
4110}
4111
4112static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4113 u8 sb_index, u8 disable, u16 usec)
4114{
4115 int port = BP_PORT(bp);
4116 u8 ticks = usec / BNX2X_BTR;
4117
4118 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4119
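 /* coalescing is considered off when explicitly disabled or when the
 * timeout is zero; only a non-zero usec with disable == 0 keeps it on
 */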
4120 disable = disable ? 1 : (usec ? 0 : 1);
4121 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4122}
4123
4124static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4125 u16 tx_usec, u16 rx_usec)
4126{
4127 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4128 false, rx_usec);
4129 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4130 false, tx_usec);
4131}
f2e0899f 4132
523224a3
DK
4133static void bnx2x_init_def_sb(struct bnx2x *bp)
4134{
4135 struct host_sp_status_block *def_sb = bp->def_status_blk;
4136 dma_addr_t mapping = bp->def_status_blk_mapping;
4137 int igu_sp_sb_index;
4138 int igu_seg_id;
34f80b04
EG
4139 int port = BP_PORT(bp);
4140 int func = BP_FUNC(bp);
523224a3 4141 int reg_offset;
a2fbb9ea 4142 u64 section;
523224a3
DK
4143 int index;
4144 struct hc_sp_status_block_data sp_sb_data;
4145 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4146
f2e0899f
DK
4147 if (CHIP_INT_MODE_IS_BC(bp)) {
4148 igu_sp_sb_index = DEF_SB_IGU_ID;
4149 igu_seg_id = HC_SEG_ACCESS_DEF;
4150 } else {
4151 igu_sp_sb_index = bp->igu_dsb_id;
4152 igu_seg_id = IGU_SEG_ACCESS_DEF;
4153 }
a2fbb9ea
ET
4154
4155 /* ATTN */
523224a3 4156 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 4157 atten_status_block);
523224a3 4158 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 4159
49d66772
ET
4160 bp->attn_state = 0;
4161
a2fbb9ea
ET
4162 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4163 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
34f80b04 4164 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
4165 int sindex;
4166 /* take care of sig[0]..sig[3]; sig[4] is handled below */
4167 for (sindex = 0; sindex < 4; sindex++)
4168 bp->attn_group[index].sig[sindex] =
4169 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f
DK
4170
4171 if (CHIP_IS_E2(bp))
4172 /*
4173 * enable5 is separate from the rest of the registers,
4174 * and therefore the address skip is 4
4175 * and not 16 between the different groups
4176 */
4177 bp->attn_group[index].sig[4] = REG_RD(bp,
4178 reg_offset + 0x10 + 0x4*index);
4179 else
4180 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
4181 }
4182
f2e0899f
DK
4183 if (bp->common.int_block == INT_BLOCK_HC) {
4184 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4185 HC_REG_ATTN_MSG0_ADDR_L);
4186
4187 REG_WR(bp, reg_offset, U64_LO(section));
4188 REG_WR(bp, reg_offset + 4, U64_HI(section));
4189 } else if (CHIP_IS_E2(bp)) {
4190 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4191 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4192 }
a2fbb9ea 4193
523224a3
DK
4194 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4195 sp_sb);
a2fbb9ea 4196
523224a3 4197 bnx2x_zero_sp_sb(bp);
a2fbb9ea 4198
523224a3
DK
4199 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4200 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4201 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4202 sp_sb_data.igu_seg_id = igu_seg_id;
4203 sp_sb_data.p_func.pf_id = func;
f2e0899f 4204 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 4205 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 4206
523224a3 4207 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 4208
bb2a0f7a 4209 bp->stats_pending = 0;
66e855f3 4210 bp->set_mac_pending = 0;
bb2a0f7a 4211
523224a3 4212 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
4213}
4214
9f6c9258 4215void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 4216{
a2fbb9ea
ET
4217 int i;
4218
ec6ba945 4219 for_each_eth_queue(bp, i)
523224a3 4220 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 4221 bp->tx_ticks, bp->rx_ticks);
a2fbb9ea
ET
4222}
4223
a2fbb9ea
ET
4224static void bnx2x_init_sp_ring(struct bnx2x *bp)
4225{
a2fbb9ea 4226 spin_lock_init(&bp->spq_lock);
6e30dd4e 4227 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 4228
a2fbb9ea 4229 bp->spq_prod_idx = 0;
a2fbb9ea
ET
4230 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4231 bp->spq_prod_bd = bp->spq;
4232 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
4233}
4234
523224a3 4235static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
4236{
4237 int i;
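 /* chain the event ring pages: the last element of each page is a
 * next_page pointer to the start of the following page, with the
 * final page wrapping back to page 0
 */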
523224a3
DK
4238 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4239 union event_ring_elem *elem =
4240 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 4241
523224a3
DK
4242 elem->next_page.addr.hi =
4243 cpu_to_le32(U64_HI(bp->eq_mapping +
4244 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4245 elem->next_page.addr.lo =
4246 cpu_to_le32(U64_LO(bp->eq_mapping +
4247 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 4248 }
523224a3
DK
4249 bp->eq_cons = 0;
4250 bp->eq_prod = NUM_EQ_DESC;
4251 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6e30dd4e
VZ
4252 /* we want a warning message before it gets rough... */
4253 atomic_set(&bp->eq_spq_left,
4254 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
a2fbb9ea
ET
4255}
4256
ab532cf3 4257void bnx2x_push_indir_table(struct bnx2x *bp)
a2fbb9ea 4258{
26c8fa4d 4259 int func = BP_FUNC(bp);
a2fbb9ea
ET
4260 int i;
4261
555f6c78 4262 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
a2fbb9ea
ET
4263 return;
4264
4265 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4266 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4267 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
ab532cf3
TH
4268 bp->fp->cl_id + bp->rx_indir_table[i]);
4269}
4270
4271static void bnx2x_init_ind_table(struct bnx2x *bp)
4272{
4273 int i;
4274
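 /* default RSS spread: fill the indirection table round-robin over
 * the active ethernet queues
 */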
4275 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4276 bp->rx_indir_table[i] = i % BNX2X_NUM_ETH_QUEUES(bp);
4277
4278 bnx2x_push_indir_table(bp);
a2fbb9ea
ET
4279}
4280
9f6c9258 4281void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
a2fbb9ea 4282{
34f80b04 4283 int mode = bp->rx_mode;
ec6ba945 4284 int port = BP_PORT(bp);
523224a3 4285 u16 cl_id;
ec6ba945 4286 u32 def_q_filters = 0;
523224a3 4287
581ce43d
EG
4288 /* All but management unicast packets should pass to the host as well */
4289 u32 llh_mask =
4290 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4291 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4292 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4293 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
a2fbb9ea 4294
a2fbb9ea
ET
4295 switch (mode) {
4296 case BNX2X_RX_MODE_NONE: /* no Rx */
ec6ba945
VZ
4297 def_q_filters = BNX2X_ACCEPT_NONE;
4298#ifdef BCM_CNIC
4299 if (!NO_FCOE(bp)) {
4300 cl_id = bnx2x_fcoe(bp, cl_id);
4301 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4302 }
4303#endif
a2fbb9ea 4304 break;
356e2385 4305
a2fbb9ea 4306 case BNX2X_RX_MODE_NORMAL:
ec6ba945
VZ
4307 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4308 BNX2X_ACCEPT_MULTICAST;
4309#ifdef BCM_CNIC
711c9146
VZ
4310 if (!NO_FCOE(bp)) {
4311 cl_id = bnx2x_fcoe(bp, cl_id);
4312 bnx2x_rxq_set_mac_filters(bp, cl_id,
4313 BNX2X_ACCEPT_UNICAST |
4314 BNX2X_ACCEPT_MULTICAST);
4315 }
ec6ba945 4316#endif
a2fbb9ea 4317 break;
356e2385 4318
a2fbb9ea 4319 case BNX2X_RX_MODE_ALLMULTI:
ec6ba945
VZ
4320 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4321 BNX2X_ACCEPT_ALL_MULTICAST;
4322#ifdef BCM_CNIC
711c9146
VZ
4323 /*
4324 * Prevent duplication of multicast packets by configuring FCoE
4325 * L2 Client to receive only matched unicast frames.
4326 */
4327 if (!NO_FCOE(bp)) {
4328 cl_id = bnx2x_fcoe(bp, cl_id);
4329 bnx2x_rxq_set_mac_filters(bp, cl_id,
4330 BNX2X_ACCEPT_UNICAST);
4331 }
ec6ba945 4332#endif
a2fbb9ea 4333 break;
356e2385 4334
a2fbb9ea 4335 case BNX2X_RX_MODE_PROMISC:
ec6ba945
VZ
4336 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4337#ifdef BCM_CNIC
711c9146
VZ
4338 /*
4339 * Prevent packets duplication by configuring DROP_ALL for FCoE
4340 * L2 Client.
4341 */
4342 if (!NO_FCOE(bp)) {
4343 cl_id = bnx2x_fcoe(bp, cl_id);
4344 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4345 }
ec6ba945 4346#endif
581ce43d
EG
4347 /* pass management unicast packets as well */
4348 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
a2fbb9ea 4349 break;
356e2385 4350
a2fbb9ea 4351 default:
34f80b04
EG
4352 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4353 break;
a2fbb9ea
ET
4354 }
4355
ec6ba945
VZ
4356 cl_id = BP_L_ID(bp);
4357 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4358
581ce43d 4359 REG_WR(bp,
ec6ba945
VZ
4360 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4361 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
581ce43d 4362
523224a3
DK
4363 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4364 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
ec6ba945
VZ
4365 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4366 "unmatched_ucast 0x%x\n", mode,
523224a3
DK
4367 bp->mac_filters.ucast_drop_all,
4368 bp->mac_filters.mcast_drop_all,
4369 bp->mac_filters.bcast_drop_all,
4370 bp->mac_filters.ucast_accept_all,
4371 bp->mac_filters.mcast_accept_all,
ec6ba945
VZ
4372 bp->mac_filters.bcast_accept_all,
4373 bp->mac_filters.unmatched_unicast
523224a3 4374 );
a2fbb9ea 4375
523224a3 4376 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
a2fbb9ea
ET
4377}
4378
471de716
EG
4379static void bnx2x_init_internal_common(struct bnx2x *bp)
4380{
4381 int i;
4382
523224a3 4383 if (!CHIP_IS_E1(bp)) {
de832a55 4384
523224a3
DK
4385 /* xstorm needs to know whether to add ovlan to packets or not,
4386 * in switch-independent mode we'll write 0 here... */
34f80b04 4387 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4388 bp->mf_mode);
34f80b04 4389 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4390 bp->mf_mode);
34f80b04 4391 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4392 bp->mf_mode);
34f80b04 4393 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
fb3bff17 4394 bp->mf_mode);
34f80b04
EG
4395 }
4396
0793f83f
DK
4397 if (IS_MF_SI(bp))
4398 /*
4399 * In switch independent mode, the TSTORM needs to accept
4400 * packets that failed classification, since approximate match
4401 * mac addresses aren't written to NIG LLH
4402 */
4403 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4404 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4405
523224a3
DK
4406 /* Zero this manually as its initialization is
4407 currently missing in the initTool */
4408 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 4409 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 4410 USTORM_AGG_DATA_OFFSET + i * 4, 0);
f2e0899f
DK
4411 if (CHIP_IS_E2(bp)) {
4412 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4413 CHIP_INT_MODE_IS_BC(bp) ?
4414 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4415 }
523224a3 4416}
8a1c38d1 4417
523224a3
DK
4418static void bnx2x_init_internal_port(struct bnx2x *bp)
4419{
4420 /* port */
e4901dde 4421 bnx2x_dcb_init_intmem_pfc(bp);
a2fbb9ea
ET
4422}
4423
471de716
EG
4424static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4425{
4426 switch (load_code) {
4427 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 4428 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
4429 bnx2x_init_internal_common(bp);
4430 /* no break */
4431
4432 case FW_MSG_CODE_DRV_LOAD_PORT:
4433 bnx2x_init_internal_port(bp);
4434 /* no break */
4435
4436 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
4437 /* internal memory per function is
4438 initialized inside bnx2x_pf_init */
471de716
EG
4439 break;
4440
4441 default:
4442 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4443 break;
4444 }
4445}
4446
523224a3
DK
4447static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4448{
4449 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4450
4451 fp->state = BNX2X_FP_STATE_CLOSED;
4452
4453 fp->index = fp->cid = fp_idx;
4454 fp->cl_id = BP_L_ID(bp) + fp_idx;
4455 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4456 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4457 /* qZone id equals the FW (per path) client id */
4458 fp->cl_qzone_id = fp->cl_id +
f2e0899f
DK
4459 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4460 ETH_MAX_RX_CLIENTS_E1H);
523224a3 4461 /* init shortcut */
f2e0899f
DK
4462 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4463 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
523224a3
DK
4464 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4465 /* Set up SB indices */
4466 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4467 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4468
4469 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4470 "cl_id %d fw_sb %d igu_sb %d\n",
4471 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4472 fp->igu_sb_id);
4473 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4474 fp->fw_sb_id, fp->igu_sb_id);
4475
4476 bnx2x_update_fpsb_idx(fp);
4477}
4478
9f6c9258 4479void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea
ET
4480{
4481 int i;
4482
ec6ba945 4483 for_each_eth_queue(bp, i)
523224a3 4484 bnx2x_init_fp_sb(bp, i);
37b091ba 4485#ifdef BCM_CNIC
ec6ba945
VZ
4486 if (!NO_FCOE(bp))
4487 bnx2x_init_fcoe_fp(bp);
523224a3
DK
4488
4489 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4490 BNX2X_VF_ID_INVALID, false,
4491 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4492
37b091ba 4493#endif
a2fbb9ea 4494
16119785
EG
4495 /* ensure status block indices were read */
4496 rmb();
4497
523224a3 4498 bnx2x_init_def_sb(bp);
5c862848 4499 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4500 bnx2x_init_rx_rings(bp);
523224a3 4501 bnx2x_init_tx_rings(bp);
a2fbb9ea 4502 bnx2x_init_sp_ring(bp);
523224a3 4503 bnx2x_init_eq_ring(bp);
471de716 4504 bnx2x_init_internal(bp, load_code);
523224a3 4505 bnx2x_pf_init(bp);
a2fbb9ea 4506 bnx2x_init_ind_table(bp);
0ef00459
EG
4507 bnx2x_stats_init(bp);
4508
4509 /* At this point, we are ready for interrupts */
4510 atomic_set(&bp->intr_sem, 0);
4511
4512 /* flush all before enabling interrupts */
4513 mb();
4514 mmiowb();
4515
615f8fd9 4516 bnx2x_int_enable(bp);
eb8da205
EG
4517
4518 /* Check for SPIO5 */
4519 bnx2x_attn_int_deasserted0(bp,
4520 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4521 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
4522}
4523
4524/* end of nic init */
4525
4526/*
4527 * gzip service functions
4528 */
4529
4530static int bnx2x_gunzip_init(struct bnx2x *bp)
4531{
1a983142
FT
4532 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4533 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
4534 if (bp->gunzip_buf == NULL)
4535 goto gunzip_nomem1;
4536
4537 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4538 if (bp->strm == NULL)
4539 goto gunzip_nomem2;
4540
4541 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4542 GFP_KERNEL);
4543 if (bp->strm->workspace == NULL)
4544 goto gunzip_nomem3;
4545
4546 return 0;
4547
4548gunzip_nomem3:
4549 kfree(bp->strm);
4550 bp->strm = NULL;
4551
4552gunzip_nomem2:
1a983142
FT
4553 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4554 bp->gunzip_mapping);
a2fbb9ea
ET
4555 bp->gunzip_buf = NULL;
4556
4557gunzip_nomem1:
cdaa7cb8
VZ
4558 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4559 " un-compression\n");
a2fbb9ea
ET
4560 return -ENOMEM;
4561}
4562
4563static void bnx2x_gunzip_end(struct bnx2x *bp)
4564{
4565 kfree(bp->strm->workspace);
a2fbb9ea
ET
4566 kfree(bp->strm);
4567 bp->strm = NULL;
4568
4569 if (bp->gunzip_buf) {
1a983142
FT
4570 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4571 bp->gunzip_mapping);
a2fbb9ea
ET
4572 bp->gunzip_buf = NULL;
4573 }
4574}
4575
94a78b79 4576static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
4577{
4578 int n, rc;
4579
4580 /* check gzip header */
94a78b79
VZ
4581 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4582 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 4583 return -EINVAL;
94a78b79 4584 }
a2fbb9ea
ET
4585
4586 n = 10;
4587
34f80b04 4588#define FNAME 0x8
a2fbb9ea
ET
4589
4590 if (zbuf[3] & FNAME)
4591 while ((zbuf[n++] != 0) && (n < len));
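 /* n now points past the fixed 10-byte gzip header and, when the FNAME
 * flag (bit 3 of the FLG byte) is set, past the NUL-terminated
 * original file name as well
 */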
4592
94a78b79 4593 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
4594 bp->strm->avail_in = len - n;
4595 bp->strm->next_out = bp->gunzip_buf;
4596 bp->strm->avail_out = FW_BUF_SIZE;
4597
4598 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4599 if (rc != Z_OK)
4600 return rc;
4601
4602 rc = zlib_inflate(bp->strm, Z_FINISH);
4603 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
4604 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4605 bp->strm->msg);
a2fbb9ea
ET
4606
4607 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4608 if (bp->gunzip_outlen & 0x3)
cdaa7cb8
VZ
4609 netdev_err(bp->dev, "Firmware decompression error:"
4610 " gunzip_outlen (%d) not aligned\n",
4611 bp->gunzip_outlen);
a2fbb9ea
ET
4612 bp->gunzip_outlen >>= 2;
4613
4614 zlib_inflateEnd(bp->strm);
4615
4616 if (rc == Z_STREAM_END)
4617 return 0;
4618
4619 return rc;
4620}
4621
4622/* nic load/unload */
4623
4624/*
34f80b04 4625 * General service functions
a2fbb9ea
ET
4626 */
4627
4628/* send a NIG loopback debug packet */
4629static void bnx2x_lb_pckt(struct bnx2x *bp)
4630{
a2fbb9ea 4631 u32 wb_write[3];
a2fbb9ea
ET
4632
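 /* each fragment of the debug packet is written as three dwords: two
 * data words followed by a control word (0x20 marks SOP below,
 * 0x10 marks EOP)
 */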
4633 /* Ethernet source and destination addresses */
a2fbb9ea
ET
4634 wb_write[0] = 0x55555555;
4635 wb_write[1] = 0x55555555;
34f80b04 4636 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 4637 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4638
4639 /* NON-IP protocol */
a2fbb9ea
ET
4640 wb_write[0] = 0x09000000;
4641 wb_write[1] = 0x55555555;
34f80b04 4642 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 4643 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
4644}
4645
4646/* some of the internal memories
4647 * are not directly readable from the driver,
4648 * so to test them we send debug packets
4649 */
4650static int bnx2x_int_mem_test(struct bnx2x *bp)
4651{
4652 int factor;
4653 int count, i;
4654 u32 val = 0;
4655
ad8d3948 4656 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 4657 factor = 120;
ad8d3948
EG
4658 else if (CHIP_REV_IS_EMUL(bp))
4659 factor = 200;
4660 else
a2fbb9ea 4661 factor = 1;
a2fbb9ea 4662
a2fbb9ea
ET
4663 /* Disable inputs of parser neighbor blocks */
4664 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4665 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4666 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4667 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4668
4669 /* Write 0 to parser credits for CFC search request */
4670 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4671
4672 /* send Ethernet packet */
4673 bnx2x_lb_pckt(bp);
4674
4675 /* TODO do i reset NIG statistic? */
4676 /* Wait until NIG register shows 1 packet of size 0x10 */
4677 count = 1000 * factor;
4678 while (count) {
34f80b04 4679
a2fbb9ea
ET
4680 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4681 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4682 if (val == 0x10)
4683 break;
4684
4685 msleep(10);
4686 count--;
4687 }
4688 if (val != 0x10) {
4689 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4690 return -1;
4691 }
4692
4693 /* Wait until PRS register shows 1 packet */
4694 count = 1000 * factor;
4695 while (count) {
4696 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
4697 if (val == 1)
4698 break;
4699
4700 msleep(10);
4701 count--;
4702 }
4703 if (val != 0x1) {
4704 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4705 return -2;
4706 }
4707
4708 /* Reset and init BRB, PRS */
34f80b04 4709 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 4710 msleep(50);
34f80b04 4711 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 4712 msleep(50);
94a78b79
VZ
4713 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4714 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
a2fbb9ea
ET
4715
4716 DP(NETIF_MSG_HW, "part2\n");
4717
4718 /* Disable inputs of parser neighbor blocks */
4719 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4720 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4721 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 4722 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
4723
4724 /* Write 0 to parser credits for CFC search request */
4725 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4726
4727 /* send 10 Ethernet packets */
4728 for (i = 0; i < 10; i++)
4729 bnx2x_lb_pckt(bp);
4730
4731 /* Wait until NIG register shows 10 + 1
4732 packets of size 11*0x10 = 0xb0 */
4733 count = 1000 * factor;
4734 while (count) {
34f80b04 4735
a2fbb9ea
ET
4736 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4737 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
4738 if (val == 0xb0)
4739 break;
4740
4741 msleep(10);
4742 count--;
4743 }
4744 if (val != 0xb0) {
4745 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4746 return -3;
4747 }
4748
4749 /* Wait until PRS register shows 2 packets */
4750 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4751 if (val != 2)
4752 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4753
4754 /* Write 1 to parser credits for CFC search request */
4755 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4756
4757 /* Wait until PRS register shows 3 packets */
4758 msleep(10 * factor);
4759 /* check that the PRS register now shows 3 packets */
4760 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4761 if (val != 3)
4762 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4763
4764 /* clear NIG EOP FIFO */
4765 for (i = 0; i < 11; i++)
4766 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4767 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4768 if (val != 1) {
4769 BNX2X_ERR("clear of NIG failed\n");
4770 return -4;
4771 }
4772
4773 /* Reset and init BRB, PRS, NIG */
4774 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4775 msleep(50);
4776 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4777 msleep(50);
94a78b79
VZ
4778 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4779 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
37b091ba 4780#ifndef BCM_CNIC
a2fbb9ea
ET
4781 /* set NIC mode */
4782 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4783#endif
4784
4785 /* Enable inputs of parser neighbor blocks */
4786 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4787 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4788 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 4789 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
4790
4791 DP(NETIF_MSG_HW, "done\n");
4792
4793 return 0; /* OK */
4794}
4795
4a33bc03 4796static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
a2fbb9ea
ET
4797{
4798 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
f2e0899f
DK
4799 if (CHIP_IS_E2(bp))
4800 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4801 else
4802 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
4803 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4804 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
4805 /*
4806 * mask read length error interrupts in brb for parser
4807 * (parsing unit and 'checksum and crc' unit)
4808 * these errors are legal (PU reads fixed length and CAC can cause
4809 * read length error on truncated packets)
4810 */
4811 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
4812 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4813 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4814 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4815 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4816 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
4817/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4818/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4819 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4820 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4821 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
4822/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4823/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4824 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4825 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4826 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4827 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
4828/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4829/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 4830
34f80b04
EG
4831 if (CHIP_REV_IS_FPGA(bp))
4832 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
f2e0899f
DK
4833 else if (CHIP_IS_E2(bp))
4834 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4835 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4836 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4837 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4838 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4839 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
34f80b04
EG
4840 else
4841 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
4842 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4843 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4844 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04
EG
4845/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4846/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
4847 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4848 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 4849/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 4850 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
4851}
4852
81f75bbf
EG
4853static void bnx2x_reset_common(struct bnx2x *bp)
4854{
4855 /* reset_common */
4856 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4857 0xd3ffff7f);
4858 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4859}
4860
573f2035
EG
4861static void bnx2x_init_pxp(struct bnx2x *bp)
4862{
4863 u16 devctl;
4864 int r_order, w_order;
4865
4866 pci_read_config_word(bp->pdev,
4867 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4868 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4869 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4870 if (bp->mrrs == -1)
4871 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4872 else {
4873 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4874 r_order = bp->mrrs;
4875 }
4876
4877 bnx2x_init_pxp_arb(bp, r_order, w_order);
4878}
fd4ef40d
EG
4879
4880static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4881{
2145a920 4882 int is_required;
fd4ef40d 4883 u32 val;
2145a920 4884 int port;
fd4ef40d 4885
2145a920
VZ
4886 if (BP_NOMCP(bp))
4887 return;
4888
4889 is_required = 0;
fd4ef40d
EG
4890 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4891 SHARED_HW_CFG_FAN_FAILURE_MASK;
4892
4893 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4894 is_required = 1;
4895
4896 /*
4897 * The fan failure mechanism is usually related to the PHY type since
4898 * the power consumption of the board is affected by the PHY. Currently,
4899 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4900 */
4901 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4902 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 4903 is_required |=
d90d96ba
YR
4904 bnx2x_fan_failure_det_req(
4905 bp,
4906 bp->common.shmem_base,
a22f0788 4907 bp->common.shmem2_base,
d90d96ba 4908 port);
fd4ef40d
EG
4909 }
4910
4911 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4912
4913 if (is_required == 0)
4914 return;
4915
4916 /* Fan failure is indicated by SPIO 5 */
4917 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4918 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4919
4920 /* set to active low mode */
4921 val = REG_RD(bp, MISC_REG_SPIO_INT);
4922 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
cdaa7cb8 4923 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
4924 REG_WR(bp, MISC_REG_SPIO_INT, val);
4925
4926 /* enable interrupt to signal the IGU */
4927 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4928 val |= (1 << MISC_REGISTERS_SPIO_5);
4929 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4930}
4931
f2e0899f
DK
4932static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4933{
4934 u32 offset = 0;
4935
4936 if (CHIP_IS_E1(bp))
4937 return;
4938 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4939 return;
4940
4941 switch (BP_ABS_FUNC(bp)) {
4942 case 0:
4943 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4944 break;
4945 case 1:
4946 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4947 break;
4948 case 2:
4949 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4950 break;
4951 case 3:
4952 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4953 break;
4954 case 4:
4955 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4956 break;
4957 case 5:
4958 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4959 break;
4960 case 6:
4961 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4962 break;
4963 case 7:
4964 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4965 break;
4966 default:
4967 return;
4968 }
4969
4970 REG_WR(bp, offset, pretend_func_num);
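 /* read back, presumably to make sure the pretend value is posted
 * before anything that relies on the new function context
 */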
4971 REG_RD(bp, offset);
4972 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4973}
4974
4975static void bnx2x_pf_disable(struct bnx2x *bp)
4976{
4977 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4978 val &= ~IGU_PF_CONF_FUNC_EN;
4979
4980 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4981 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4982 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4983}
4984
523224a3 4985static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4986{
a2fbb9ea 4987 u32 val, i;
a2fbb9ea 4988
f2e0899f 4989 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 4990
81f75bbf 4991 bnx2x_reset_common(bp);
34f80b04
EG
4992 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4993 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 4994
94a78b79 4995 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
f2e0899f 4996 if (!CHIP_IS_E1(bp))
fb3bff17 4997 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
a2fbb9ea 4998
f2e0899f
DK
4999 if (CHIP_IS_E2(bp)) {
5000 u8 fid;
5001
5002 /**
5003 * In 4-port or 2-port mode we need to turn off master-enable
5004 * for everyone; after that, turn it back on for self.
5005 * So we disregard multi-function or not, and always disable
5006 * for all functions on the given path; this means 0,2,4,6 for
5007 * path 0 and 1,3,5,7 for path 1
5008 */
5009 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
5010 if (fid == BP_ABS_FUNC(bp)) {
5011 REG_WR(bp,
5012 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5013 1);
5014 continue;
5015 }
5016
5017 bnx2x_pretend_func(bp, fid);
5018 /* clear pf enable */
5019 bnx2x_pf_disable(bp);
5020 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5021 }
5022 }
a2fbb9ea 5023
94a78b79 5024 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
34f80b04
EG
5025 if (CHIP_IS_E1(bp)) {
5026 /* enable HW interrupt from PXP on USDM overflow
5027 bit 16 on INT_MASK_0 */
5028 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5029 }
a2fbb9ea 5030
94a78b79 5031 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
34f80b04 5032 bnx2x_init_pxp(bp);
a2fbb9ea
ET
5033
5034#ifdef __BIG_ENDIAN
34f80b04
EG
5035 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5036 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5037 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5038 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5039 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
5040 /* make sure this value is 0 */
5041 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
5042
5043/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5044 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5045 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5046 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5047 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
5048#endif
5049
523224a3
DK
5050 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5051
34f80b04
EG
5052 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5053 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5054
34f80b04
EG
5055 /* let the HW do its magic ... */
5056 msleep(100);
5057 /* finish PXP init */
5058 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5059 if (val != 1) {
5060 BNX2X_ERR("PXP2 CFG failed\n");
5061 return -EBUSY;
5062 }
5063 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5064 if (val != 1) {
5065 BNX2X_ERR("PXP2 RD_INIT failed\n");
5066 return -EBUSY;
5067 }
a2fbb9ea 5068
f2e0899f
DK
5069 /* Timers bug workaround E2 only. We need to set the entire ILT to
5070 * have entries with value "0" and valid bit on.
5071 * This needs to be done by the first PF that is loaded in a path
5072 * (i.e. common phase)
5073 */
5074 if (CHIP_IS_E2(bp)) {
5075 struct ilt_client_info ilt_cli;
5076 struct bnx2x_ilt ilt;
5077 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5078 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5079
b595076a 5080 /* initialize dummy TM client */
f2e0899f
DK
5081 ilt_cli.start = 0;
5082 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5083 ilt_cli.client_num = ILT_CLIENT_TM;
5084
5085 /* Step 1: set zeroes to all ilt page entries with valid bit on
5086 * Step 2: set the timers first/last ilt entry to point
5087 * to the entire range to prevent ILT range error for 3rd/4th
25985edc 5088 * vnic (this code assumes existence of the vnic)
f2e0899f
DK
5089 *
5090 * both steps performed by call to bnx2x_ilt_client_init_op()
5091 * with dummy TM client
5092 *
5093 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5094 * and its sibling are split registers
5095 */
5096 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5097 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5098 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5099
5100 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5101 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5102 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5103 }
5104
5105
34f80b04
EG
5106 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5107 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5108
f2e0899f
DK
5109 if (CHIP_IS_E2(bp)) {
5110 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5111 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5112 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5113
5114 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5115
5116 /* let the HW do its magic ... */
5117 do {
5118 msleep(200);
5119 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5120 } while (factor-- && (val != 1));
5121
5122 if (val != 1) {
5123 BNX2X_ERR("ATC_INIT failed\n");
5124 return -EBUSY;
5125 }
5126 }

        bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

        /* clean the DMAE memory */
        bp->dmae_ready = 1;
        bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

        bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

        bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
        bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

        bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

        /* QM queues pointers table */
        bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

        /* soft reset pulse */
        REG_WR(bp, QM_REG_SOFT_RESET, 1);
        REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
        bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

        bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
        REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

        if (!CHIP_REV_IS_SLOW(bp)) {
                /* enable hw interrupt from doorbell Q */
                REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
        }

        bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
        if (CHIP_MODE_IS_4_PORT(bp)) {
                REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
                REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
        }

        bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
        REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
        if (!CHIP_IS_E1(bp))
                REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));

        if (CHIP_IS_E2(bp)) {
                /* Bit-map indicating which L2 hdrs may appear after the
                   basic Ethernet header */
                int has_ovlan = IS_MF_SD(bp);
                REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
                REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
        }

        bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

        bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
        bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
        bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
        bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

        bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

        /* sync semi rtc */
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
               0x80000000);
        REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
               0x80000000);

        bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

        if (CHIP_IS_E2(bp)) {
                int has_ovlan = IS_MF_SD(bp);
                REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
                REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
        }

        REG_WR(bp, SRC_REG_SOFT_RST, 1);
        for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
                REG_WR(bp, i, random32());
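        /* While the searcher is held in soft reset, the loop above seeds its
         * KEYRSS0_0 .. KEYRSS1_9 registers (one 32-bit word every 4 bytes)
         * with random RSS key material.
         */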

        bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
        REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
        REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
        REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
        REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
        REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
        REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
        REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
        REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
        REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
        REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
        REG_WR(bp, SRC_REG_SOFT_RST, 0);

        if (sizeof(union cdu_context) != 1024)
                /* we currently assume that a context is 1024 bytes */
                dev_alert(&bp->pdev->dev,
                          "please adjust the size of cdu_context(%ld)\n",
                          (long)sizeof(union cdu_context));

        bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
        val = (4 << 24) + (0 << 12) + 1024;
        REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

        bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
        REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
        /* enable context validation interrupt from CFC */
        REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

        /* set the thresholds to prevent CFC/CDU race */
        REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

        bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

        if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
                REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

        bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

        bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2814, 0xffffffff);
        REG_WR(bp, 0x3820, 0xffffffff);

        if (CHIP_IS_E2(bp)) {
                REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
                       (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
                        PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
                REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
                       (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
                        PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
                        PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
                REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
                       (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
                        PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
                        PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
        }

        bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
        bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

        bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
        if (!CHIP_IS_E1(bp)) {
                REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
                REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
        }
        if (CHIP_IS_E2(bp)) {
                /* Bit-map indicating which L2 hdrs may appear after the
                   basic Ethernet header */
                REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC,
                       (IS_MF_SD(bp) ? 7 : 6));
        }

        if (CHIP_REV_IS_SLOW(bp))
                msleep(200);

        /* finish CFC init */
        val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC LL_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC AC_INIT failed\n");
                return -EBUSY;
        }
        val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
        if (val != 1) {
                BNX2X_ERR("CFC CAM_INIT failed\n");
                return -EBUSY;
        }
        REG_WR(bp, CFC_REG_DEBUG0, 0);

        if (CHIP_IS_E1(bp)) {
                /* read NIG statistic
                   to see if this is our first up since powerup */
                bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
                val = *bnx2x_sp(bp, wb_data[0]);

                /* do internal memory self test */
                if ((val == 0) && bnx2x_int_mem_test(bp)) {
                        BNX2X_ERR("internal mem self test failed\n");
                        return -EBUSY;
                }
        }
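        /* The BRB octet counter read above lands as a 64-bit value in the
         * slowpath wb_data buffer; a zero value is used as a cheap "first
         * load since power-up" check so the internal memory self test runs
         * only once.
         */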
5338
fd4ef40d
EG
5339 bnx2x_setup_fan_failure_detection(bp);
5340
34f80b04
EG
5341 /* clear PXP2 attentions */
5342 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5343
4a33bc03
VZ
5344 bnx2x_enable_blocks_attention(bp);
5345 if (CHIP_PARITY_ENABLED(bp))
5346 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 5347
6bbca910 5348 if (!BP_NOMCP(bp)) {
f2e0899f
DK
5349 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5350 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5351 CHIP_IS_E1x(bp)) {
5352 u32 shmem_base[2], shmem2_base[2];
5353 shmem_base[0] = bp->common.shmem_base;
5354 shmem2_base[0] = bp->common.shmem2_base;
5355 if (CHIP_IS_E2(bp)) {
5356 shmem_base[1] =
5357 SHMEM2_RD(bp, other_shmem_base_addr);
5358 shmem2_base[1] =
5359 SHMEM2_RD(bp, other_shmem2_base_addr);
5360 }
5361 bnx2x_acquire_phy_lock(bp);
5362 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5363 bp->common.chip_id);
5364 bnx2x_release_phy_lock(bp);
5365 }
6bbca910
YR
5366 } else
5367 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5368
34f80b04
EG
5369 return 0;
5370}

static int bnx2x_init_hw_port(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
        u32 low, high;
        u32 val;

        DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

        REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

        bnx2x_init_block(bp, PXP_BLOCK, init_stage);
        bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

        /* Timers bug workaround: the common phase disables the pf_master
         * bit in pglue, and we need to enable it here before any DMAE
         * access is attempted. Therefore the enable-master was manually
         * added to the port phase (it also happens in the function phase).
         */
        if (CHIP_IS_E2(bp))
                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

        bnx2x_init_block(bp, TCM_BLOCK, init_stage);
        bnx2x_init_block(bp, UCM_BLOCK, init_stage);
        bnx2x_init_block(bp, CCM_BLOCK, init_stage);
        bnx2x_init_block(bp, XCM_BLOCK, init_stage);

        /* QM cid (connection) count */
        bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
        bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
        REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
        REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

        bnx2x_init_block(bp, DQ_BLOCK, init_stage);

        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

        if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
                bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
                if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
                        /* no pause for emulation and FPGA */
                        low = 0;
                        high = 513;
                } else {
                        if (IS_MF(bp))
                                low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
                        else if (bp->dev->mtu > 4096) {
                                if (bp->flags & ONE_PORT_FLAG)
                                        low = 160;
                                else {
                                        val = bp->dev->mtu;
                                        /* (24*1024 + val*4)/256 */
                                        low = 96 + (val/64) +
                                              ((val % 64) ? 1 : 0);
                                }
                        } else
                                low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
                        high = low + 56;        /* 14*1024/256 */
                }
                REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
                REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
        }
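        /* Judging by the inline arithmetic, the BRB pause thresholds are in
         * 256-byte buffer-block units: "high = low + 56" adds 14KB
         * (14*1024/256) of headroom above the low watermark.
         */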

        if (CHIP_MODE_IS_4_PORT(bp)) {
                REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
                REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
                REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
                            BRB1_REG_MAC_GUARANTIED_0), 40);
        }

        bnx2x_init_block(bp, PRS_BLOCK, init_stage);

        bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
        bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
        bnx2x_init_block(bp, USDM_BLOCK, init_stage);
        bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

        bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
        bnx2x_init_block(bp, USEM_BLOCK, init_stage);
        bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
        bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

        bnx2x_init_block(bp, UPB_BLOCK, init_stage);
        bnx2x_init_block(bp, XPB_BLOCK, init_stage);

        bnx2x_init_block(bp, PBF_BLOCK, init_stage);

        if (!CHIP_IS_E2(bp)) {
                /* configure PBF to work without PAUSE mtu 9000 */
                REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

                /* update threshold */
                REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
                /* update init credit */
                REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

                /* probe changes */
                REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
                udelay(50);
                REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
        }
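        /* The PBF values appear to be in 16-byte units: 9040 covers a
         * 9000-byte jumbo MTU plus header overhead, so the arbiter threshold
         * and the initial credit are sized to pass one full jumbo frame
         * with PAUSE disabled.
         */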

#ifdef BCM_CNIC
        bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
        bnx2x_init_block(bp, CDU_BLOCK, init_stage);
        bnx2x_init_block(bp, CFC_BLOCK, init_stage);

        if (CHIP_IS_E1(bp)) {
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
        }
        bnx2x_init_block(bp, HC_BLOCK, init_stage);

        bnx2x_init_block(bp, IGU_BLOCK, init_stage);

        bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
        /* init aeu_mask_attn_func_0/1:
         *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
         *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
         *             bits 4-7 are used for "per vn group attention" */
        val = IS_MF(bp) ? 0xF7 : 0x7;
        /* Enable DCBX attention for all but E1 */
        val |= CHIP_IS_E1(bp) ? 0 : 0x10;
        REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);

        bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
        bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
        bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
        bnx2x_init_block(bp, DBU_BLOCK, init_stage);
        bnx2x_init_block(bp, DBG_BLOCK, init_stage);

        bnx2x_init_block(bp, NIG_BLOCK, init_stage);

        REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

        if (!CHIP_IS_E1(bp)) {
                /* 0x2 disable mf_ov, 0x1 enable */
                REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
                       (IS_MF_SD(bp) ? 0x1 : 0x2));

                if (CHIP_IS_E2(bp)) {
                        val = 0;
                        switch (bp->mf_mode) {
                        case MULTI_FUNCTION_SD:
                                val = 1;
                                break;
                        case MULTI_FUNCTION_SI:
                                val = 2;
                                break;
                        }

                        REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
                                    NIG_REG_LLH0_CLS_TYPE), val);
                }
                {
                        REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
                        REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
                        REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
                }
        }

        bnx2x_init_block(bp, MCP_BLOCK, init_stage);
        bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
        if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
                                      bp->common.shmem2_base, port)) {
                u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                                       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
                val = REG_RD(bp, reg_addr);
                val |= AEU_INPUTS_ATTN_BITS_SPIO5;
                REG_WR(bp, reg_addr, val);
        }
        bnx2x__link_reset(bp);

        return 0;
}

static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
        int reg;

        if (CHIP_IS_E1(bp))
                reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
        else
                reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

        bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
        bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
        u32 i, base = FUNC_ILT_BASE(func);
        for (i = base; i < base + ILT_PER_FUNC; i++)
                bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        int func = BP_FUNC(bp);
        struct bnx2x_ilt *ilt = BP_ILT(bp);
        u16 cdu_ilt_start;
        u32 addr, val;
        u32 main_mem_base, main_mem_size, main_mem_prty_clr;
        int i, main_mem_width;

        DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

        /* set MSI reconfigure capability */
        if (bp->common.int_block == INT_BLOCK_HC) {
                addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
                val = REG_RD(bp, addr);
                val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
                REG_WR(bp, addr, val);
        }

        ilt = BP_ILT(bp);
        cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

        for (i = 0; i < L2_ILT_LINES(bp); i++) {
                ilt->lines[cdu_ilt_start + i].page =
                        bp->context.vcxt + (ILT_PAGE_CIDS * i);
                ilt->lines[cdu_ilt_start + i].page_mapping =
                        bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
                /* cdu ilt pages are allocated manually so there's no need to
                   set the size */
        }
        bnx2x_ilt_init_op(bp, INITOP_SET);
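        /* Each CDU ILT line initialized above maps one CDU_ILT_PAGE_SZ-byte
         * page of the pre-allocated context array (ILT_PAGE_CIDS connection
         * contexts per line), letting the chip translate a CID to its host
         * context without further driver involvement.
         */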

#ifdef BCM_CNIC
        bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

        /* T1 hash bits value determines the T1 number of entries */
        REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
        /* set NIC mode */
        REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

        if (CHIP_IS_E2(bp)) {
                u32 pf_conf = IGU_PF_CONF_FUNC_EN;

                /* Turn on a single ISR mode in IGU if driver is going to use
                 * INT#x or MSI
                 */
                if (!(bp->flags & USING_MSIX_FLAG))
                        pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
                /*
                 * Timers bug workaround: function init part.
                 * Need to wait 20msec after initializing ILT,
                 * needed to make sure there are no requests in
                 * one of the PXP internal queues with "old" ILT addresses
                 */
                msleep(20);
                /*
                 * Master enable - Due to WB DMAE writes performed before this
                 * register is re-initialized as part of the regular function
                 * init
                 */
                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
                /* Enable the function in IGU */
                REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
        }

        bp->dmae_ready = 1;

        bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

        if (CHIP_IS_E2(bp))
                REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

        bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

        if (CHIP_IS_E2(bp)) {
                REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
                       BP_PATH(bp));
                REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
                       BP_PATH(bp));
        }

        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

        if (CHIP_IS_E2(bp))
                REG_WR(bp, QM_REG_PF_EN, 1);

        bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

        if (CHIP_MODE_IS_4_PORT(bp))
                bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

        bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
        if (CHIP_IS_E2(bp))
                REG_WR(bp, PBF_REG_DISABLE_PF, 0);

        bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

        bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

        if (CHIP_IS_E2(bp))
                REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

        if (IS_MF(bp)) {
                REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
                REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
        }

        bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

        /* HC init per function */
        if (bp->common.int_block == INT_BLOCK_HC) {
                if (CHIP_IS_E1H(bp)) {
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

                        REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
                        REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
                }
                bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

        } else {
                int num_segs, sb_idx, prod_offset;

                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

                if (CHIP_IS_E2(bp)) {
                        REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
                        REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
                }

                bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

                if (CHIP_IS_E2(bp)) {
                        int dsb_idx = 0;
                        /**
                         * Producer memory:
                         * E2 mode: address 0-135 match to the mapping memory;
                         * 136 - PF0 default prod; 137 - PF1 default prod;
                         * 138 - PF2 default prod; 139 - PF3 default prod;
                         * 140 - PF0 attn prod;    141 - PF1 attn prod;
                         * 142 - PF2 attn prod;    143 - PF3 attn prod;
                         * 144-147 reserved.
                         *
                         * E1.5 mode - In backward compatible mode;
                         * for non default SB; each even line in the memory
                         * holds the U producer and each odd line holds
                         * the C producer. The first 128 producers are for
                         * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
                         * producers are for the DSB for each PF.
                         * Each PF has five segments: (the order inside each
                         * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
                         * 132-135 C prods; 136-139 X prods; 140-143 T prods;
                         * 144-147 attn prods;
                         */
                        /* non-default-status-blocks */
                        num_segs = CHIP_INT_MODE_IS_BC(bp) ?
                                IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
                        for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
                                prod_offset = (bp->igu_base_sb + sb_idx) *
                                        num_segs;

                                for (i = 0; i < num_segs; i++) {
                                        addr = IGU_REG_PROD_CONS_MEMORY +
                                                        (prod_offset + i) * 4;
                                        REG_WR(bp, addr, 0);
                                }
                                /* send consumer update with value 0 */
                                bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
                                             USTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_igu_clear_sb(bp,
                                                   bp->igu_base_sb + sb_idx);
                        }

                        /* default-status-blocks */
                        num_segs = CHIP_INT_MODE_IS_BC(bp) ?
                                IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

                        if (CHIP_MODE_IS_4_PORT(bp))
                                dsb_idx = BP_FUNC(bp);
                        else
                                dsb_idx = BP_E1HVN(bp);

                        prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
                                       IGU_BC_BASE_DSB_PROD + dsb_idx :
                                       IGU_NORM_BASE_DSB_PROD + dsb_idx);

                        for (i = 0; i < (num_segs * E1HVN_MAX);
                             i += E1HVN_MAX) {
                                addr = IGU_REG_PROD_CONS_MEMORY +
                                                        (prod_offset + i)*4;
                                REG_WR(bp, addr, 0);
                        }
                        /* send consumer update with 0 */
                        if (CHIP_INT_MODE_IS_BC(bp)) {
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             USTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             CSTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             XSTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             TSTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             ATTENTION_ID, 0, IGU_INT_NOP, 1);
                        } else {
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             USTORM_ID, 0, IGU_INT_NOP, 1);
                                bnx2x_ack_sb(bp, bp->igu_dsb_id,
                                             ATTENTION_ID, 0, IGU_INT_NOP, 1);
                        }
                        bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

                        /* !!! these should become driver const once
                           rf-tool supports split-68 const */
                        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
                        REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
                        REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
                        REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
                        REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
                        REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
                }
        }
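        /* The IGU_INT_NOP acks used above are zero-value consumer updates:
         * they bring each status block's consumer back in sync with the
         * just-cleared producer memory without asserting an interrupt.
         */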

        /* Reset PCIE errors for debug */
        REG_WR(bp, 0x2114, 0xffffffff);
        REG_WR(bp, 0x2120, 0xffffffff);

        bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
        bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

        if (CHIP_IS_E1x(bp)) {
                main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
                main_mem_base = HC_REG_MAIN_MEMORY +
                                BP_PORT(bp) * (main_mem_size * 4);
                main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
                main_mem_width = 8;

                val = REG_RD(bp, main_mem_prty_clr);
                if (val)
                        DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC block "
                                          "during function init (0x%x)!\n",
                           val);

                /* Clear "false" parity errors in MSI-X table */
                for (i = main_mem_base;
                     i < main_mem_base + main_mem_size * 4;
                     i += main_mem_width) {
                        bnx2x_read_dmae(bp, i, main_mem_width / 4);
                        bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
                                         i, main_mem_width / 4);
                }
                /* Clear HC parity attention */
                REG_RD(bp, main_mem_prty_clr);
        }
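        /* The read-then-write-back DMAE pass touches every row of the HC
         * main memory (which backs the MSI-X table) so its parity bits are
         * recomputed; the final read of the clear register then drops the
         * stale parity attention.
         */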

        bnx2x_phy_probe(&bp->link_params);

        return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
        int rc = 0;

        DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
           BP_ABS_FUNC(bp), load_code);

        bp->dmae_ready = 0;
        spin_lock_init(&bp->dmae_lock);
        rc = bnx2x_gunzip_init(bp);
        if (rc)
                return rc;

        switch (load_code) {
        case FW_MSG_CODE_DRV_LOAD_COMMON:
        case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
                rc = bnx2x_init_hw_common(bp, load_code);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_PORT:
                rc = bnx2x_init_hw_port(bp);
                if (rc)
                        goto init_hw_err;
                /* no break */

        case FW_MSG_CODE_DRV_LOAD_FUNCTION:
                rc = bnx2x_init_hw_func(bp);
                if (rc)
                        goto init_hw_err;
                break;

        default:
                BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
                break;
        }
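        /* Note the deliberate fall-through: a COMMON load also runs the
         * PORT and FUNCTION stages, and a PORT load also runs the FUNCTION
         * stage, so each load_code executes its own stage plus all later
         * ones.
         */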

        if (!BP_NOMCP(bp)) {
                int mb_idx = BP_FW_MB_IDX(bp);

                bp->fw_drv_pulse_wr_seq =
                   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
                    DRV_PULSE_SEQ_MASK);
                DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
        }

init_hw_err:
        bnx2x_gunzip_end(bp);

        return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
        do { \
                if (x) { \
                        dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
                        x = NULL; \
                        y = 0; \
                } \
        } while (0)

#define BNX2X_FREE(x) \
        do { \
                if (x) { \
                        kfree((void *)x); \
                        x = NULL; \
                } \
        } while (0)
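
/* Both helpers NULL the pointer (and zero the DMA handle) after freeing,
 * which is what makes bnx2x_free_mem() safe to call from the
 * bnx2x_alloc_mem() error path when only part of the memory was allocated.
 */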

        int i;

        /* fastpath */
        /* Common */
        for_each_queue(bp, i) {
#ifdef BCM_CNIC
                /* FCoE client uses default status block */
                if (IS_FCOE_IDX(i)) {
                        union host_hc_status_block *sb =
                                        &bnx2x_fp(bp, i, status_blk);
                        memset(sb, 0, sizeof(union host_hc_status_block));
                        bnx2x_fp(bp, i, status_blk_mapping) = 0;
                } else {
#endif
                /* status blocks */
                if (CHIP_IS_E2(bp))
                        BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
                                       bnx2x_fp(bp, i, status_blk_mapping),
                                       sizeof(struct host_hc_status_block_e2));
                else
                        BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
                                       bnx2x_fp(bp, i, status_blk_mapping),
                                       sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
                }
#endif
        }
        /* Rx */
        for_each_rx_queue(bp, i) {

                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
                               bnx2x_fp(bp, i, rx_desc_mapping),
                               sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
                               bnx2x_fp(bp, i, rx_comp_mapping),
                               sizeof(struct eth_fast_path_rx_cqe) *
                               NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
                               bnx2x_fp(bp, i, rx_sge_mapping),
                               BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* Tx */
        for_each_tx_queue(bp, i) {

                /* fastpath tx rings: tx_buf tx_desc */
                BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
                BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
                               bnx2x_fp(bp, i, tx_desc_mapping),
                               sizeof(union eth_tx_bd_types) * NUM_TX_BD);
        }
        /* end of fastpath */

        BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
                       sizeof(struct host_sp_status_block));

        BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
                       sizeof(struct bnx2x_slowpath));

        BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
                       bp->context.size);

        bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

        BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
        if (CHIP_IS_E2(bp))
                BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
                               sizeof(struct host_hc_status_block_e2));
        else
                BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
                               sizeof(struct host_hc_status_block_e1x));

        BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

        BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

        BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
                       BCM_PAGE_SIZE * NUM_EQ_PAGES);

        BNX2X_FREE(bp->rx_indir_table);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

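/* Caching these pointers once per fastpath queue lets the hot RX/TX paths
 * read status-block indices through bnx2x_fp() without re-checking the
 * chip type (E2 vs E1x status block layouts) on every access.
 */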
static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
        union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
        if (CHIP_IS_E2(bp)) {
                bnx2x_fp(bp, index, sb_index_values) =
                        (__le16 *)status_blk.e2_sb->sb.index_values;
                bnx2x_fp(bp, index, sb_running_index) =
                        (__le16 *)status_blk.e2_sb->sb.running_index;
        } else {
                bnx2x_fp(bp, index, sb_index_values) =
                        (__le16 *)status_blk.e1x_sb->sb.index_values;
                bnx2x_fp(bp, index, sb_running_index) =
                        (__le16 *)status_blk.e1x_sb->sb.running_index;
        }
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
        do { \
                x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
                memset(x, 0, size); \
        } while (0)

#define BNX2X_ALLOC(x, size) \
        do { \
                x = kzalloc(size, GFP_KERNEL); \
                if (x == NULL) \
                        goto alloc_mem_err; \
        } while (0)
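
/* Any failed allocation jumps straight to alloc_mem_err, where
 * bnx2x_free_mem() unwinds whatever has been allocated so far - the
 * NULL-checking free helpers make this partial unwind safe.
 */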

        int i;

        /* fastpath */
        /* Common */
        for_each_queue(bp, i) {
                union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
                bnx2x_fp(bp, i, bp) = bp;
                /* status blocks */
#ifdef BCM_CNIC
                if (!IS_FCOE_IDX(i)) {
#endif
                        if (CHIP_IS_E2(bp))
                                BNX2X_PCI_ALLOC(sb->e2_sb,
                                    &bnx2x_fp(bp, i, status_blk_mapping),
                                    sizeof(struct host_hc_status_block_e2));
                        else
                                BNX2X_PCI_ALLOC(sb->e1x_sb,
                                    &bnx2x_fp(bp, i, status_blk_mapping),
                                    sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
                }
#endif
                set_sb_shortcuts(bp, i);
        }
        /* Rx */
        for_each_queue(bp, i) {

                /* fastpath rx rings: rx_buf rx_desc rx_comp */
                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
                            sizeof(struct sw_rx_bd) * NUM_RX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
                                &bnx2x_fp(bp, i, rx_desc_mapping),
                                sizeof(struct eth_rx_bd) * NUM_RX_BD);

                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
                                &bnx2x_fp(bp, i, rx_comp_mapping),
                                sizeof(struct eth_fast_path_rx_cqe) *
                                NUM_RCQ_BD);

                /* SGE ring */
                BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
                            sizeof(struct sw_rx_page) * NUM_RX_SGE);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
                                &bnx2x_fp(bp, i, rx_sge_mapping),
                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
        }
        /* Tx */
        for_each_queue(bp, i) {

                /* fastpath tx rings: tx_buf tx_desc */
                BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
                            sizeof(struct sw_tx_bd) * NUM_TX_BD);
                BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
                                &bnx2x_fp(bp, i, tx_desc_mapping),
                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
        }
        /* end of fastpath */

#ifdef BCM_CNIC
        if (CHIP_IS_E2(bp))
                BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
                                sizeof(struct host_hc_status_block_e2));
        else
                BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
                                sizeof(struct host_hc_status_block_e1x));

        /* allocate searcher T2 table */
        BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

        BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
                        sizeof(struct host_sp_status_block));

        BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
                        sizeof(struct bnx2x_slowpath));

        bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

        BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
                        bp->context.size);

        BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

        if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
                goto alloc_mem_err;

        /* Slow path ring */
        BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

        /* EQ */
        BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
                        BCM_PAGE_SIZE * NUM_EQ_PAGES);

        BNX2X_ALLOC(bp->rx_indir_table, sizeof(bp->rx_indir_table[0]) *
                    TSTORM_INDIRECTION_TABLE_SIZE);
        return 0;

alloc_mem_err:
        bnx2x_free_mem(bp);
        return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int flags);

int bnx2x_func_start(struct bnx2x *bp)
{
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
                                 WAIT_RAMROD_COMMON);
}

static int bnx2x_func_stop(struct bnx2x *bp)
{
        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

        /* Wait for completion */
        return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
                                 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
                                   u32 cl_bit_vec, u8 cam_offset,
                                   u8 is_bcast)
{
        struct mac_configuration_cmd *config =
                (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;

        bp->set_mac_pending = 1;

        config->hdr.length = 1;
        config->hdr.offset = cam_offset;
        config->hdr.client_id = 0xff;
        /* Mark this as a single MAC configuration ramrod (as opposed to a
         * UC/MC list configuration).
         */
        config->hdr.echo = 1;

        /* primary MAC */
        config->config_table[0].msb_mac_addr =
                                        swab16(*(u16 *)&mac[0]);
        config->config_table[0].middle_mac_addr =
                                        swab16(*(u16 *)&mac[2]);
        config->config_table[0].lsb_mac_addr =
                                        swab16(*(u16 *)&mac[4]);
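        /* Each 16-bit half of the MAC is byte-swapped above: the address
         * bytes are in network (big-endian) order while the config-table
         * words are little-endian firmware fields.
         */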
        config->config_table[0].clients_bit_vector =
                                        cpu_to_le32(cl_bit_vec);
        config->config_table[0].vlan_id = 0;
        config->config_table[0].pf_id = BP_FUNC(bp);
        if (set)
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);
        else
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        if (is_bcast)
                SET_FLAG(config->config_table[0].flags,
                         MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

        DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
           (set ? "setting" : "clearing"),
           config->config_table[0].msb_mac_addr,
           config->config_table[0].middle_mac_addr,
           config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

        mb();

        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
                      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

        /* Wait for a completion */
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
                             int *state_p, int flags)
{
        /* can take a while if any port is running */
        int cnt = 5000;
        u8 poll = flags & WAIT_RAMROD_POLL;
        u8 common = flags & WAIT_RAMROD_COMMON;

        DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
           poll ? "polling" : "waiting", state, idx);

        might_sleep();
        while (cnt--) {
                if (poll) {
                        if (common)
                                bnx2x_eq_int(bp);
                        else {
                                bnx2x_rx_int(bp->fp, 10);
                                /* if index is different from 0
                                 * the reply for some commands will
                                 * be on the non default queue
                                 */
                                if (idx)
                                        bnx2x_rx_int(&bp->fp[idx], 10);
                        }
                }

                mb(); /* state is changed by bnx2x_sp_event() */
                if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
                        DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
                        return 0;
                }

                msleep(1);

                if (bp->panic)
                        return -EIO;
        }

        /* timeout! */
        BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
                  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
        bnx2x_panic();
#endif

        return -EBUSY;
}
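
/* bnx2x_wait_ramrod() works in two modes: with WAIT_RAMROD_POLL it services
 * the EQ/RX completions itself (useful while interrupts are not yet armed);
 * otherwise it sleeps in 1ms steps until bnx2x_sp_event() updates *state_p,
 * giving a ceiling of roughly 5 seconds (cnt = 5000).
 */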

static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
        if (CHIP_IS_E1H(bp))
                return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else if (CHIP_MODE_IS_4_PORT(bp))
                return E2_FUNC_MAX * rel_offset + BP_FUNC(bp);
        else
                return E2_FUNC_MAX * rel_offset + BP_VN(bp);
}
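
/* CAM layout implied by the math above: the CAM is organized as rows of
 * one entry per function (E1H_FUNC_MAX or E2_FUNC_MAX wide); rel_offset
 * selects the row and the function/VN number selects the slot within it.
 */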

/**
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 */
enum {
        LLH_CAM_ISCSI_ETH_LINE = 0,
        LLH_CAM_ETH_LINE,
        LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};

static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
                                 int set,
                                 unsigned char *dev_addr,
                                 int index)
{
        u32 wb_data[2];
        u32 mem_offset, ena_offset, mem_index;
        /**
         * indexes mapping:
         * 0..7 - goes to MEM
         * 8..15 - goes to MEM2
         */

        if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
                return;

        /* calculate memory start offset according to the mapping
         * and index in the memory
         */
        if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
                mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
                                           NIG_REG_LLH0_FUNC_MEM;
                ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
                                           NIG_REG_LLH0_FUNC_MEM_ENABLE;
                mem_index = index;
        } else {
                mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
                                           NIG_REG_P0_LLH_FUNC_MEM2;
                ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
                                           NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
                mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
        }

        if (set) {
                /* LLH_FUNC_MEM is a u64 WB register */
                mem_offset += 8*mem_index;

                wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
                              (dev_addr[4] <<  8) |  dev_addr[5]);
                wb_data[1] = ((dev_addr[0] <<  8) |  dev_addr[1]);

                REG_WR_DMAE(bp, mem_offset, wb_data, 2);
        }

        /* enable/disable the entry */
        REG_WR(bp, ena_offset + 4*mem_index, set);
}
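
/* The two words written by bnx2x_set_mac_in_nig() pack the six MAC bytes
 * into a 64-bit wide-bus value: wb_data[0] carries bytes 2..5 and
 * wb_data[1] the two most significant bytes, matching the LLH_FUNC_MEM
 * register layout.
 */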

void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
        u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
                         bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

        /* networking MAC */
        bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
                               (1 << bp->fp->cl_id), cam_offset, 0);

        bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);

        if (CHIP_IS_E1(bp)) {
                /* broadcast MAC */
                static const u8 bcast[ETH_ALEN] = {
                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
                };
                bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
        }
}

static inline u8 bnx2x_e1_cam_mc_offset(struct bnx2x *bp)
{
        return CHIP_REV_IS_SLOW(bp) ?
                (BNX2X_MAX_EMUL_MULTI * (1 + BP_PORT(bp))) :
                (BNX2X_MAX_MULTICAST * (1 + BP_PORT(bp)));
}

/* set mc list, do not wait as wait implies sleep and
 * set_rx_mode can be invoked from non-sleepable context.
 *
 * Instead we use the same ramrod data buffer each time we need
 * to configure a list of addresses, and use the fact that the
 * list of MACs is changed in an incremental way and that the
 * function is called under the netif_addr_lock. A temporary
 * inconsistent CAM configuration (possible in case of a very fast
 * sequence of add/del/add on the host side) will shortly be
 * restored by the handler of the last ramrod.
 */
static int bnx2x_set_e1_mc_list(struct bnx2x *bp)
{
        int i = 0, old;
        struct net_device *dev = bp->dev;
        u8 offset = bnx2x_e1_cam_mc_offset(bp);
        struct netdev_hw_addr *ha;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

        if (netdev_mc_count(dev) > BNX2X_MAX_MULTICAST)
                return -EINVAL;

        netdev_for_each_mc_addr(ha, dev) {
                /* copy mac */
                config_cmd->config_table[i].msb_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
                config_cmd->config_table[i].middle_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
                config_cmd->config_table[i].lsb_mac_addr =
                        swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

                config_cmd->config_table[i].vlan_id = 0;
                config_cmd->config_table[i].pf_id = BP_FUNC(bp);
                config_cmd->config_table[i].clients_bit_vector =
                        cpu_to_le32(1 << BP_L_ID(bp));

                SET_FLAG(config_cmd->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_SET);

                DP(NETIF_MSG_IFUP,
                   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
                   config_cmd->config_table[i].msb_mac_addr,
                   config_cmd->config_table[i].middle_mac_addr,
                   config_cmd->config_table[i].lsb_mac_addr);
                i++;
        }
        old = config_cmd->hdr.length;
        if (old > i) {
                for (; i < old; i++) {
                        if (CAM_IS_INVALID(config_cmd->
                                           config_table[i])) {
                                /* already invalidated */
                                break;
                        }
                        /* invalidate */
                        SET_FLAG(config_cmd->config_table[i].flags,
                                 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                                 T_ETH_MAC_COMMAND_INVALIDATE);
                }
        }

        wmb();

        config_cmd->hdr.length = i;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
        /* Mark that this ramrod doesn't use bp->set_mac_pending for
         * synchronization.
         */
        config_cmd->hdr.echo = 0;

        mb();

        return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                             U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

void bnx2x_invalidate_e1_mc_list(struct bnx2x *bp)
{
        int i;
        struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
        dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
        int ramrod_flags = WAIT_RAMROD_COMMON;
        u8 offset = bnx2x_e1_cam_mc_offset(bp);

        for (i = 0; i < BNX2X_MAX_MULTICAST; i++)
                SET_FLAG(config_cmd->config_table[i].flags,
                         MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
                         T_ETH_MAC_COMMAND_INVALIDATE);

        wmb();

        config_cmd->hdr.length = BNX2X_MAX_MULTICAST;
        config_cmd->hdr.offset = offset;
        config_cmd->hdr.client_id = 0xff;
        /* We'll wait for a completion this time... */
        config_cmd->hdr.echo = 1;

        bp->set_mac_pending = 1;

        mb();

        bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
                      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

        /* Wait for a completion */
        bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
                          ramrod_flags);
}

/* Accept one or more multicasts */
static int bnx2x_set_e1h_mc_list(struct bnx2x *bp)
{
        struct net_device *dev = bp->dev;
        struct netdev_hw_addr *ha;
        u32 mc_filter[MC_HASH_SIZE];
        u32 crc, bit, regidx;
        int i;

        memset(mc_filter, 0, 4 * MC_HASH_SIZE);

        netdev_for_each_mc_addr(ha, dev) {
                DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
                   bnx2x_mc_addr(ha));

                crc = crc32c_le(0, bnx2x_mc_addr(ha),
                                ETH_ALEN);
                bit = (crc >> 24) & 0xff;
                regidx = bit >> 5;
                bit &= 0x1f;
                mc_filter[regidx] |= (1 << bit);
        }
6545
6546 for (i = 0; i < MC_HASH_SIZE; i++)
6547 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6548 mc_filter[i]);
6549
6550 return 0;
6551}
6552
6553void bnx2x_invalidate_e1h_mc_list(struct bnx2x *bp)
6554{
6555 int i;
6556
6557 for (i = 0; i < MC_HASH_SIZE; i++)
6558 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6559}
6560

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
        u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
                         bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
        u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
                BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
        u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
        u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;

        /* Send a SET_MAC ramrod */
        bnx2x_set_mac_addr_gen(bp, set, iscsi_mac, cl_bit_vec,
                               cam_offset, 0);

        bnx2x_set_mac_in_nig(bp, set, iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

        return 0;
}

/**
 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 * ETH MAC(s). This function will wait until the ramrod
 * completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 on success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
        u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
        /**
         * CAM allocation for E1H
         * eth unicasts: by func number
         * iscsi: by func number
         * fip unicast: by func number
         * fip multicast: by func number
         */
        bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
                cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

        return 0;
}

int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
        u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

        /**
         * CAM allocation for E1H
         * eth unicasts: by func number
         * iscsi: by func number
         * fip unicast: by func number
         * fip multicast: by func number
         */
        bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
                bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

        return 0;
}
#endif

static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
                                    struct bnx2x_client_init_params *params,
                                    u8 activate,
                                    struct client_init_ramrod_data *data)
{
        /* Clear the buffer */
        memset(data, 0, sizeof(*data));

        /* general */
        data->general.client_id = params->rxq_params.cl_id;
        data->general.statistics_counter_id = params->rxq_params.stat_id;
        data->general.statistics_en_flg =
                (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
        data->general.is_fcoe_flg =
                (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
        data->general.activate_flg = activate;
        data->general.sp_client_id = params->rxq_params.spcl_id;

        /* Rx data */
        data->rx.tpa_en_flg =
                (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
        data->rx.vmqueue_mode_en_flg = 0;
        data->rx.cache_line_alignment_log_size =
                params->rxq_params.cache_line_log;
        data->rx.enable_dynamic_hc =
                (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
        data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
        data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
        data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

        /* We don't set drop flags */
        data->rx.drop_ip_cs_err_flg = 0;
        data->rx.drop_tcp_cs_err_flg = 0;
        data->rx.drop_ttl0_flg = 0;
        data->rx.drop_udp_cs_err_flg = 0;

        data->rx.inner_vlan_removal_enable_flg =
                (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
        data->rx.outer_vlan_removal_enable_flg =
                (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
        data->rx.status_block_id = params->rxq_params.fw_sb_id;
        data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
        data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
        data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
        data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
        data->rx.bd_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
        data->rx.bd_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
        data->rx.sge_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.sge_map));
        data->rx.sge_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.sge_map));
        data->rx.cqe_page_base.lo =
                cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
        data->rx.cqe_page_base.hi =
                cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
        data->rx.is_leading_rss =
                (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
        data->rx.is_approx_mcast = data->rx.is_leading_rss;

        /* Tx data */
        data->tx.enforce_security_flg = 0; /* VF specific */
        data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
        data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
        data->tx.mtu = 0; /* VF specific */
        data->tx.tx_bd_page_base.lo =
                cpu_to_le32(U64_LO(params->txq_params.dscr_map));
        data->tx.tx_bd_page_base.hi =
                cpu_to_le32(U64_HI(params->txq_params.dscr_map));

        /* flow control data */
        data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
        data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
        data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
        data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
        data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
        data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
        data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

        data->fc.safc_group_num = params->txq_params.cos;
        data->fc.safc_group_en_flg =
                (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
        data->fc.traffic_type =
                (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
                LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}

static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
        /* ustorm cxt validation */
        cxt->ustorm_ag_context.cdu_usage =
                CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
                                       ETH_CONNECTION_TYPE);
        /* xstorm cxt validation */
        cxt->xstorm_ag_context.cdu_reserved =
                CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
                                       ETH_CONNECTION_TYPE);
}

static int bnx2x_setup_fw_client(struct bnx2x *bp,
                                 struct bnx2x_client_init_params *params,
                                 u8 activate,
                                 struct client_init_ramrod_data *data,
                                 dma_addr_t data_mapping)
{
        u16 hc_usec;
        int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
        int ramrod_flags = 0, rc;

        /* HC and context validation values */
        hc_usec = params->txq_params.hc_rate ?
                1000000 / params->txq_params.hc_rate : 0;
        bnx2x_update_coalesce_sb_index(bp,
                        params->txq_params.fw_sb_id,
                        params->txq_params.sb_cq_index,
                        !(params->txq_params.flags & QUEUE_FLG_HC),
                        hc_usec);
6752
6753 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6754
6755 hc_usec = params->rxq_params.hc_rate ?
6756 1000000 / params->rxq_params.hc_rate : 0;
6757 bnx2x_update_coalesce_sb_index(bp,
6758 params->rxq_params.fw_sb_id,
6759 params->rxq_params.sb_cq_index,
6760 !(params->rxq_params.flags & QUEUE_FLG_HC),
6761 hc_usec);
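	/* Arithmetic sketch for the hc_usec values above: hc_rate is a
	 * rate in events/sec, so the coalescing interval passed down is
	 * 1000000/hc_rate microseconds; e.g. a (hypothetical) hc_rate of
	 * 5000 gives hc_usec = 200, i.e. at most one index update per
	 * 200us, while a rate of 0 disables the timeout (hc_usec = 0).
	 */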
6762
6763 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6764 params->rxq_params.cid);
6765
6766 /* zero stats */
6767 if (params->txq_params.flags & QUEUE_FLG_STATS)
6768 storm_memset_xstats_zero(bp, BP_PORT(bp),
6769 params->txq_params.stat_id);
6770
6771 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6772 storm_memset_ustats_zero(bp, BP_PORT(bp),
6773 params->rxq_params.stat_id);
6774 storm_memset_tstats_zero(bp, BP_PORT(bp),
6775 params->rxq_params.stat_id);
6776 }
6777
6778 /* Fill the ramrod data */
6779 bnx2x_fill_cl_init_data(bp, params, activate, data);
6780
6781 /* SETUP ramrod.
6782 *
6784 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6785 * barrier other than mmiowb() is needed to impose a
6786 * proper ordering of memory operations.
6786 */
6787 mmiowb();
a2fbb9ea 6788
a2fbb9ea 6789
523224a3
DK
6790 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6791 U64_HI(data_mapping), U64_LO(data_mapping), 0);
a2fbb9ea 6792
34f80b04 6793 /* Wait for completion */
523224a3
DK
6794 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6795 params->ramrod_params.index,
6796 params->ramrod_params.pstate,
6797 ramrod_flags);
34f80b04 6798 return rc;
a2fbb9ea
ET
6799}
6800
d6214d7a
DK
6801/**
6802 * Configure interrupt mode according to current configuration.
6803 * In case of MSI-X it will also try to enable MSI-X.
6804 *
6805 * @param bp
6806 *
6807 * @return int
6808 */
6809static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 6810{
d6214d7a 6811 int rc = 0;
ca00392c 6812
d6214d7a
DK
6813 switch (bp->int_mode) {
6814 case INT_MODE_MSI:
6815 bnx2x_enable_msi(bp);
6816 /* falling through... */
6817 case INT_MODE_INTx:
ec6ba945 6818 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a 6819 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
ca00392c 6820 break;
d6214d7a
DK
6821 default:
6822 /* Set number of queues according to bp->multi_mode value */
6823 bnx2x_set_num_queues(bp);
ca00392c 6824
d6214d7a
DK
6825 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6826 bp->num_queues);
ca00392c 6827
d6214d7a
DK
6828 /* if we can't use MSI-X we only need one fp,
6829 * so try to enable MSI-X with the requested number of fp's
6830 * and fall back to MSI or legacy INTx with one fp
6831 */
6832 rc = bnx2x_enable_msix(bp);
6833 if (rc) {
6834 /* failed to enable MSI-X */
6835 if (bp->multi_mode)
6836 DP(NETIF_MSG_IFUP,
6837 "Multi requested but failed to "
6838 "enable MSI-X (%d), "
6839 "set number of queues to %d\n",
6840 bp->num_queues,
ec6ba945
VZ
6841 1 + NONE_ETH_CONTEXT_USE);
6842 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
d6214d7a
DK
6843
6844 if (!(bp->flags & DISABLE_MSI_FLAG))
6845 bnx2x_enable_msi(bp);
6846 }
ca00392c 6847
9f6c9258
DK
6848 break;
6849 }
d6214d7a
DK
6850
6851 return rc;
a2fbb9ea
ET
6852}
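/* To summarize the ladder above: MSI-X is tried first with the number of
 * queues derived from multi_mode; if that fails the driver drops to a
 * single ETH queue (plus the non-ETH contexts) and retries with MSI
 * unless DISABLE_MSI_FLAG is set, otherwise legacy INTx is used.
 * INT_MODE_MSI and INT_MODE_INTx force the single-queue path directly.
 */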
6853
c2bff63f
DK
6854/* must be called prior to any HW initializations */
6855static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6856{
6857 return L2_ILT_LINES(bp);
6858}
6859
523224a3
DK
6860void bnx2x_ilt_set_info(struct bnx2x *bp)
6861{
6862 struct ilt_client_info *ilt_client;
6863 struct bnx2x_ilt *ilt = BP_ILT(bp);
6864 u16 line = 0;
6865
6866 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6867 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6868
6869 /* CDU */
6870 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6871 ilt_client->client_num = ILT_CLIENT_CDU;
6872 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6873 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6874 ilt_client->start = line;
6875 line += L2_ILT_LINES(bp);
6876#ifdef BCM_CNIC
6877 line += CNIC_ILT_LINES;
6878#endif
6879 ilt_client->end = line - 1;
6880
6881 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6882 "flags 0x%x, hw psz %d\n",
6883 ilt_client->start,
6884 ilt_client->end,
6885 ilt_client->page_size,
6886 ilt_client->flags,
6887 ilog2(ilt_client->page_size >> 12));
6888
6889 /* QM */
6890 if (QM_INIT(bp->qm_cid_count)) {
6891 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6892 ilt_client->client_num = ILT_CLIENT_QM;
6893 ilt_client->page_size = QM_ILT_PAGE_SZ;
6894 ilt_client->flags = 0;
6895 ilt_client->start = line;
6896
6897 /* 4 bytes for each cid */
6898 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6899 QM_ILT_PAGE_SZ);
6900
6901 ilt_client->end = line - 1;
6902
6903 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6904 "flags 0x%x, hw psz %d\n",
6905 ilt_client->start,
6906 ilt_client->end,
6907 ilt_client->page_size,
6908 ilt_client->flags,
6909 ilog2(ilt_client->page_size >> 12));
6910
6911 }
6912 /* SRC */
6913 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6914#ifdef BCM_CNIC
6915 ilt_client->client_num = ILT_CLIENT_SRC;
6916 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6917 ilt_client->flags = 0;
6918 ilt_client->start = line;
6919 line += SRC_ILT_LINES;
6920 ilt_client->end = line - 1;
6921
6922 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6923 "flags 0x%x, hw psz %d\n",
6924 ilt_client->start,
6925 ilt_client->end,
6926 ilt_client->page_size,
6927 ilt_client->flags,
6928 ilog2(ilt_client->page_size >> 12));
6929
6930#else
6931 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6932#endif
9f6c9258 6933
523224a3
DK
6934 /* TM */
6935 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6936#ifdef BCM_CNIC
6937 ilt_client->client_num = ILT_CLIENT_TM;
6938 ilt_client->page_size = TM_ILT_PAGE_SZ;
6939 ilt_client->flags = 0;
6940 ilt_client->start = line;
6941 line += TM_ILT_LINES;
6942 ilt_client->end = line - 1;
6943
6944 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6945 "flags 0x%x, hw psz %d\n",
6946 ilt_client->start,
6947 ilt_client->end,
6948 ilt_client->page_size,
6949 ilt_client->flags,
6950 ilog2(ilt_client->page_size >> 12));
9f6c9258 6951
523224a3
DK
6952#else
6953 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6954#endif
6955}
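/* Worked example for the QM sizing above, with purely hypothetical
 * numbers: qm_cid_count = 64, QM_QUEUES_PER_FUNC = 16 and
 * QM_ILT_PAGE_SZ = 4096 give DIV_ROUND_UP(64 * 16 * 4, 4096) = 1,
 * i.e. a single ILT line covers all the 4-byte QM entries.
 */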
f85582f8 6956
523224a3
DK
6957int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6958 int is_leading)
a2fbb9ea 6959{
523224a3 6960 struct bnx2x_client_init_params params = { {0} };
a2fbb9ea
ET
6961 int rc;
6962
ec6ba945
VZ
6963 /* reset IGU state; skip the FCoE L2 queue */
6964 if (!IS_FCOE_FP(fp))
6965 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 6966 IGU_INT_ENABLE, 0);
a2fbb9ea 6967
523224a3
DK
6968 params.ramrod_params.pstate = &fp->state;
6969 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6970 params.ramrod_params.index = fp->index;
6971 params.ramrod_params.cid = fp->cid;
a2fbb9ea 6972
ec6ba945
VZ
6973#ifdef BCM_CNIC
6974 if (IS_FCOE_FP(fp))
6975 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6976
6977#endif
6978
523224a3
DK
6979 if (is_leading)
6980 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
a2fbb9ea 6981
523224a3
DK
6982 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6983
6984 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6985
6986 rc = bnx2x_setup_fw_client(bp, &params, 1,
6987 bnx2x_sp(bp, client_init_data),
6988 bnx2x_sp_mapping(bp, client_init_data));
34f80b04 6989 return rc;
a2fbb9ea
ET
6990}
6991
8d96286a 6992static int bnx2x_stop_fw_client(struct bnx2x *bp,
6993 struct bnx2x_client_ramrod_params *p)
a2fbb9ea 6994{
34f80b04 6995 int rc;
a2fbb9ea 6996
523224a3 6997 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
a2fbb9ea 6998
523224a3
DK
6999 /* halt the connection */
7000 *p->pstate = BNX2X_FP_STATE_HALTING;
7001 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
7002 p->cl_id, 0);
a2fbb9ea 7003
34f80b04 7004 /* Wait for completion */
523224a3
DK
7005 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
7006 p->pstate, poll_flag);
34f80b04 7007 if (rc) /* timeout */
da5a662a 7008 return rc;
a2fbb9ea 7009
523224a3
DK
7010 *p->pstate = BNX2X_FP_STATE_TERMINATING;
7011 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
7012 p->cl_id, 0);
7013 /* Wait for completion */
7014 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
7015 p->pstate, poll_flag);
7016 if (rc) /* timeout */
7017 return rc;
a2fbb9ea 7018
a2fbb9ea 7019
523224a3
DK
7020 /* delete cfc entry */
7021 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
da5a662a 7022
523224a3
DK
7023 /* Wait for completion */
7024 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
7025 p->pstate, WAIT_RAMROD_COMMON);
da5a662a 7026 return rc;
a2fbb9ea
ET
7027}
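/* The teardown above is a three-step ramrod sequence, each step gated on
 * a fastpath state transition:
 *	HALT	  -> HALTING	 -> wait for BNX2X_FP_STATE_HALTED
 *	TERMINATE -> TERMINATING -> wait for BNX2X_FP_STATE_TERMINATED
 *	CFC_DEL	  ->		 -> wait for BNX2X_FP_STATE_CLOSED
 * A timeout on any wait aborts the sequence and the error is returned.
 */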
7028
523224a3
DK
7029static int bnx2x_stop_client(struct bnx2x *bp, int index)
7030{
7031 struct bnx2x_client_ramrod_params client_stop = {0};
7032 struct bnx2x_fastpath *fp = &bp->fp[index];
7033
7034 client_stop.index = index;
7035 client_stop.cid = fp->cid;
7036 client_stop.cl_id = fp->cl_id;
7037 client_stop.pstate = &(fp->state);
7038 client_stop.poll = 0;
7039
7040 return bnx2x_stop_fw_client(bp, &client_stop);
7041}
7042
7043
34f80b04
EG
7044static void bnx2x_reset_func(struct bnx2x *bp)
7045{
7046 int port = BP_PORT(bp);
7047 int func = BP_FUNC(bp);
f2e0899f 7048 int i;
523224a3 7049 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
f2e0899f
DK
7050 (CHIP_IS_E2(bp) ?
7051 offsetof(struct hc_status_block_data_e2, common) :
7052 offsetof(struct hc_status_block_data_e1x, common));
523224a3
DK
7053 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
7054 int pfid_offset = offsetof(struct pci_entity, pf_id);
7055
7056 /* Disable the function in the FW */
7057 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
7058 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
7059 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
7060 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
7061
7062 /* FP SBs */
ec6ba945 7063 for_each_eth_queue(bp, i) {
523224a3
DK
7064 struct bnx2x_fastpath *fp = &bp->fp[i];
7065 REG_WR8(bp,
7066 BAR_CSTRORM_INTMEM +
7067 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
7068 + pfunc_offset_fp + pfid_offset,
7069 HC_FUNCTION_DISABLED);
7070 }
7071
7072 /* SP SB */
7073 REG_WR8(bp,
7074 BAR_CSTRORM_INTMEM +
7075 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
7076 pfunc_offset_sp + pfid_offset,
7077 HC_FUNCTION_DISABLED);
7078
7079
7080 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
7081 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
7082 0);
34f80b04
EG
7083
7084 /* Configure IGU */
f2e0899f
DK
7085 if (bp->common.int_block == INT_BLOCK_HC) {
7086 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7087 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7088 } else {
7089 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7090 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7091 }
34f80b04 7092
37b091ba
MC
7093#ifdef BCM_CNIC
7094 /* Disable Timer scan */
7095 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7096 /*
7097 * Wait for at least 10ms and up to 2 second for the timers scan to
7098 * complete
7099 */
7100 for (i = 0; i < 200; i++) {
7101 msleep(10);
7102 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7103 break;
7104 }
7105#endif
34f80b04 7106 /* Clear ILT */
f2e0899f
DK
7107 bnx2x_clear_func_ilt(bp, func);
7108
7109 /* Timers workaround for an E2 bug: if this is vnic-3,
7110 * we need to set the entire ILT range for these timers.
7111 */
7112 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
7113 struct ilt_client_info ilt_cli;
7114 /* use dummy TM client */
7115 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
7116 ilt_cli.start = 0;
7117 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
7118 ilt_cli.client_num = ILT_CLIENT_TM;
7119
7120 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
7121 }
7122
7123 /* this assumes that reset_port() was called before reset_func() */
7124 if (CHIP_IS_E2(bp))
7125 bnx2x_pf_disable(bp);
523224a3
DK
7126
7127 bp->dmae_ready = 0;
34f80b04
EG
7128}
7129
7130static void bnx2x_reset_port(struct bnx2x *bp)
7131{
7132 int port = BP_PORT(bp);
7133 u32 val;
7134
7135 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7136
7137 /* Do not rcv packets to BRB */
7138 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7139 /* Do not direct rcv packets that are not for MCP to the BRB */
7140 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7141 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7142
7143 /* Configure AEU */
7144 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7145
7146 msleep(100);
7147 /* Check for BRB port occupancy */
7148 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7149 if (val)
7150 DP(NETIF_MSG_IFDOWN,
33471629 7151 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
7152
7153 /* TODO: Close Doorbell port? */
7154}
7155
34f80b04
EG
7156static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7157{
7158 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
f2e0899f 7159 BP_ABS_FUNC(bp), reset_code);
34f80b04
EG
7160
7161 switch (reset_code) {
7162 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7163 bnx2x_reset_port(bp);
7164 bnx2x_reset_func(bp);
7165 bnx2x_reset_common(bp);
7166 break;
7167
7168 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7169 bnx2x_reset_port(bp);
7170 bnx2x_reset_func(bp);
7171 break;
7172
7173 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7174 bnx2x_reset_func(bp);
7175 break;
49d66772 7176
34f80b04
EG
7177 default:
7178 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7179 break;
7180 }
7181}
7182
ec6ba945
VZ
7183#ifdef BCM_CNIC
7184static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7185{
7186 if (bp->flags & FCOE_MACS_SET) {
7187 if (!IS_MF_SD(bp))
7188 bnx2x_set_fip_eth_mac_addr(bp, 0);
7189
7190 bnx2x_set_all_enode_macs(bp, 0);
7191
7192 bp->flags &= ~FCOE_MACS_SET;
7193 }
7194}
7195#endif
7196
9f6c9258 7197void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7198{
da5a662a 7199 int port = BP_PORT(bp);
a2fbb9ea 7200 u32 reset_code = 0;
da5a662a 7201 int i, cnt, rc;
a2fbb9ea 7202
555f6c78 7203 /* Wait until tx fastpath tasks complete */
ec6ba945 7204 for_each_tx_queue(bp, i) {
228241eb
ET
7205 struct bnx2x_fastpath *fp = &bp->fp[i];
7206
34f80b04 7207 cnt = 1000;
e8b5fc51 7208 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7209
34f80b04
EG
7210 if (!cnt) {
7211 BNX2X_ERR("timeout waiting for queue[%d]\n",
7212 i);
7213#ifdef BNX2X_STOP_ON_ERROR
7214 bnx2x_panic();
7215 return -EBUSY;
7216#else
7217 break;
7218#endif
7219 }
7220 cnt--;
da5a662a 7221 msleep(1);
34f80b04 7222 }
228241eb 7223 }
da5a662a
VZ
7224 /* Give HW time to discard old tx messages */
7225 msleep(1);
a2fbb9ea 7226
6e30dd4e 7227 bnx2x_set_eth_mac(bp, 0);
65abd74d 7228
6e30dd4e 7229 bnx2x_invalidate_uc_list(bp);
3101c2bc 7230
6e30dd4e
VZ
7231 if (CHIP_IS_E1(bp))
7232 bnx2x_invalidate_e1_mc_list(bp);
7233 else {
7234 bnx2x_invalidate_e1h_mc_list(bp);
7235 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
3101c2bc 7236 }
523224a3 7237
993ac7b5 7238#ifdef BCM_CNIC
ec6ba945 7239 bnx2x_del_fcoe_eth_macs(bp);
993ac7b5 7240#endif
3101c2bc 7241
65abd74d
YG
7242 if (unload_mode == UNLOAD_NORMAL)
7243 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7244
7d0446c2 7245 else if (bp->flags & NO_WOL_FLAG)
65abd74d 7246 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 7247
7d0446c2 7248 else if (bp->wol) {
65abd74d
YG
7249 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7250 u8 *mac_addr = bp->dev->dev_addr;
7251 u32 val;
7252 /* The MAC address is written to entries 1-4 to
7253 preserve entry 0, which is used by the PMF */
7254 u8 entry = (BP_E1HVN(bp) + 1)*8;
7255
7256 val = (mac_addr[0] << 8) | mac_addr[1];
7257 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7258
7259 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7260 (mac_addr[4] << 8) | mac_addr[5];
7261 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7262
7263 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7264
7265 } else
7266 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7267
34f80b04
EG
7268 /* Close multi and leading connections.
7269 Completions for ramrods are collected in a synchronous way */
523224a3
DK
7270 for_each_queue(bp, i)
7271
7272 if (bnx2x_stop_client(bp, i))
7273#ifdef BNX2X_STOP_ON_ERROR
7274 return;
7275#else
228241eb 7276 goto unload_error;
523224a3 7277#endif
a2fbb9ea 7278
523224a3 7279 rc = bnx2x_func_stop(bp);
da5a662a 7280 if (rc) {
523224a3 7281 BNX2X_ERR("Function stop failed!\n");
da5a662a 7282#ifdef BNX2X_STOP_ON_ERROR
523224a3 7283 return;
da5a662a
VZ
7284#else
7285 goto unload_error;
34f80b04 7286#endif
228241eb 7287 }
523224a3 7288#ifndef BNX2X_STOP_ON_ERROR
228241eb 7289unload_error:
523224a3 7290#endif
34f80b04 7291 if (!BP_NOMCP(bp))
a22f0788 7292 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04 7293 else {
f2e0899f
DK
7294 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7295 "%d, %d, %d\n", BP_PATH(bp),
7296 load_count[BP_PATH(bp)][0],
7297 load_count[BP_PATH(bp)][1],
7298 load_count[BP_PATH(bp)][2]);
7299 load_count[BP_PATH(bp)][0]--;
7300 load_count[BP_PATH(bp)][1 + port]--;
7301 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7302 "%d, %d, %d\n", BP_PATH(bp),
7303 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7304 load_count[BP_PATH(bp)][2]);
7305 if (load_count[BP_PATH(bp)][0] == 0)
34f80b04 7306 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
f2e0899f 7307 else if (load_count[BP_PATH(bp)][1 + port] == 0)
34f80b04
EG
7308 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7309 else
7310 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7311 }
a2fbb9ea 7312
34f80b04
EG
7313 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7314 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7315 bnx2x__link_reset(bp);
a2fbb9ea 7316
523224a3
DK
7317 /* Disable HW interrupts, NAPI */
7318 bnx2x_netif_stop(bp, 1);
7319
7320 /* Release IRQs */
d6214d7a 7321 bnx2x_free_irq(bp);
523224a3 7322
a2fbb9ea 7323 /* Reset the chip */
228241eb 7324 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7325
7326 /* Report UNLOAD_DONE to MCP */
34f80b04 7327 if (!BP_NOMCP(bp))
a22f0788 7328 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
356e2385 7329
72fd0718
VZ
7330}
7331
9f6c9258 7332void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
7333{
7334 u32 val;
7335
7336 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7337
7338 if (CHIP_IS_E1(bp)) {
7339 int port = BP_PORT(bp);
7340 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7341 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7342
7343 val = REG_RD(bp, addr);
7344 val &= ~(0x300);
7345 REG_WR(bp, addr, val);
7346 } else if (CHIP_IS_E1H(bp)) {
7347 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7348 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7349 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7350 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7351 }
7352}
7353
72fd0718
VZ
7354/* Close gates #2, #3 and #4: */
7355static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7356{
7357 u32 val, addr;
7358
7359 /* Gates #2 and #4a are closed/opened for "not E1" only */
7360 if (!CHIP_IS_E1(bp)) {
7361 /* #4 */
7362 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7363 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7364 close ? (val | 0x1) : (val & (~(u32)1)));
7365 /* #2 */
7366 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7367 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7368 close ? (val | 0x1) : (val & (~(u32)1)));
7369 }
7370
7371 /* #3 */
7372 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7373 val = REG_RD(bp, addr);
7374 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7375
7376 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7377 close ? "closing" : "opening");
7378 mmiowb();
7379}
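/* Note the inverted polarity above: gates #2 and #4 are closed by
 * setting the discard bit (close -> bit 0 set), while gate #3 is closed
 * by clearing its bit (the (!close) expression selects the set value
 * when opening).
 */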
7380
7381#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7382
7383static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7384{
7385 /* Do some magic... */
7386 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7387 *magic_val = val & SHARED_MF_CLP_MAGIC;
7388 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7389}
7390
7391/* Restore the value of the `magic' bit.
7392 *
7393 * @param bp Driver handle.
7394 * @param magic_val Old value of the `magic' bit.
7395 */
7396static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7397{
7398 /* Restore the `magic' bit value... */
72fd0718
VZ
7399 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7400 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7401 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7402}
7403
f85582f8
DK
7404/**
7405 * Prepares for MCP reset: takes care of CLP configurations.
72fd0718
VZ
7406 *
7407 * @param bp
7408 * @param magic_val Old value of 'magic' bit.
7409 */
7410static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7411{
7412 u32 shmem;
7413 u32 validity_offset;
7414
7415 DP(NETIF_MSG_HW, "Starting\n");
7416
7417 /* Set `magic' bit in order to save MF config */
7418 if (!CHIP_IS_E1(bp))
7419 bnx2x_clp_reset_prep(bp, magic_val);
7420
7421 /* Get shmem offset */
7422 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7423 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7424
7425 /* Clear validity map flags */
7426 if (shmem > 0)
7427 REG_WR(bp, shmem + validity_offset, 0);
7428}
7429
7430#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7431#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7432
7433/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7434 * depending on the HW type.
7435 *
7436 * @param bp
7437 */
7438static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7439{
7440 /* special handling for emulation and FPGA,
7441 wait 10 times longer */
7442 if (CHIP_REV_IS_SLOW(bp))
7443 msleep(MCP_ONE_TIMEOUT*10);
7444 else
7445 msleep(MCP_ONE_TIMEOUT);
7446}
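/* Budget sketch: the wait loop in bnx2x_reset_mcp_comp() below runs
 * MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50 iterations, each one
 * sleeping MCP_ONE_TIMEOUT ms (10x longer on emulation/FPGA), so the
 * MCP gets roughly 5 seconds to come back up.
 */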
7447
7448static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7449{
7450 u32 shmem, cnt, validity_offset, val;
7451 int rc = 0;
7452
7453 msleep(100);
7454
7455 /* Get shmem offset */
7456 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7457 if (shmem == 0) {
7458 BNX2X_ERR("Shmem 0 return failure\n");
7459 rc = -ENOTTY;
7460 goto exit_lbl;
7461 }
7462
7463 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7464
7465 /* Wait for MCP to come up */
7466 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7467 /* TBD: it's best to check the validity map of the last port;
7468 * currently this checks port 0.
7469 */
7470 val = REG_RD(bp, shmem + validity_offset);
7471 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7472 shmem + validity_offset, val);
7473
7474 /* check that shared memory is valid. */
7475 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7476 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7477 break;
7478
7479 bnx2x_mcp_wait_one(bp);
7480 }
7481
7482 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7483
7484 /* Check that shared memory is valid. This indicates that MCP is up. */
7485 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7486 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7487 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7488 rc = -ENOTTY;
7489 goto exit_lbl;
7490 }
7491
7492exit_lbl:
7493 /* Restore the `magic' bit value */
7494 if (!CHIP_IS_E1(bp))
7495 bnx2x_clp_reset_done(bp, magic_val);
7496
7497 return rc;
7498}
7499
7500static void bnx2x_pxp_prep(struct bnx2x *bp)
7501{
7502 if (!CHIP_IS_E1(bp)) {
7503 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7504 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7505 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7506 mmiowb();
7507 }
7508}
7509
7510/*
7511 * Reset the whole chip except for:
7512 * - PCIE core
7513 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7514 * one reset bit)
7515 * - IGU
7516 * - MISC (including AEU)
7517 * - GRC
7518 * - RBCN, RBCP
7519 */
7520static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7521{
7522 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7523
7524 not_reset_mask1 =
7525 MISC_REGISTERS_RESET_REG_1_RST_HC |
7526 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7527 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7528
7529 not_reset_mask2 =
7530 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7531 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7532 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7533 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7534 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7535 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7536 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7537 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7538
7539 reset_mask1 = 0xffffffff;
7540
7541 if (CHIP_IS_E1(bp))
7542 reset_mask2 = 0xffff;
7543 else
7544 reset_mask2 = 0x1ffff;
7545
7546 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7547 reset_mask1 & (~not_reset_mask1));
7548 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7549 reset_mask2 & (~not_reset_mask2));
7550
7551 barrier();
7552 mmiowb();
7553
7554 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7555 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7556 mmiowb();
7557}
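/* On the _CLEAR/_SET convention used above: writing a bit to the MISC
 * reset _CLEAR register appears to put the corresponding block into
 * reset, and writing it to the _SET register releases it (compare
 * bnx2x_undi_unload(), which resets the device through _CLEAR and then
 * takes the NIG back out of reset through _SET). So the sequence above
 * asserts reset on everything but the masked blocks, then releases it.
 */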
7558
7559static int bnx2x_process_kill(struct bnx2x *bp)
7560{
7561 int cnt = 1000;
7562 u32 val = 0;
7563 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7564
7565
7566 /* Empty the Tetris buffer, wait for 1s */
7567 do {
7568 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7569 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7570 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7571 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7572 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7573 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7574 ((port_is_idle_0 & 0x1) == 0x1) &&
7575 ((port_is_idle_1 & 0x1) == 0x1) &&
7576 (pgl_exp_rom2 == 0xffffffff))
7577 break;
7578 msleep(1);
7579 } while (cnt-- > 0);
7580
7581 if (cnt <= 0) {
7582 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7583 " are still"
7584 " outstanding read requests after 1s!\n");
7585 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7586 " port_is_idle_0=0x%08x,"
7587 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7588 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7589 pgl_exp_rom2);
7590 return -EAGAIN;
7591 }
7592
7593 barrier();
7594
7595 /* Close gates #2, #3 and #4 */
7596 bnx2x_set_234_gates(bp, true);
7597
7598 /* TBD: Indicate that "process kill" is in progress to MCP */
7599
7600 /* Clear "unprepared" bit */
7601 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7602 barrier();
7603
7604 /* Make sure all is written to the chip before the reset */
7605 mmiowb();
7606
7607 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7608 * PSWHST, GRC and PSWRD Tetris buffer.
7609 */
7610 msleep(1);
7611
7612 /* Prepare to chip reset: */
7613 /* MCP */
7614 bnx2x_reset_mcp_prep(bp, &val);
7615
7616 /* PXP */
7617 bnx2x_pxp_prep(bp);
7618 barrier();
7619
7620 /* reset the chip */
7621 bnx2x_process_kill_chip_reset(bp);
7622 barrier();
7623
7624 /* Recover after reset: */
7625 /* MCP */
7626 if (bnx2x_reset_mcp_comp(bp, val))
7627 return -EAGAIN;
7628
7629 /* PXP */
7630 bnx2x_pxp_prep(bp);
7631
7632 /* Open the gates #2, #3 and #4 */
7633 bnx2x_set_234_gates(bp, false);
7634
7635 /* TBD: IGU/AEU preparation: bring the AEU/IGU back to a
7636 * reset state, re-enable attentions. */
7637
a2fbb9ea
ET
7638 return 0;
7639}
7640
72fd0718
VZ
7641static int bnx2x_leader_reset(struct bnx2x *bp)
7642{
7643 int rc = 0;
7644 /* Try to recover after the failure */
7645 if (bnx2x_process_kill(bp)) {
7646 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7647 bp->dev->name);
7648 rc = -EAGAIN;
7649 goto exit_leader_reset;
7650 }
7651
7652 /* Clear "reset is in progress" bit and update the driver state */
7653 bnx2x_set_reset_done(bp);
7654 bp->recovery_state = BNX2X_RECOVERY_DONE;
7655
7656exit_leader_reset:
7657 bp->is_leader = 0;
7658 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7659 smp_wmb();
7660 return rc;
7661}
7662
72fd0718
VZ
7663/* Assumption: runs under rtnl lock. This, together with the fact
7664 * that it's called only from bnx2x_reset_task(), ensures that it
7665 * will never be called when netif_running(bp->dev) is false.
7666 */
7667static void bnx2x_parity_recover(struct bnx2x *bp)
7668{
7669 DP(NETIF_MSG_HW, "Handling parity\n");
7670 while (1) {
7671 switch (bp->recovery_state) {
7672 case BNX2X_RECOVERY_INIT:
7673 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7674 /* Try to get a LEADER_LOCK HW lock */
7675 if (bnx2x_trylock_hw_lock(bp,
7676 HW_LOCK_RESOURCE_RESERVED_08))
7677 bp->is_leader = 1;
7678
7679 /* Stop the driver */
7680 /* If interface has been removed - break */
7681 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7682 return;
7683
7684 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7685 /* Ensure "is_leader" and "recovery_state"
7686 * update values are seen on other CPUs
7687 */
7688 smp_wmb();
7689 break;
7690
7691 case BNX2X_RECOVERY_WAIT:
7692 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7693 if (bp->is_leader) {
7694 u32 load_counter = bnx2x_get_load_cnt(bp);
7695 if (load_counter) {
7696 /* Wait until all other functions get
7697 * down.
7698 */
7699 schedule_delayed_work(&bp->reset_task,
7700 HZ/10);
7701 return;
7702 } else {
7703 /* If all other functions got down -
7704 * try to bring the chip back to
7705 * normal. In any case it's an exit
7706 * point for a leader.
7707 */
7708 if (bnx2x_leader_reset(bp) ||
7709 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7710 printk(KERN_ERR "%s: Recovery "
7711 "has failed. Power cycle is "
7712 "needed.\n", bp->dev->name);
7713 /* Disconnect this device */
7714 netif_device_detach(bp->dev);
7715 /* Block ifup for all function
7716 * of this ASIC until
7717 * "process kill" or power
7718 * cycle.
7719 */
7720 bnx2x_set_reset_in_progress(bp);
7721 /* Shut down the power */
7722 bnx2x_set_power_state(bp,
7723 PCI_D3hot);
7724 return;
7725 }
7726
7727 return;
7728 }
7729 } else { /* non-leader */
7730 if (!bnx2x_reset_is_done(bp)) {
7731 /* Try to get a LEADER_LOCK HW lock as
7732 * long as a former leader may have
7733 * been unloaded by the user or
7734 * released a leadership by another
7735 * reason.
7736 */
7737 if (bnx2x_trylock_hw_lock(bp,
7738 HW_LOCK_RESOURCE_RESERVED_08)) {
7739 /* I'm a leader now! Restart a
7740 * switch case.
7741 */
7742 bp->is_leader = 1;
7743 break;
7744 }
7745
7746 schedule_delayed_work(&bp->reset_task,
7747 HZ/10);
7748 return;
7749
7750 } else { /* A leader has completed
7751 * the "process kill". It's an exit
7752 * point for a non-leader.
7753 */
7754 bnx2x_nic_load(bp, LOAD_NORMAL);
7755 bp->recovery_state =
7756 BNX2X_RECOVERY_DONE;
7757 smp_wmb();
7758 return;
7759 }
7760 }
7761 default:
7762 return;
7763 }
7764 }
7765}
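/* The state machine above, in short: INIT grabs the leader lock if it
 * can, unloads the NIC and moves to WAIT. In WAIT a leader polls (via
 * the delayed work, every HZ/10) until the global load count drops to
 * zero, then runs the "process kill" and reloads; a non-leader either
 * inherits leadership, keeps polling, or simply reloads once the leader
 * has marked the reset done, ending in DONE.
 */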
7766
7767/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
7768 * scheduled on a general queue in order to prevent a deadlock.
7769 */
34f80b04
EG
7770static void bnx2x_reset_task(struct work_struct *work)
7771{
72fd0718 7772 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
34f80b04
EG
7773
7774#ifdef BNX2X_STOP_ON_ERROR
7775 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7776 " so reset not done to allow debug dump,\n"
72fd0718 7777 KERN_ERR " you will need to reboot when done\n");
34f80b04
EG
7778 return;
7779#endif
7780
7781 rtnl_lock();
7782
7783 if (!netif_running(bp->dev))
7784 goto reset_task_exit;
7785
72fd0718
VZ
7786 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7787 bnx2x_parity_recover(bp);
7788 else {
7789 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7790 bnx2x_nic_load(bp, LOAD_NORMAL);
7791 }
34f80b04
EG
7792
7793reset_task_exit:
7794 rtnl_unlock();
7795}
7796
a2fbb9ea
ET
7797/* end of nic load/unload */
7798
a2fbb9ea
ET
7799/*
7800 * Init service functions
7801 */
7802
8d96286a 7803static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
7804{
7805 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7806 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7807 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
7808}
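/* Sketch of the layout assumed above: the per-function pretend
 * registers sit at a constant stride, so for absolute function n the
 * address is PGL_PRETEND_FUNC_F0 + n * (PGL_PRETEND_FUNC_F1 - F0).
 */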
7809
f2e0899f 7810static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
f1ef27ef 7811{
f2e0899f 7812 u32 reg = bnx2x_get_pretend_reg(bp);
f1ef27ef
EG
7813
7814 /* Flush all outstanding writes */
7815 mmiowb();
7816
7817 /* Pretend to be function 0 */
7818 REG_WR(bp, reg, 0);
f2e0899f 7819 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
f1ef27ef
EG
7820
7821 /* From now we are in the "like-E1" mode */
7822 bnx2x_int_disable(bp);
7823
7824 /* Flush all outstanding writes */
7825 mmiowb();
7826
f2e0899f
DK
7827 /* Restore the original function */
7828 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7829 REG_RD(bp, reg);
f1ef27ef
EG
7830}
7831
f2e0899f 7832static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
f1ef27ef 7833{
f2e0899f 7834 if (CHIP_IS_E1(bp))
f1ef27ef 7835 bnx2x_int_disable(bp);
f2e0899f
DK
7836 else
7837 bnx2x_undi_int_disable_e1h(bp);
f1ef27ef
EG
7838}
7839
34f80b04
EG
7840static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7841{
7842 u32 val;
7843
7844 /* Check if there is any driver already loaded */
7845 val = REG_RD(bp, MISC_REG_UNPREPARED);
7846 if (val == 0x1) {
7847 /* Check if it is the UNDI driver:
7848 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7849 */
4a37fb66 7850 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7851 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7852 if (val == 0x7) {
7853 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
f2e0899f
DK
7854 /* save our pf_num */
7855 int orig_pf_num = bp->pf_num;
da5a662a
VZ
7856 u32 swap_en;
7857 u32 swap_val;
34f80b04 7858
b4661739
EG
7859 /* clear the UNDI indication */
7860 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7861
34f80b04
EG
7862 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7863
7864 /* try unload UNDI on port 0 */
f2e0899f 7865 bp->pf_num = 0;
da5a662a 7866 bp->fw_seq =
f2e0899f 7867 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7868 DRV_MSG_SEQ_NUMBER_MASK);
a22f0788 7869 reset_code = bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7870
7871 /* if UNDI is loaded on the other port */
7872 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7873
da5a662a 7874 /* send "DONE" for previous unload */
a22f0788
YR
7875 bnx2x_fw_command(bp,
7876 DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7877
7878 /* unload UNDI on port 1 */
f2e0899f 7879 bp->pf_num = 1;
da5a662a 7880 bp->fw_seq =
f2e0899f 7881 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a
VZ
7882 DRV_MSG_SEQ_NUMBER_MASK);
7883 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7884
a22f0788 7885 bnx2x_fw_command(bp, reset_code, 0);
34f80b04
EG
7886 }
7887
b4661739
EG
7888 /* now it's safe to release the lock */
7889 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7890
f2e0899f 7891 bnx2x_undi_int_disable(bp);
da5a662a
VZ
7892
7893 /* close input traffic and wait for it */
7894 /* Do not rcv packets to BRB */
7895 REG_WR(bp,
7896 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7897 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7898 /* Do not direct rcv packets that are not for MCP to
7899 * the BRB */
7900 REG_WR(bp,
7901 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7902 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7903 /* clear AEU */
7904 REG_WR(bp,
7905 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7906 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7907 msleep(10);
7908
7909 /* save NIG port swap info */
7910 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7911 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7912 /* reset device */
7913 REG_WR(bp,
7914 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7915 0xd3ffffff);
34f80b04
EG
7916 REG_WR(bp,
7917 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7918 0x1403);
da5a662a
VZ
7919 /* take the NIG out of reset and restore swap values */
7920 REG_WR(bp,
7921 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7922 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7923 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7924 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7925
7926 /* send unload done to the MCP */
a22f0788 7927 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
da5a662a
VZ
7928
7929 /* restore our func and fw_seq */
f2e0899f 7930 bp->pf_num = orig_pf_num;
da5a662a 7931 bp->fw_seq =
f2e0899f 7932 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
da5a662a 7933 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7934 } else
7935 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7936 }
7937}
7938
7939static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7940{
7941 u32 val, val2, val3, val4, id;
72ce58c3 7942 u16 pmc;
34f80b04
EG
7943
7944 /* Get the chip revision id and number. */
7945 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7946 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7947 id = ((val & 0xffff) << 16);
7948 val = REG_RD(bp, MISC_REG_CHIP_REV);
7949 id |= ((val & 0xf) << 12);
7950 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7951 id |= ((val & 0xff) << 4);
5a40e08e 7952 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7953 id |= (val & 0xf);
7954 bp->common.chip_id = id;
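	/* Resulting chip_id layout (from the packing above): bits 16-31
	 * chip num, 12-15 rev, 4-11 metal, 0-3 bond_id. E.g. hypothetical
	 * raw values num=0x164e, rev=0x1, metal=0x00, bond=0x0 yield
	 * chip_id = 0x164e1000.
	 */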
523224a3
DK
7955
7956 /* Set doorbell size */
7957 bp->db_size = (1 << BNX2X_DB_SHIFT);
7958
f2e0899f
DK
7959 if (CHIP_IS_E2(bp)) {
7960 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7961 if ((val & 1) == 0)
7962 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7963 else
7964 val = (val >> 1) & 1;
7965 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7966 "2_PORT_MODE");
7967 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7968 CHIP_2_PORT_MODE;
7969
7970 if (CHIP_MODE_IS_4_PORT(bp))
7971 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7972 else
7973 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7974 } else {
7975 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7976 bp->pfid = bp->pf_num; /* 0..7 */
7977 }
7978
523224a3
DK
7979 /*
7980 * set base FW non-default (fast path) status block id, this value is
7981 * used to initialize the fw_sb_id saved on the fp/queue structure to
7982 * determine the id used by the FW.
7983 */
f2e0899f
DK
7984 if (CHIP_IS_E1x(bp))
7985 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7986 else /* E2 */
7987 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7988
7989 bp->link_params.chip_id = bp->common.chip_id;
7990 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 7991
1c06328c
EG
7992 val = (REG_RD(bp, 0x2874) & 0x55);
7993 if ((bp->common.chip_id & 0x1) ||
7994 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7995 bp->flags |= ONE_PORT_FLAG;
7996 BNX2X_DEV_INFO("single port device\n");
7997 }
7998
34f80b04
EG
7999 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
8000 bp->common.flash_size = (NVRAM_1MB_SIZE <<
8001 (val & MCPR_NVM_CFG4_FLASH_SIZE));
8002 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
8003 bp->common.flash_size, bp->common.flash_size);
8004
8005 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
f2e0899f
DK
8006 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
8007 MISC_REG_GENERIC_CR_1 :
8008 MISC_REG_GENERIC_CR_0));
34f80b04 8009 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 8010 bp->link_params.shmem2_base = bp->common.shmem2_base;
2691d51d
EG
8011 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
8012 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 8013
f2e0899f 8014 if (!bp->common.shmem_base) {
34f80b04
EG
8015 BNX2X_DEV_INFO("MCP not active\n");
8016 bp->flags |= NO_MCP_FLAG;
8017 return;
8018 }
8019
8020 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8021 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
8022 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f2e0899f 8023 BNX2X_ERR("BAD MCP validity signature\n");
34f80b04
EG
8024
8025 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 8026 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
8027
8028 bp->link_params.hw_led_mode = ((bp->common.hw_config &
8029 SHARED_HW_CFG_LED_MODE_MASK) >>
8030 SHARED_HW_CFG_LED_MODE_SHIFT);
8031
c2c8b03e
EG
8032 bp->link_params.feature_config_flags = 0;
8033 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
8034 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
8035 bp->link_params.feature_config_flags |=
8036 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8037 else
8038 bp->link_params.feature_config_flags &=
8039 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
8040
34f80b04
EG
8041 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
8042 bp->common.bc_ver = val;
8043 BNX2X_DEV_INFO("bc_ver %X\n", val);
8044 if (val < BNX2X_BC_VER) {
8045 /* for now only warn;
8046 * later we might need to enforce this */
f2e0899f
DK
8047 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
8048 "please upgrade BC\n", BNX2X_BC_VER, val);
34f80b04 8049 }
4d295db0 8050 bp->link_params.feature_config_flags |=
a22f0788 8051 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
8052 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
8053
a22f0788
YR
8054 bp->link_params.feature_config_flags |=
8055 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
8056 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
72ce58c3
EG
8057
8058 if (BP_E1HVN(bp) == 0) {
8059 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
8060 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
8061 } else {
8062 /* no WOL capability for E1HVN != 0 */
8063 bp->flags |= NO_WOL_FLAG;
8064 }
8065 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 8066 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
8067
8068 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
8069 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
8070 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
8071 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
8072
cdaa7cb8
VZ
8073 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
8074 val, val2, val3, val4);
34f80b04
EG
8075}
8076
f2e0899f
DK
8077#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
8078#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
8079
8080static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
8081{
8082 int pfid = BP_FUNC(bp);
8083 int vn = BP_E1HVN(bp);
8084 int igu_sb_id;
8085 u32 val;
8086 u8 fid;
8087
8088 bp->igu_base_sb = 0xff;
8089 bp->igu_sb_cnt = 0;
8090 if (CHIP_INT_MODE_IS_BC(bp)) {
8091 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
ec6ba945 8092 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8093
8094 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
8095 FP_SB_MAX_E1x;
8096
8097 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
8098 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
8099
8100 return;
8101 }
8102
8103 /* IGU in normal mode - read CAM */
8104 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
8105 igu_sb_id++) {
8106 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
8107 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
8108 continue;
8109 fid = IGU_FID(val);
8110 if ((fid & IGU_FID_ENCODE_IS_PF)) {
8111 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
8112 continue;
8113 if (IGU_VEC(val) == 0)
8114 /* default status block */
8115 bp->igu_dsb_id = igu_sb_id;
8116 else {
8117 if (bp->igu_base_sb == 0xff)
8118 bp->igu_base_sb = igu_sb_id;
8119 bp->igu_sb_cnt++;
8120 }
8121 }
8122 }
ec6ba945
VZ
8123 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8124 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8125 if (bp->igu_sb_cnt == 0)
8126 BNX2X_ERR("CAM configuration error\n");
8127}
8128
34f80b04
EG
8129static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
8130 u32 switch_cfg)
a2fbb9ea 8131{
a22f0788
YR
8132 int cfg_size = 0, idx, port = BP_PORT(bp);
8133
8134 /* Aggregation of supported attributes of all external phys */
8135 bp->port.supported[0] = 0;
8136 bp->port.supported[1] = 0;
b7737c9b
YR
8137 switch (bp->link_params.num_phys) {
8138 case 1:
a22f0788
YR
8139 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
8140 cfg_size = 1;
8141 break;
b7737c9b 8142 case 2:
a22f0788
YR
8143 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
8144 cfg_size = 1;
8145 break;
8146 case 3:
8147 if (bp->link_params.multi_phy_config &
8148 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
8149 bp->port.supported[1] =
8150 bp->link_params.phy[EXT_PHY1].supported;
8151 bp->port.supported[0] =
8152 bp->link_params.phy[EXT_PHY2].supported;
8153 } else {
8154 bp->port.supported[0] =
8155 bp->link_params.phy[EXT_PHY1].supported;
8156 bp->port.supported[1] =
8157 bp->link_params.phy[EXT_PHY2].supported;
8158 }
8159 cfg_size = 2;
8160 break;
b7737c9b 8161 }
a2fbb9ea 8162
a22f0788 8163 if (!(bp->port.supported[0] || bp->port.supported[1])) {
b7737c9b 8164 BNX2X_ERR("NVRAM config error. BAD phy config."
a22f0788 8165 "PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 8166 SHMEM_RD(bp,
a22f0788
YR
8167 dev_info.port_hw_config[port].external_phy_config),
8168 SHMEM_RD(bp,
8169 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 8170 return;
f85582f8 8171 }
a2fbb9ea 8172
b7737c9b
YR
8173 switch (switch_cfg) {
8174 case SWITCH_CFG_1G:
34f80b04
EG
8175 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
8176 port*0x10);
8177 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8178 break;
8179
8180 case SWITCH_CFG_10G:
34f80b04
EG
8181 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
8182 port*0x18);
8183 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
8184 break;
8185
8186 default:
8187 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
a22f0788 8188 bp->port.link_config[0]);
a2fbb9ea
ET
8189 return;
8190 }
a22f0788
YR
8191 /* mask what we support according to speed_cap_mask per configuration */
8192 for (idx = 0; idx < cfg_size; idx++) {
8193 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8194 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 8195 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 8196
a22f0788 8197 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8198 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 8199 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 8200
a22f0788 8201 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8202 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 8203 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 8204
a22f0788 8205 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8206 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 8207 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 8208
a22f0788 8209 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8210 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 8211 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 8212 SUPPORTED_1000baseT_Full);
a2fbb9ea 8213
a22f0788 8214 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8215 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 8216 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 8217
a22f0788 8218 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 8219 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
8220 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
8221
8222 }
a2fbb9ea 8223
a22f0788
YR
8224 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
8225 bp->port.supported[1]);
a2fbb9ea
ET
8226}
8227
34f80b04 8228static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 8229{
a22f0788
YR
8230 u32 link_config, idx, cfg_size = 0;
8231 bp->port.advertising[0] = 0;
8232 bp->port.advertising[1] = 0;
8233 switch (bp->link_params.num_phys) {
8234 case 1:
8235 case 2:
8236 cfg_size = 1;
8237 break;
8238 case 3:
8239 cfg_size = 2;
8240 break;
8241 }
8242 for (idx = 0; idx < cfg_size; idx++) {
8243 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8244 link_config = bp->port.link_config[idx];
8245 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 8246 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
8247 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8248 bp->link_params.req_line_speed[idx] =
8249 SPEED_AUTO_NEG;
8250 bp->port.advertising[idx] |=
8251 bp->port.supported[idx];
f85582f8
DK
8252 } else {
8253 /* force 10G, no AN */
a22f0788
YR
8254 bp->link_params.req_line_speed[idx] =
8255 SPEED_10000;
8256 bp->port.advertising[idx] |=
8257 (ADVERTISED_10000baseT_Full |
f85582f8 8258 ADVERTISED_FIBRE);
a22f0788 8259 continue;
f85582f8
DK
8260 }
8261 break;
a2fbb9ea 8262
f85582f8 8263 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
8264 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8265 bp->link_params.req_line_speed[idx] =
8266 SPEED_10;
8267 bp->port.advertising[idx] |=
8268 (ADVERTISED_10baseT_Full |
f85582f8
DK
8269 ADVERTISED_TP);
8270 } else {
8271 BNX2X_ERROR("NVRAM config error. "
8272 "Invalid link_config 0x%x"
8273 " speed_cap_mask 0x%x\n",
8274 link_config,
a22f0788 8275 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8276 return;
8277 }
8278 break;
a2fbb9ea 8279
f85582f8 8280 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
8281 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8282 bp->link_params.req_line_speed[idx] =
8283 SPEED_10;
8284 bp->link_params.req_duplex[idx] =
8285 DUPLEX_HALF;
8286 bp->port.advertising[idx] |=
8287 (ADVERTISED_10baseT_Half |
f85582f8
DK
8288 ADVERTISED_TP);
8289 } else {
8290 BNX2X_ERROR("NVRAM config error. "
8291 "Invalid link_config 0x%x"
8292 " speed_cap_mask 0x%x\n",
8293 link_config,
8294 bp->link_params.speed_cap_mask[idx]);
8295 return;
8296 }
8297 break;
a2fbb9ea 8298
f85582f8
DK
8299 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8300 if (bp->port.supported[idx] &
8301 SUPPORTED_100baseT_Full) {
a22f0788
YR
8302 bp->link_params.req_line_speed[idx] =
8303 SPEED_100;
8304 bp->port.advertising[idx] |=
8305 (ADVERTISED_100baseT_Full |
f85582f8
DK
8306 ADVERTISED_TP);
8307 } else {
8308 BNX2X_ERROR("NVRAM config error. "
8309 "Invalid link_config 0x%x"
8310 " speed_cap_mask 0x%x\n",
8311 link_config,
8312 bp->link_params.speed_cap_mask[idx]);
8313 return;
8314 }
8315 break;
a2fbb9ea 8316
f85582f8
DK
8317 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8318 if (bp->port.supported[idx] &
8319 SUPPORTED_100baseT_Half) {
8320 bp->link_params.req_line_speed[idx] =
8321 SPEED_100;
8322 bp->link_params.req_duplex[idx] =
8323 DUPLEX_HALF;
a22f0788
YR
8324 bp->port.advertising[idx] |=
8325 (ADVERTISED_100baseT_Half |
f85582f8
DK
8326 ADVERTISED_TP);
8327 } else {
8328 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8329 "Invalid link_config 0x%x"
8330 " speed_cap_mask 0x%x\n",
a22f0788
YR
8331 link_config,
8332 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8333 return;
8334 }
8335 break;
a2fbb9ea 8336
f85582f8 8337 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
8338 if (bp->port.supported[idx] &
8339 SUPPORTED_1000baseT_Full) {
8340 bp->link_params.req_line_speed[idx] =
8341 SPEED_1000;
8342 bp->port.advertising[idx] |=
8343 (ADVERTISED_1000baseT_Full |
f85582f8
DK
8344 ADVERTISED_TP);
8345 } else {
8346 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8347 "Invalid link_config 0x%x"
8348 " speed_cap_mask 0x%x\n",
a22f0788
YR
8349 link_config,
8350 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
8351 return;
8352 }
8353 break;
a2fbb9ea 8354
f85582f8 8355 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
8356 if (bp->port.supported[idx] &
8357 SUPPORTED_2500baseX_Full) {
8358 bp->link_params.req_line_speed[idx] =
8359 SPEED_2500;
8360 bp->port.advertising[idx] |=
8361 (ADVERTISED_2500baseX_Full |
34f80b04 8362 ADVERTISED_TP);
f85582f8
DK
8363 } else {
8364 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8365 "Invalid link_config 0x%x"
8366 " speed_cap_mask 0x%x\n",
a22f0788 8367 link_config,
f85582f8
DK
8368 bp->link_params.speed_cap_mask[idx]);
8369 return;
8370 }
8371 break;
a2fbb9ea 8372
f85582f8
DK
8373 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8374 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8375 case PORT_FEATURE_LINK_SPEED_10G_KR:
a22f0788
YR
8376 if (bp->port.supported[idx] &
8377 SUPPORTED_10000baseT_Full) {
8378 bp->link_params.req_line_speed[idx] =
8379 SPEED_10000;
8380 bp->port.advertising[idx] |=
8381 (ADVERTISED_10000baseT_Full |
34f80b04 8382 ADVERTISED_FIBRE);
f85582f8
DK
8383 } else {
8384 BNX2X_ERROR("NVRAM config error. "
cdaa7cb8
VZ
8385 "Invalid link_config 0x%x"
8386 " speed_cap_mask 0x%x\n",
a22f0788 8387 link_config,
f85582f8
DK
8388 bp->link_params.speed_cap_mask[idx]);
8389 return;
8390 }
8391 break;
a2fbb9ea 8392
f85582f8
DK
8393 default:
8394 BNX2X_ERROR("NVRAM config error. "
8395 "BAD link speed link_config 0x%x\n",
8396 link_config);
8397 bp->link_params.req_line_speed[idx] =
8398 SPEED_AUTO_NEG;
8399 bp->port.advertising[idx] =
8400 bp->port.supported[idx];
8401 break;
8402 }
a2fbb9ea 8403
a22f0788 8404 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 8405 PORT_FEATURE_FLOW_CONTROL_MASK);
a22f0788
YR
8406 if ((bp->link_params.req_flow_ctrl[idx] ==
8407 BNX2X_FLOW_CTRL_AUTO) &&
8408 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8409 bp->link_params.req_flow_ctrl[idx] =
8410 BNX2X_FLOW_CTRL_NONE;
8411 }
a2fbb9ea 8412
a22f0788
YR
8413 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8414 " 0x%x advertising 0x%x\n",
8415 bp->link_params.req_line_speed[idx],
8416 bp->link_params.req_duplex[idx],
8417 bp->link_params.req_flow_ctrl[idx],
8418 bp->port.advertising[idx]);
8419 }
a2fbb9ea
ET
8420}
8421
e665bfda
MC
8422static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8423{
8424 mac_hi = cpu_to_be16(mac_hi);
8425 mac_lo = cpu_to_be32(mac_lo);
8426 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8427 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8428}
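/* Example for the helper above, with made-up values: mac_hi = 0x0050 and
 * mac_lo = 0xc24d8f01 produce the MAC 00:50:c2:4d:8f:01; the u16 lands
 * big-endian in bytes 0-1 and the u32 in bytes 2-5 of mac_buf.
 */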
8429
34f80b04 8430static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 8431{
34f80b04 8432 int port = BP_PORT(bp);
589abe3a 8433 u32 config;
6f38ad93 8434 u32 ext_phy_type, ext_phy_config;
a2fbb9ea 8435
c18487ee 8436 bp->link_params.bp = bp;
34f80b04 8437 bp->link_params.port = port;
c18487ee 8438
c18487ee 8439 bp->link_params.lane_config =
a2fbb9ea 8440 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 8441
a22f0788 8442 bp->link_params.speed_cap_mask[0] =
a2fbb9ea
ET
8443 SHMEM_RD(bp,
8444 dev_info.port_hw_config[port].speed_capability_mask);
a22f0788
YR
8445 bp->link_params.speed_cap_mask[1] =
8446 SHMEM_RD(bp,
8447 dev_info.port_hw_config[port].speed_capability_mask2);
8448 bp->port.link_config[0] =
a2fbb9ea
ET
8449 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8450
a22f0788
YR
8451 bp->port.link_config[1] =
8452 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 8453
a22f0788
YR
8454 bp->link_params.multi_phy_config =
8455 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
8456 /* If the device is capable of WoL, set the default state according
8457 * to the HW
8458 */
4d295db0 8459 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
8460 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8461 (config & PORT_FEATURE_WOL_ENABLED));
8462
f85582f8 8463 BNX2X_DEV_INFO("lane_config 0x%08x "
a22f0788 8464 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 8465 bp->link_params.lane_config,
a22f0788
YR
8466 bp->link_params.speed_cap_mask[0],
8467 bp->port.link_config[0]);
a2fbb9ea 8468
a22f0788 8469 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 8470 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 8471 bnx2x_phy_probe(&bp->link_params);
c18487ee 8472 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
8473
8474 bnx2x_link_settings_requested(bp);
8475
01cd4528
EG
8476 /*
8477 * If connected directly, work with the internal PHY; otherwise work
8478 * with the external PHY.
8479 */
b7737c9b
YR
8480 ext_phy_config =
8481 SHMEM_RD(bp,
8482 dev_info.port_hw_config[port].external_phy_config);
8483 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 8484 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 8485 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
8486
8487 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8488 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8489 bp->mdio.prtad =
b7737c9b 8490 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d
YR
8491
8492 /*
8493 * Check if a hw lock is required to access the MDC/MDIO bus to the PHY(s).
8494 * In MF mode, it is set to cover self-test cases.
8495 */
8496 if (IS_MF(bp))
8497 bp->port.need_hw_lock = 1;
8498 else
8499 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
8500 bp->common.shmem_base,
8501 bp->common.shmem2_base);
0793f83f 8502}
01cd4528 8503
2ba45142
VZ
8504#ifdef BCM_CNIC
8505static void __devinit bnx2x_get_cnic_info(struct bnx2x *bp)
8506{
8507 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8508 drv_lic_key[BP_PORT(bp)].max_iscsi_conn);
8509 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
8510 drv_lic_key[BP_PORT(bp)].max_fcoe_conn);
8511
8512 /* Get the number of maximum allowed iSCSI and FCoE connections */
8513 bp->cnic_eth_dev.max_iscsi_conn =
8514 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
8515 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
8516
8517 bp->cnic_eth_dev.max_fcoe_conn =
8518 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
8519 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
8520
8521 BNX2X_DEV_INFO("max_iscsi_conn 0x%x max_fcoe_conn 0x%x\n",
8522 bp->cnic_eth_dev.max_iscsi_conn,
8523 bp->cnic_eth_dev.max_fcoe_conn);
8524
8525	/* If the maximum allowed number of connections is zero,
8526 * disable the feature.
8527 */
8528 if (!bp->cnic_eth_dev.max_iscsi_conn)
8529 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8530
8531 if (!bp->cnic_eth_dev.max_fcoe_conn)
8532 bp->flags |= NO_FCOE_FLAG;
8533}
8534#endif
8535
0793f83f
DK
8536static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8537{
8538 u32 val, val2;
8539 int func = BP_ABS_FUNC(bp);
8540 int port = BP_PORT(bp);
2ba45142
VZ
8541#ifdef BCM_CNIC
8542 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
8543 u8 *fip_mac = bp->fip_mac;
8544#endif
0793f83f
DK
8545
8546 if (BP_NOMCP(bp)) {
8547 BNX2X_ERROR("warning: random MAC workaround active\n");
8548 random_ether_addr(bp->dev->dev_addr);
8549 } else if (IS_MF(bp)) {
8550 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8551 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8552 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8553 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8554 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
37b091ba
MC
8555
8556#ifdef BCM_CNIC
2ba45142
VZ
8557		/* iSCSI and FCoE NPAR MACs: if either the iSCSI or the FCoE
8558		 * MAC is missing, the corresponding feature should be disabled.
8559 */
0793f83f
DK
8560 if (IS_MF_SI(bp)) {
8561 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8562 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8563 val2 = MF_CFG_RD(bp, func_ext_config[func].
8564 iscsi_mac_addr_upper);
8565 val = MF_CFG_RD(bp, func_ext_config[func].
8566 iscsi_mac_addr_lower);
2ba45142
VZ
8567 BNX2X_DEV_INFO("Read iSCSI MAC: "
8568 "0x%x:0x%04x\n", val2, val);
8569 bnx2x_set_mac_buf(iscsi_mac, val, val2);
8570
8571 /* Disable iSCSI OOO if MAC configuration is
8572 * invalid.
8573 */
8574 if (!is_valid_ether_addr(iscsi_mac)) {
8575 bp->flags |= NO_ISCSI_OOO_FLAG |
8576 NO_ISCSI_FLAG;
8577 memset(iscsi_mac, 0, ETH_ALEN);
8578 }
8579 } else
8580 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
8581
8582 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
8583 val2 = MF_CFG_RD(bp, func_ext_config[func].
8584 fcoe_mac_addr_upper);
8585 val = MF_CFG_RD(bp, func_ext_config[func].
8586 fcoe_mac_addr_lower);
8587				BNX2X_DEV_INFO("Read FCoE MAC: "
8588 "0x%x:0x%04x\n", val2, val);
8589 bnx2x_set_mac_buf(fip_mac, val, val2);
8590
8591 /* Disable FCoE if MAC configuration is
8592 * invalid.
8593 */
8594 if (!is_valid_ether_addr(fip_mac)) {
8595 bp->flags |= NO_FCOE_FLAG;
8596 memset(bp->fip_mac, 0, ETH_ALEN);
8597 }
8598 } else
8599 bp->flags |= NO_FCOE_FLAG;
0793f83f 8600 }
37b091ba 8601#endif
0793f83f
DK
8602 } else {
8603 /* in SF read MACs from port configuration */
8604 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8605 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8606 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8607
8608#ifdef BCM_CNIC
8609 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8610 iscsi_mac_upper);
8611 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8612 iscsi_mac_lower);
2ba45142 8613 bnx2x_set_mac_buf(iscsi_mac, val, val2);
0793f83f
DK
8614#endif
8615 }
8616
8617 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8618 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8619
ec6ba945 8620#ifdef BCM_CNIC
2ba45142 8621	/* Set the FCoE MAC in modes other than MF_SI */
ec6ba945
VZ
8622 if (!CHIP_IS_E1x(bp)) {
8623 if (IS_MF_SD(bp))
2ba45142
VZ
8624 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
8625 else if (!IS_MF(bp))
8626 memcpy(fip_mac, iscsi_mac, ETH_ALEN);
ec6ba945
VZ
8627 }
8628#endif
34f80b04
EG
8629}
8630
8631static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8632{
0793f83f 8633 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 8634 int vn;
0793f83f 8635 u32 val = 0;
34f80b04 8636 int rc = 0;
a2fbb9ea 8637
34f80b04 8638 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 8639
f2e0899f
DK
8640 if (CHIP_IS_E1x(bp)) {
8641 bp->common.int_block = INT_BLOCK_HC;
8642
8643 bp->igu_dsb_id = DEF_SB_IGU_ID;
8644 bp->igu_base_sb = 0;
ec6ba945
VZ
8645 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8646 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
f2e0899f
DK
8647 } else {
8648 bp->common.int_block = INT_BLOCK_IGU;
8649 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8650 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8651 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8652 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8653 } else
8654 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
523224a3 8655
f2e0899f
DK
8656 bnx2x_get_igu_cam_info(bp);
8657
8658 }
8659 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8660 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8661
8662 /*
8663 * Initialize MF configuration
8664 */
523224a3 8665
fb3bff17
DK
8666 bp->mf_ov = 0;
8667 bp->mf_mode = 0;
f2e0899f 8668 vn = BP_E1HVN(bp);
0793f83f 8669
f2e0899f 8670 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
0793f83f
DK
8671 DP(NETIF_MSG_PROBE,
8672 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8673 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8674 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
f2e0899f
DK
8675 if (SHMEM2_HAS(bp, mf_cfg_addr))
8676 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8677 else
8678 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
8679 offsetof(struct shmem_region, func_mb) +
8680 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
8681 /*
8682 * get mf configuration:
25985edc 8683 * 1. existence of MF configuration
0793f83f
DK
8684 * 2. MAC address must be legal (check only upper bytes)
8685 * for Switch-Independent mode;
8686 * OVLAN must be legal for Switch-Dependent mode
8687 * 3. SF_MODE configures specific MF mode
8688 */
8689 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8690 /* get mf configuration */
8691 val = SHMEM_RD(bp,
8692 dev_info.shared_feature_config.config);
8693 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8694
8695 switch (val) {
8696 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8697 val = MF_CFG_RD(bp, func_mf_config[func].
8698 mac_upper);
8700				/* check for legal MAC (upper bytes) */
8700 if (val != 0xffff) {
8701 bp->mf_mode = MULTI_FUNCTION_SI;
8702 bp->mf_config[vn] = MF_CFG_RD(bp,
8703 func_mf_config[func].config);
8704 } else
8705 DP(NETIF_MSG_PROBE, "illegal MAC "
8706 "address for SI\n");
8707 break;
8708 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8709 /* get OV configuration */
8710 val = MF_CFG_RD(bp,
8711 func_mf_config[FUNC_0].e1hov_tag);
8712 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8713
8714 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8715 bp->mf_mode = MULTI_FUNCTION_SD;
8716 bp->mf_config[vn] = MF_CFG_RD(bp,
8717 func_mf_config[func].config);
8718 } else
8719 DP(NETIF_MSG_PROBE, "illegal OV for "
8720 "SD\n");
8721 break;
8722 default:
8723 /* Unknown configuration: reset mf_config */
8724 bp->mf_config[vn] = 0;
25985edc 8725 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
0793f83f
DK
8726 val);
8727 }
8728 }
a2fbb9ea 8729
2691d51d 8730 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 8731 IS_MF(bp) ? "multi" : "single");
2691d51d 8732
0793f83f
DK
8733 switch (bp->mf_mode) {
8734 case MULTI_FUNCTION_SD:
8735 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8736 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 8737 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 8738 bp->mf_ov = val;
0793f83f
DK
8739 BNX2X_DEV_INFO("MF OV for func %d is %d"
8740 " (0x%04x)\n", func,
8741 bp->mf_ov, bp->mf_ov);
2691d51d 8742 } else {
0793f83f
DK
8743 BNX2X_ERR("No valid MF OV for func %d,"
8744 " aborting\n", func);
34f80b04
EG
8745 rc = -EPERM;
8746 }
0793f83f
DK
8747 break;
8748 case MULTI_FUNCTION_SI:
8749 BNX2X_DEV_INFO("func %d is in MF "
8750 "switch-independent mode\n", func);
8751 break;
8752 default:
8753 if (vn) {
8754 BNX2X_ERR("VN %d in single function mode,"
8755 " aborting\n", vn);
2691d51d
EG
8756 rc = -EPERM;
8757 }
0793f83f 8758 break;
34f80b04 8759 }
0793f83f 8760
34f80b04 8761 }
a2fbb9ea 8762
f2e0899f
DK
8763 /* adjust igu_sb_cnt to MF for E1x */
8764 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
8765 bp->igu_sb_cnt /= E1HVN_MAX;
8766
f2e0899f
DK
8767 /*
8768	 * Adjust the E2 SB count: to be removed once the FW supports
8769	 * more than 16 L2 clients
8770 */
8771#define MAX_L2_CLIENTS 16
8772 if (CHIP_IS_E2(bp))
8773 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8774 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8775
34f80b04
EG
8776 if (!BP_NOMCP(bp)) {
8777 bnx2x_get_port_hwinfo(bp);
8778
f2e0899f
DK
8779 bp->fw_seq =
8780 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8781 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04
EG
8782 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8783 }
8784
0793f83f
DK
8785 /* Get MAC addresses */
8786 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 8787
2ba45142
VZ
8788#ifdef BCM_CNIC
8789 bnx2x_get_cnic_info(bp);
8790#endif
8791
34f80b04
EG
8792 return rc;
8793}
8794
34f24c7f
VZ
8795static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8796{
8797 int cnt, i, block_end, rodi;
8798 char vpd_data[BNX2X_VPD_LEN+1];
8799 char str_id_reg[VENDOR_ID_LEN+1];
8800 char str_id_cap[VENDOR_ID_LEN+1];
8801 u8 len;
8802
8803 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8804 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8805
8806 if (cnt < BNX2X_VPD_LEN)
8807 goto out_not_found;
8808
8809 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8810 PCI_VPD_LRDT_RO_DATA);
8811 if (i < 0)
8812 goto out_not_found;
8813
8814
8815 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8816 pci_vpd_lrdt_size(&vpd_data[i]);
8817
8818 i += PCI_VPD_LRDT_TAG_SIZE;
8819
8820 if (block_end > BNX2X_VPD_LEN)
8821 goto out_not_found;
8822
8823 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8824 PCI_VPD_RO_KEYWORD_MFR_ID);
8825 if (rodi < 0)
8826 goto out_not_found;
8827
8828 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8829
8830 if (len != VENDOR_ID_LEN)
8831 goto out_not_found;
8832
8833 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8834
8835 /* vendor specific info */
8836 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8837 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8838 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8839 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8840
8841 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8842 PCI_VPD_RO_KEYWORD_VENDOR0);
8843 if (rodi >= 0) {
8844 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8845
8846 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8847
8848 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8849 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8850 bp->fw_ver[len] = ' ';
8851 }
8852 }
8853 return;
8854 }
8855out_not_found:
8856 return;
8857}
8858
34f80b04
EG
8859static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8860{
f2e0899f 8861 int func;
87942b46 8862 int timer_interval;
34f80b04
EG
8863 int rc;
8864
da5a662a
VZ
8865 /* Disable interrupt handling until HW is initialized */
8866 atomic_set(&bp->intr_sem, 1);
e1510706 8867 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
da5a662a 8868
34f80b04 8869 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 8870 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 8871 spin_lock_init(&bp->stats_lock);
993ac7b5
MC
8872#ifdef BCM_CNIC
8873 mutex_init(&bp->cnic_mutex);
8874#endif
a2fbb9ea 8875
1cf167f2 8876 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
72fd0718 8877 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
34f80b04
EG
8878
8879 rc = bnx2x_get_hwinfo(bp);
8880
523224a3
DK
8881 if (!rc)
8882 rc = bnx2x_alloc_mem_bp(bp);
8883
34f24c7f 8884 bnx2x_read_fwinfo(bp);
f2e0899f
DK
8885
8886 func = BP_FUNC(bp);
8887
34f80b04
EG
8888 /* need to reset chip if undi was active */
8889 if (!BP_NOMCP(bp))
8890 bnx2x_undi_unload(bp);
8891
8892 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 8893 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
8894
8895 if (BP_NOMCP(bp) && (func == 0))
cdaa7cb8
VZ
8896 dev_err(&bp->pdev->dev, "MCP disabled, "
8897 "must load devices in order!\n");
34f80b04 8898
555f6c78 8899 bp->multi_mode = multi_mode;
5d7cd496 8900 bp->int_mode = int_mode;
555f6c78 8901
7a9b2557
VZ
8902 /* Set TPA flags */
8903 if (disable_tpa) {
8904 bp->flags &= ~TPA_ENABLE_FLAG;
8905 bp->dev->features &= ~NETIF_F_LRO;
8906 } else {
8907 bp->flags |= TPA_ENABLE_FLAG;
8908 bp->dev->features |= NETIF_F_LRO;
8909 }
5d7cd496 8910 bp->disable_tpa = disable_tpa;
7a9b2557 8911
a18f5128
EG
8912 if (CHIP_IS_E1(bp))
8913 bp->dropless_fc = 0;
8914 else
8915 bp->dropless_fc = dropless_fc;
8916
8d5726c4 8917 bp->mrrs = mrrs;
7a9b2557 8918
34f80b04 8919 bp->tx_ring_size = MAX_TX_AVAIL;
34f80b04 8920
7d323bfd 8921 /* make sure that the numbers are in the right granularity */
523224a3
DK
8922 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8923 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 8924
87942b46
EG
8925 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8926 bp->current_interval = (poll ? poll : timer_interval);
34f80b04
EG
8927
8928 init_timer(&bp->timer);
8929 bp->timer.expires = jiffies + bp->current_interval;
8930 bp->timer.data = (unsigned long) bp;
8931 bp->timer.function = bnx2x_timer;
8932
785b9b1a 8933 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
e4901dde
VZ
8934 bnx2x_dcbx_init_params(bp);
8935
34f80b04 8936 return rc;
a2fbb9ea
ET
8937}
8938
a2fbb9ea 8939
de0c62db
DK
8940/****************************************************************************
8941* General service functions
8942****************************************************************************/
a2fbb9ea 8943
bb2a0f7a 8944/* called with rtnl_lock */
a2fbb9ea
ET
8945static int bnx2x_open(struct net_device *dev)
8946{
8947 struct bnx2x *bp = netdev_priv(dev);
8948
6eccabb3
EG
8949 netif_carrier_off(dev);
8950
a2fbb9ea
ET
8951 bnx2x_set_power_state(bp, PCI_D0);
8952
72fd0718
VZ
8953 if (!bnx2x_reset_is_done(bp)) {
8954 do {
8955			/* Reset the MCP mailbox sequence if there is an ongoing
8956 * recovery
8957 */
8958 bp->fw_seq = 0;
8959
8960			/* If this is the first function to load and the reset-done
8961			 * flag is still not cleared, a previous recovery may not
8962			 * have completed. We don't check the attention state here
8963			 * because it may have already been cleared by a "common"
8964			 * reset, but we shall proceed with "process kill" anyway.
8965 */
8966 if ((bnx2x_get_load_cnt(bp) == 0) &&
8967 bnx2x_trylock_hw_lock(bp,
8968 HW_LOCK_RESOURCE_RESERVED_08) &&
8969 (!bnx2x_leader_reset(bp))) {
8970 DP(NETIF_MSG_HW, "Recovered in open\n");
8971 break;
8972 }
8973
8974 bnx2x_set_power_state(bp, PCI_D3hot);
8975
8976 printk(KERN_ERR"%s: Recovery flow hasn't been properly"
8977			       " completed yet. Try again later. If you still see this"
8978			       " message after a few retries then a power cycle is"
8979 " required.\n", bp->dev->name);
8980
8981 return -EAGAIN;
8982 } while (0);
8983 }
8984
8985 bp->recovery_state = BNX2X_RECOVERY_DONE;
8986
bb2a0f7a 8987 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
8988}
8989
bb2a0f7a 8990/* called with rtnl_lock */
a2fbb9ea
ET
8991static int bnx2x_close(struct net_device *dev)
8992{
a2fbb9ea
ET
8993 struct bnx2x *bp = netdev_priv(dev);
8994
8995 /* Unload the driver, release IRQs */
bb2a0f7a 8996 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
d3dbfee0 8997 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
8998
8999 return 0;
9000}
9001
6e30dd4e
VZ
9002#define E1_MAX_UC_LIST 29
9003#define E1H_MAX_UC_LIST 30
9004#define E2_MAX_UC_LIST 14
9005static inline u8 bnx2x_max_uc_list(struct bnx2x *bp)
9006{
9007 if (CHIP_IS_E1(bp))
9008 return E1_MAX_UC_LIST;
9009 else if (CHIP_IS_E1H(bp))
9010 return E1H_MAX_UC_LIST;
9011 else
9012 return E2_MAX_UC_LIST;
9013}
9014
9015
9016static inline u8 bnx2x_uc_list_cam_offset(struct bnx2x *bp)
9017{
9018 if (CHIP_IS_E1(bp))
9019 /* CAM Entries for Port0:
9020 * 0 - prim ETH MAC
9021 * 1 - BCAST MAC
9022 * 2 - iSCSI L2 ring ETH MAC
9023 * 3-31 - UC MACs
9024 *
9025 * Port1 entries are allocated the same way starting from
9026 * entry 32.
9027 */
9028 return 3 + 32 * BP_PORT(bp);
9029 else if (CHIP_IS_E1H(bp)) {
9030 /* CAM Entries:
9031 * 0-7 - prim ETH MAC for each function
9032 * 8-15 - iSCSI L2 ring ETH MAC for each function
9033		 * 16-255 - UC MAC lists for each function
9034 *
9035 * Remark: There is no FCoE support for E1H, thus FCoE related
9036 * MACs are not considered.
9037 */
9038 return E1H_FUNC_MAX * (CAM_ISCSI_ETH_LINE + 1) +
9039 bnx2x_max_uc_list(bp) * BP_FUNC(bp);
9040 } else {
9041 /* CAM Entries (there is a separate CAM per engine):
9042		 * 0-3   - prim ETH MAC for each function
9043 * 4-7 - iSCSI L2 ring ETH MAC for each function
9044 * 8-11 - FIP ucast L2 MAC for each function
9045 * 12-15 - ALL_ENODE_MACS mcast MAC for each function
9046		 * 16-71 - UC MAC lists for each function
9047 */
9048 u8 func_idx =
9049 (CHIP_MODE_IS_4_PORT(bp) ? BP_FUNC(bp) : BP_VN(bp));
9050
9051 return E2_FUNC_MAX * (CAM_MAX_PF_LINE + 1) +
9052 bnx2x_max_uc_list(bp) * func_idx;
9053 }
9054}
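/* Worked example (illustrative; assumes E1H_FUNC_MAX == 8 and
 * CAM_ISCSI_ETH_LINE == 1, consistent with the layout comment above
 * that starts the UC entries at 16): on E1H, function 2 is given
 * 8 * (1 + 1) + 30 * 2 = 76 as its first UC CAM entry.
 */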
9055
9056/* Set the UC list without waiting, since waiting implies sleeping
9057 * and set_rx_mode() can be invoked from a non-sleepable context.
9058 *
9059 * Instead we use the same ramrod data buffer each time we need
9060 * to configure a list of addresses, and use the fact that the
9061 * list of MACs is changed in an incremental way and that the
9062 * function is called under the netif_addr_lock. A temporary
9063 * inconsistent CAM configuration (possible in case of very fast
9064 * sequence of add/del/add on the host side) will shortly be
9065 * restored by the handler of the last ramrod.
9066 */
9067static int bnx2x_set_uc_list(struct bnx2x *bp)
9068{
9069 int i = 0, old;
9070 struct net_device *dev = bp->dev;
9071 u8 offset = bnx2x_uc_list_cam_offset(bp);
9072 struct netdev_hw_addr *ha;
9073 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9074 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9075
9076 if (netdev_uc_count(dev) > bnx2x_max_uc_list(bp))
9077 return -EINVAL;
9078
9079 netdev_for_each_uc_addr(ha, dev) {
9080 /* copy mac */
9081 config_cmd->config_table[i].msb_mac_addr =
9082 swab16(*(u16 *)&bnx2x_uc_addr(ha)[0]);
9083 config_cmd->config_table[i].middle_mac_addr =
9084 swab16(*(u16 *)&bnx2x_uc_addr(ha)[2]);
9085 config_cmd->config_table[i].lsb_mac_addr =
9086 swab16(*(u16 *)&bnx2x_uc_addr(ha)[4]);
9087
9088 config_cmd->config_table[i].vlan_id = 0;
9089 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
9090 config_cmd->config_table[i].clients_bit_vector =
9091 cpu_to_le32(1 << BP_L_ID(bp));
9092
9093 SET_FLAG(config_cmd->config_table[i].flags,
9094 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9095 T_ETH_MAC_COMMAND_SET);
9096
9097 DP(NETIF_MSG_IFUP,
9098 "setting UCAST[%d] (%04x:%04x:%04x)\n", i,
9099 config_cmd->config_table[i].msb_mac_addr,
9100 config_cmd->config_table[i].middle_mac_addr,
9101 config_cmd->config_table[i].lsb_mac_addr);
9102
9103 i++;
9104
9105 /* Set uc MAC in NIG */
9106 bnx2x_set_mac_in_nig(bp, 1, bnx2x_uc_addr(ha),
9107 LLH_CAM_ETH_LINE + i);
9108 }
9109 old = config_cmd->hdr.length;
9110 if (old > i) {
9111 for (; i < old; i++) {
9112 if (CAM_IS_INVALID(config_cmd->
9113 config_table[i])) {
9114 /* already invalidated */
9115 break;
9116 }
9117 /* invalidate */
9118 SET_FLAG(config_cmd->config_table[i].flags,
9119 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9120 T_ETH_MAC_COMMAND_INVALIDATE);
9121 }
9122 }
9123
9124 wmb();
9125
9126 config_cmd->hdr.length = i;
9127 config_cmd->hdr.offset = offset;
9128 config_cmd->hdr.client_id = 0xff;
9129 /* Mark that this ramrod doesn't use bp->set_mac_pending for
9130 * synchronization.
9131 */
9132 config_cmd->hdr.echo = 0;
9133
9134 mb();
9135
9136 return bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9137 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9138
9139}
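/* Usage note: bnx2x_set_uc_list() is called from bnx2x_set_rx_mode()
 * below, under netif_addr_lock; a -EINVAL return (more UC addresses
 * configured than the CAM can hold) makes the caller fall back to
 * BNX2X_RX_MODE_PROMISC.
 */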
9140
9141void bnx2x_invalidate_uc_list(struct bnx2x *bp)
9142{
9143 int i;
9144 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, uc_mac_config);
9145 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, uc_mac_config);
9146 int ramrod_flags = WAIT_RAMROD_COMMON;
9147 u8 offset = bnx2x_uc_list_cam_offset(bp);
9148 u8 max_list_size = bnx2x_max_uc_list(bp);
9149
9150 for (i = 0; i < max_list_size; i++) {
9151 SET_FLAG(config_cmd->config_table[i].flags,
9152 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
9153 T_ETH_MAC_COMMAND_INVALIDATE);
9154 bnx2x_set_mac_in_nig(bp, 0, NULL, LLH_CAM_ETH_LINE + 1 + i);
9155 }
9156
9157 wmb();
9158
9159 config_cmd->hdr.length = max_list_size;
9160 config_cmd->hdr.offset = offset;
9161 config_cmd->hdr.client_id = 0xff;
9162 /* We'll wait for a completion this time... */
9163 config_cmd->hdr.echo = 1;
9164
9165 bp->set_mac_pending = 1;
9166
9167 mb();
9168
9169 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
9170 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
9171
9172 /* Wait for a completion */
9173 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
9174 ramrod_flags);
9175
9176}
9177
9178static inline int bnx2x_set_mc_list(struct bnx2x *bp)
9179{
9180 /* some multicasts */
9181 if (CHIP_IS_E1(bp)) {
9182 return bnx2x_set_e1_mc_list(bp);
9183 } else { /* E1H and newer */
9184 return bnx2x_set_e1h_mc_list(bp);
9185 }
9186}
9187
f5372251 9188/* called with netif_tx_lock from dev_mcast.c */
9f6c9258 9189void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
9190{
9191 struct bnx2x *bp = netdev_priv(dev);
9192 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
9193
9194 if (bp->state != BNX2X_STATE_OPEN) {
9195 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
9196 return;
9197 }
9198
9199 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
9200
9201 if (dev->flags & IFF_PROMISC)
9202 rx_mode = BNX2X_RX_MODE_PROMISC;
6e30dd4e 9203 else if (dev->flags & IFF_ALLMULTI)
34f80b04 9204 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e
VZ
9205 else {
9206 /* some multicasts */
9207 if (bnx2x_set_mc_list(bp))
9208 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 9209
6e30dd4e
VZ
9210 /* some unicasts */
9211 if (bnx2x_set_uc_list(bp))
9212 rx_mode = BNX2X_RX_MODE_PROMISC;
34f80b04
EG
9213 }
9214
9215 bp->rx_mode = rx_mode;
9216 bnx2x_set_storm_rx_mode(bp);
9217}
9218
c18487ee 9219/* called with rtnl_lock */
01cd4528
EG
9220static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
9221 int devad, u16 addr)
a2fbb9ea 9222{
01cd4528
EG
9223 struct bnx2x *bp = netdev_priv(netdev);
9224 u16 value;
9225 int rc;
a2fbb9ea 9226
01cd4528
EG
9227 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
9228 prtad, devad, addr);
a2fbb9ea 9229
01cd4528
EG
9230 /* The HW expects different devad if CL22 is used */
9231 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 9232
01cd4528 9233 bnx2x_acquire_phy_lock(bp);
e10bc84d 9234 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
9235 bnx2x_release_phy_lock(bp);
9236 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 9237
01cd4528
EG
9238 if (!rc)
9239 rc = value;
9240 return rc;
9241}
a2fbb9ea 9242
01cd4528
EG
9243/* called with rtnl_lock */
9244static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
9245 u16 addr, u16 value)
9246{
9247 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
9248 int rc;
9249
9250 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
9251 " value 0x%x\n", prtad, devad, addr, value);
9252
01cd4528
EG
9253 /* The HW expects different devad if CL22 is used */
9254 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 9255
01cd4528 9256 bnx2x_acquire_phy_lock(bp);
e10bc84d 9257 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
9258 bnx2x_release_phy_lock(bp);
9259 return rc;
9260}
c18487ee 9261
01cd4528
EG
9262/* called with rtnl_lock */
9263static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
9264{
9265 struct bnx2x *bp = netdev_priv(dev);
9266 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 9267
01cd4528
EG
9268 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
9269 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 9270
01cd4528
EG
9271 if (!netif_running(dev))
9272 return -EAGAIN;
9273
9274 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
9275}
9276
257ddbda 9277#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
9278static void poll_bnx2x(struct net_device *dev)
9279{
9280 struct bnx2x *bp = netdev_priv(dev);
9281
9282 disable_irq(bp->pdev->irq);
9283 bnx2x_interrupt(bp->pdev->irq, dev);
9284 enable_irq(bp->pdev->irq);
9285}
9286#endif
9287
c64213cd
SH
9288static const struct net_device_ops bnx2x_netdev_ops = {
9289 .ndo_open = bnx2x_open,
9290 .ndo_stop = bnx2x_close,
9291 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 9292 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 9293 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd
SH
9294 .ndo_set_mac_address = bnx2x_change_mac_addr,
9295 .ndo_validate_addr = eth_validate_addr,
9296 .ndo_do_ioctl = bnx2x_ioctl,
9297 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
9298 .ndo_fix_features = bnx2x_fix_features,
9299 .ndo_set_features = bnx2x_set_features,
c64213cd 9300 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 9301#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
9302 .ndo_poll_controller = poll_bnx2x,
9303#endif
9304};
9305
34f80b04
EG
9306static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
9307 struct net_device *dev)
a2fbb9ea
ET
9308{
9309 struct bnx2x *bp;
9310 int rc;
9311
9312 SET_NETDEV_DEV(dev, &pdev->dev);
9313 bp = netdev_priv(dev);
9314
34f80b04
EG
9315 bp->dev = dev;
9316 bp->pdev = pdev;
a2fbb9ea 9317 bp->flags = 0;
f2e0899f 9318 bp->pf_num = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
9319
9320 rc = pci_enable_device(pdev);
9321 if (rc) {
cdaa7cb8
VZ
9322 dev_err(&bp->pdev->dev,
9323 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
9324 goto err_out;
9325 }
9326
9327 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9328 dev_err(&bp->pdev->dev,
9329 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
9330 rc = -ENODEV;
9331 goto err_out_disable;
9332 }
9333
9334 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
9335 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
9336 " base address, aborting\n");
a2fbb9ea
ET
9337 rc = -ENODEV;
9338 goto err_out_disable;
9339 }
9340
34f80b04
EG
9341 if (atomic_read(&pdev->enable_cnt) == 1) {
9342 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
9343 if (rc) {
cdaa7cb8
VZ
9344 dev_err(&bp->pdev->dev,
9345 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
9346 goto err_out_disable;
9347 }
a2fbb9ea 9348
34f80b04
EG
9349 pci_set_master(pdev);
9350 pci_save_state(pdev);
9351 }
a2fbb9ea
ET
9352
9353 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
9354 if (bp->pm_cap == 0) {
cdaa7cb8
VZ
9355 dev_err(&bp->pdev->dev,
9356 "Cannot find power management capability, aborting\n");
a2fbb9ea
ET
9357 rc = -EIO;
9358 goto err_out_release;
9359 }
9360
9361 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
9362 if (bp->pcie_cap == 0) {
cdaa7cb8
VZ
9363 dev_err(&bp->pdev->dev,
9364 "Cannot find PCI Express capability, aborting\n");
a2fbb9ea
ET
9365 rc = -EIO;
9366 goto err_out_release;
9367 }
9368
1a983142 9369 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
a2fbb9ea 9370 bp->flags |= USING_DAC_FLAG;
1a983142 9371 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
cdaa7cb8
VZ
9372 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
9373 " failed, aborting\n");
a2fbb9ea
ET
9374 rc = -EIO;
9375 goto err_out_release;
9376 }
9377
1a983142 9378 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
cdaa7cb8
VZ
9379 dev_err(&bp->pdev->dev,
9380 "System does not support DMA, aborting\n");
a2fbb9ea
ET
9381 rc = -EIO;
9382 goto err_out_release;
9383 }
9384
34f80b04
EG
9385 dev->mem_start = pci_resource_start(pdev, 0);
9386 dev->base_addr = dev->mem_start;
9387 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
9388
9389 dev->irq = pdev->irq;
9390
275f165f 9391 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 9392 if (!bp->regview) {
cdaa7cb8
VZ
9393 dev_err(&bp->pdev->dev,
9394 "Cannot map register space, aborting\n");
a2fbb9ea
ET
9395 rc = -ENOMEM;
9396 goto err_out_release;
9397 }
9398
34f80b04 9399 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
523224a3 9400 min_t(u64, BNX2X_DB_SIZE(bp),
34f80b04 9401 pci_resource_len(pdev, 2)));
a2fbb9ea 9402 if (!bp->doorbells) {
cdaa7cb8
VZ
9403 dev_err(&bp->pdev->dev,
9404 "Cannot map doorbell space, aborting\n");
a2fbb9ea
ET
9405 rc = -ENOMEM;
9406 goto err_out_unmap;
9407 }
9408
9409 bnx2x_set_power_state(bp, PCI_D0);
9410
34f80b04
EG
9411 /* clean indirect addresses */
9412 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
9413 PCICFG_VENDOR_ID_OFFSET);
9414 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
9415 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
9416 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
9417 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 9418
72fd0718
VZ
9419 /* Reset the load counter */
9420 bnx2x_clear_load_cnt(bp);
9421
34f80b04 9422 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 9423
c64213cd 9424 dev->netdev_ops = &bnx2x_netdev_ops;
de0c62db 9425 bnx2x_set_ethtool_ops(dev);
5316bc0b 9426
66371c44
MM
9427 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9428 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
9429 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_HW_VLAN_TX;
9430
9431 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
9432 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
9433
9434 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_RX;
5316bc0b 9435 if (bp->flags & USING_DAC_FLAG)
66371c44 9436 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 9437
98507672 9438#ifdef BCM_DCBNL
785b9b1a
SR
9439 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9440#endif
9441
01cd4528
EG
9442 /* get_port_hwinfo() will set prtad and mmds properly */
9443 bp->mdio.prtad = MDIO_PRTAD_NONE;
9444 bp->mdio.mmds = 0;
9445 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
9446 bp->mdio.dev = dev;
9447 bp->mdio.mdio_read = bnx2x_mdio_read;
9448 bp->mdio.mdio_write = bnx2x_mdio_write;
9449
a2fbb9ea
ET
9450 return 0;
9451
9452err_out_unmap:
9453 if (bp->regview) {
9454 iounmap(bp->regview);
9455 bp->regview = NULL;
9456 }
a2fbb9ea
ET
9457 if (bp->doorbells) {
9458 iounmap(bp->doorbells);
9459 bp->doorbells = NULL;
9460 }
9461
9462err_out_release:
34f80b04
EG
9463 if (atomic_read(&pdev->enable_cnt) == 1)
9464 pci_release_regions(pdev);
a2fbb9ea
ET
9465
9466err_out_disable:
9467 pci_disable_device(pdev);
9468 pci_set_drvdata(pdev, NULL);
9469
9470err_out:
9471 return rc;
9472}
9473
37f9ce62
EG
9474static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9475 int *width, int *speed)
25047950
ET
9476{
9477 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9478
37f9ce62 9479 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 9480
37f9ce62
EG
9481 /* return value of 1=2.5GHz 2=5GHz */
9482 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
25047950 9483}
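/* Illustrative decode (values made up): a link control readback with a
 * width field of 4 and a speed field of 2 is reported by
 * bnx2x_init_one() below as "PCI-E x4 5GHz (Gen2)" on E1x parts; a
 * speed field of 1 there would be reported as 2.5GHz.
 */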
37f9ce62 9484
6891dd25 9485static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 9486{
37f9ce62 9487 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
9488 struct bnx2x_fw_file_hdr *fw_hdr;
9489 struct bnx2x_fw_file_section *sections;
94a78b79 9490 u32 offset, len, num_ops;
37f9ce62 9491 u16 *ops_offsets;
94a78b79 9492 int i;
37f9ce62 9493 const u8 *fw_ver;
94a78b79
VZ
9494
9495 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9496 return -EINVAL;
9497
9498 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9499 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9500
9501 /* Make sure none of the offsets and sizes make us read beyond
9502 * the end of the firmware data */
9503 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9504 offset = be32_to_cpu(sections[i].offset);
9505 len = be32_to_cpu(sections[i].len);
9506 if (offset + len > firmware->size) {
cdaa7cb8
VZ
9507 dev_err(&bp->pdev->dev,
9508 "Section %d length is out of bounds\n", i);
94a78b79
VZ
9509 return -EINVAL;
9510 }
9511 }
9512
9513 /* Likewise for the init_ops offsets */
9514 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9515 ops_offsets = (u16 *)(firmware->data + offset);
9516 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9517
9518 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9519 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
cdaa7cb8
VZ
9520 dev_err(&bp->pdev->dev,
9521 "Section offset %d is out of bounds\n", i);
94a78b79
VZ
9522 return -EINVAL;
9523 }
9524 }
9525
9526 /* Check FW version */
9527 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9528 fw_ver = firmware->data + offset;
9529 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9530 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9531 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9532 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
cdaa7cb8
VZ
9533 dev_err(&bp->pdev->dev,
9534 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
94a78b79
VZ
9535 fw_ver[0], fw_ver[1], fw_ver[2],
9536 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9537 BCM_5710_FW_MINOR_VERSION,
9538 BCM_5710_FW_REVISION_VERSION,
9539 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 9540 return -EINVAL;
94a78b79
VZ
9541 }
9542
9543 return 0;
9544}
9545
ab6ad5a4 9546static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9547{
ab6ad5a4
EG
9548 const __be32 *source = (const __be32 *)_source;
9549 u32 *target = (u32 *)_target;
94a78b79 9550 u32 i;
94a78b79
VZ
9551
9552 for (i = 0; i < n/4; i++)
9553 target[i] = be32_to_cpu(source[i]);
9554}
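/* Example (illustrative): the firmware-file bytes 00 01 02 03 always
 * become the host u32 0x00010203 here, independent of host endianness,
 * because the source blob is defined to be big endian.
 */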
9555
9556/*
9557 Ops array is stored in the following format:
9558 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9559 */
ab6ad5a4 9560static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 9561{
ab6ad5a4
EG
9562 const __be32 *source = (const __be32 *)_source;
9563 struct raw_op *target = (struct raw_op *)_target;
94a78b79 9564 u32 i, j, tmp;
94a78b79 9565
ab6ad5a4 9566 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
9567 tmp = be32_to_cpu(source[j]);
9568 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
9569 target[i].offset = tmp & 0xffffff;
9570 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
9571 }
9572}
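/* Worked example (illustrative, made-up values): the big-endian pair
 * {0x05000010, 0x0000abcd} decodes to op = 0x05 (top byte),
 * offset = 0x000010 (low 24 bits) and raw_data = 0x0000abcd.
 */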
ab6ad5a4 9573
523224a3
DK
9574/**
9575 * IRO array is stored in the following format:
9576 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9577 */
9578static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9579{
9580 const __be32 *source = (const __be32 *)_source;
9581 struct iro *target = (struct iro *)_target;
9582 u32 i, j, tmp;
9583
9584 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9585 target[i].base = be32_to_cpu(source[j]);
9586 j++;
9587 tmp = be32_to_cpu(source[j]);
9588 target[i].m1 = (tmp >> 16) & 0xffff;
9589 target[i].m2 = tmp & 0xffff;
9590 j++;
9591 tmp = be32_to_cpu(source[j]);
9592 target[i].m3 = (tmp >> 16) & 0xffff;
9593 target[i].size = tmp & 0xffff;
9594 j++;
9595 }
9596}
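/* Worked example (illustrative): each IRO entry consumes three
 * big-endian 32-bit words, so {0x00003000, 0x00100020, 0x00400008}
 * decodes to base = 0x3000, m1 = 0x10, m2 = 0x20, m3 = 0x40, size = 8.
 */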
9597
ab6ad5a4 9598static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 9599{
ab6ad5a4
EG
9600 const __be16 *source = (const __be16 *)_source;
9601 u16 *target = (u16 *)_target;
94a78b79 9602 u32 i;
94a78b79
VZ
9603
9604 for (i = 0; i < n/2; i++)
9605 target[i] = be16_to_cpu(source[i]);
9606}
9607
7995c64e
JP
9608#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
9609do { \
9610 u32 len = be32_to_cpu(fw_hdr->arr.len); \
9611 bp->arr = kmalloc(len, GFP_KERNEL); \
9612 if (!bp->arr) { \
9613 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
9614 goto lbl; \
9615 } \
9616 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
9617 (u8 *)bp->arr, len); \
9618} while (0)
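/* Usage sketch: BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit,
 * be32_to_cpu_n), as used below, allocates fw_hdr->init_data.len bytes
 * for bp->init_data and byte-swaps the blob in from bp->firmware->data;
 * the middle argument is the goto label taken on allocation failure.
 */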
94a78b79 9619
6891dd25 9620int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 9621{
45229b42 9622 const char *fw_file_name;
94a78b79 9623 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 9624 int rc;
94a78b79 9625
94a78b79 9626 if (CHIP_IS_E1(bp))
45229b42 9627 fw_file_name = FW_FILE_NAME_E1;
cdaa7cb8 9628 else if (CHIP_IS_E1H(bp))
45229b42 9629 fw_file_name = FW_FILE_NAME_E1H;
f2e0899f
DK
9630 else if (CHIP_IS_E2(bp))
9631 fw_file_name = FW_FILE_NAME_E2;
cdaa7cb8 9632 else {
6891dd25 9633 BNX2X_ERR("Unsupported chip revision\n");
cdaa7cb8
VZ
9634 return -EINVAL;
9635 }
94a78b79 9636
6891dd25 9637 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 9638
6891dd25 9639 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
94a78b79 9640 if (rc) {
6891dd25 9641 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
94a78b79
VZ
9642 goto request_firmware_exit;
9643 }
9644
9645 rc = bnx2x_check_firmware(bp);
9646 if (rc) {
6891dd25 9647 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
94a78b79
VZ
9648 goto request_firmware_exit;
9649 }
9650
9651 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
9652
9653 /* Initialize the pointers to the init arrays */
9654 /* Blob */
9655 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
9656
9657 /* Opcodes */
9658 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
9659
9660 /* Offsets */
ab6ad5a4
EG
9661 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
9662 be16_to_cpu_n);
94a78b79
VZ
9663
9664 /* STORMs firmware */
573f2035
EG
9665 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9666 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9667 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9668 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9669 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9670 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9671 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9672 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9673 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9674 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9675 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9676 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9677 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9678 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9679 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9680 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
9681 /* IRO */
9682 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
9683
9684 return 0;
ab6ad5a4 9685
523224a3
DK
9686iro_alloc_err:
9687 kfree(bp->init_ops_offsets);
94a78b79
VZ
9688init_offsets_alloc_err:
9689 kfree(bp->init_ops);
9690init_ops_alloc_err:
9691 kfree(bp->init_data);
9692request_firmware_exit:
9693 release_firmware(bp->firmware);
9694
9695 return rc;
9696}
9697
523224a3
DK
9698static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9699{
9700 int cid_count = L2_FP_COUNT(l2_cid_count);
94a78b79 9701
523224a3
DK
9702#ifdef BCM_CNIC
9703 cid_count += CNIC_CID_MAX;
9704#endif
9705 return roundup(cid_count, QM_CID_ROUND);
9706}
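/* Worked example (illustrative; assumes QM_CID_ROUND == 1024): any
 * total CID count up to 1024 rounds to one 1024-entry QM block, and
 * 1025 would round to 2048.
 */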
f85582f8 9707
a2fbb9ea
ET
9708static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9709 const struct pci_device_id *ent)
9710{
a2fbb9ea
ET
9711 struct net_device *dev = NULL;
9712 struct bnx2x *bp;
37f9ce62 9713 int pcie_width, pcie_speed;
523224a3
DK
9714 int rc, cid_count;
9715
f2e0899f
DK
9716 switch (ent->driver_data) {
9717 case BCM57710:
9718 case BCM57711:
9719 case BCM57711E:
9720 cid_count = FP_SB_MAX_E1x;
9721 break;
9722
9723 case BCM57712:
9724 case BCM57712E:
9725 cid_count = FP_SB_MAX_E2;
9726 break;
a2fbb9ea 9727
f2e0899f
DK
9728 default:
9729 pr_err("Unknown board_type (%ld), aborting\n",
9730 ent->driver_data);
870634b0 9731 return -ENODEV;
f2e0899f
DK
9732 }
9733
ec6ba945 9734 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
f85582f8 9735
a2fbb9ea 9736 /* dev zeroed in init_etherdev */
523224a3 9737 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
34f80b04 9738 if (!dev) {
cdaa7cb8 9739 dev_err(&pdev->dev, "Cannot allocate net device\n");
a2fbb9ea 9740 return -ENOMEM;
34f80b04 9741 }
a2fbb9ea 9742
a2fbb9ea 9743 bp = netdev_priv(dev);
7995c64e 9744 bp->msg_enable = debug;
a2fbb9ea 9745
df4770de
EG
9746 pci_set_drvdata(pdev, dev);
9747
523224a3
DK
9748 bp->l2_cid_count = cid_count;
9749
34f80b04 9750 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
9751 if (rc < 0) {
9752 free_netdev(dev);
9753 return rc;
9754 }
9755
34f80b04 9756 rc = bnx2x_init_bp(bp);
693fc0d1
EG
9757 if (rc)
9758 goto init_one_exit;
9759
523224a3
DK
9760 /* calc qm_cid_count */
9761 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9762
ec6ba945
VZ
9763#ifdef BCM_CNIC
9764 /* disable FCOE L2 queue for E1x*/
9765 if (CHIP_IS_E1x(bp))
9766 bp->flags |= NO_FCOE_FLAG;
9767
9768#endif
9769
25985edc 9770 /* Configure interrupt mode: try to enable MSI-X/MSI if
d6214d7a
DK
9771 * needed, set bp->num_queues appropriately.
9772 */
9773 bnx2x_set_int_mode(bp);
9774
9775 /* Add all NAPI objects */
9776 bnx2x_add_all_napi(bp);
9777
b340007f
VZ
9778 rc = register_netdev(dev);
9779 if (rc) {
9780 dev_err(&pdev->dev, "Cannot register net device\n");
9781 goto init_one_exit;
9782 }
9783
ec6ba945
VZ
9784#ifdef BCM_CNIC
9785 if (!NO_FCOE(bp)) {
9786 /* Add storage MAC address */
9787 rtnl_lock();
9788 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9789 rtnl_unlock();
9790 }
9791#endif
9792
37f9ce62 9793 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
d6214d7a 9794
cdaa7cb8
VZ
9795 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9796 " IRQ %d, ", board_info[ent->driver_data].name,
9797 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
f2e0899f
DK
9798 pcie_width,
9799 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9800 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9801 "5GHz (Gen2)" : "2.5GHz",
cdaa7cb8
VZ
9802 dev->base_addr, bp->pdev->irq);
9803 pr_cont("node addr %pM\n", dev->dev_addr);
c016201c 9804
a2fbb9ea 9805 return 0;
34f80b04
EG
9806
9807init_one_exit:
9808 if (bp->regview)
9809 iounmap(bp->regview);
9810
9811 if (bp->doorbells)
9812 iounmap(bp->doorbells);
9813
9814 free_netdev(dev);
9815
9816 if (atomic_read(&pdev->enable_cnt) == 1)
9817 pci_release_regions(pdev);
9818
9819 pci_disable_device(pdev);
9820 pci_set_drvdata(pdev, NULL);
9821
9822 return rc;
a2fbb9ea
ET
9823}
9824
9825static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9826{
9827 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
9828 struct bnx2x *bp;
9829
9830 if (!dev) {
cdaa7cb8 9831 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
228241eb
ET
9832 return;
9833 }
228241eb 9834 bp = netdev_priv(dev);
a2fbb9ea 9835
ec6ba945
VZ
9836#ifdef BCM_CNIC
9837 /* Delete storage MAC address */
9838 if (!NO_FCOE(bp)) {
9839 rtnl_lock();
9840 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9841 rtnl_unlock();
9842 }
9843#endif
9844
98507672
SR
9845#ifdef BCM_DCBNL
9846 /* Delete app tlvs from dcbnl */
9847 bnx2x_dcbnl_update_applist(bp, true);
9848#endif
9849
a2fbb9ea
ET
9850 unregister_netdev(dev);
9851
d6214d7a
DK
9852 /* Delete all NAPI objects */
9853 bnx2x_del_all_napi(bp);
9854
084d6cbb
VZ
9855 /* Power on: we can't let PCI layer write to us while we are in D3 */
9856 bnx2x_set_power_state(bp, PCI_D0);
9857
d6214d7a
DK
9858 /* Disable MSI/MSI-X */
9859 bnx2x_disable_msi(bp);
f85582f8 9860
084d6cbb
VZ
9861 /* Power off */
9862 bnx2x_set_power_state(bp, PCI_D3hot);
9863
72fd0718
VZ
9864 /* Make sure RESET task is not scheduled before continuing */
9865 cancel_delayed_work_sync(&bp->reset_task);
9866
a2fbb9ea
ET
9867 if (bp->regview)
9868 iounmap(bp->regview);
9869
9870 if (bp->doorbells)
9871 iounmap(bp->doorbells);
9872
523224a3
DK
9873 bnx2x_free_mem_bp(bp);
9874
a2fbb9ea 9875 free_netdev(dev);
34f80b04
EG
9876
9877 if (atomic_read(&pdev->enable_cnt) == 1)
9878 pci_release_regions(pdev);
9879
a2fbb9ea
ET
9880 pci_disable_device(pdev);
9881 pci_set_drvdata(pdev, NULL);
9882}
9883
f8ef6e44
YG
9884static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9885{
9886 int i;
9887
9888 bp->state = BNX2X_STATE_ERROR;
9889
9890 bp->rx_mode = BNX2X_RX_MODE_NONE;
9891
9892 bnx2x_netif_stop(bp, 0);
c89af1a3 9893 netif_carrier_off(bp->dev);
f8ef6e44
YG
9894
9895 del_timer_sync(&bp->timer);
9896 bp->stats_state = STATS_STATE_DISABLED;
9897 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9898
9899 /* Release IRQs */
d6214d7a 9900 bnx2x_free_irq(bp);
f8ef6e44 9901
f8ef6e44
YG
9902 /* Free SKBs, SGEs, TPA pool and driver internals */
9903 bnx2x_free_skbs(bp);
523224a3 9904
ec6ba945 9905 for_each_rx_queue(bp, i)
f8ef6e44 9906 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
d6214d7a 9907
f8ef6e44
YG
9908 bnx2x_free_mem(bp);
9909
9910 bp->state = BNX2X_STATE_CLOSED;
9911
f8ef6e44
YG
9912 return 0;
9913}
9914
9915static void bnx2x_eeh_recover(struct bnx2x *bp)
9916{
9917 u32 val;
9918
9919 mutex_init(&bp->port.phy_mutex);
9920
9921 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9922 bp->link_params.shmem_base = bp->common.shmem_base;
9923 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9924
9925 if (!bp->common.shmem_base ||
9926 (bp->common.shmem_base < 0xA0000) ||
9927 (bp->common.shmem_base >= 0xC0000)) {
9928 BNX2X_DEV_INFO("MCP not active\n");
9929 bp->flags |= NO_MCP_FLAG;
9930 return;
9931 }
9932
9933 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9934 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9935 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9936 BNX2X_ERR("BAD MCP validity signature\n");
9937
9938 if (!BP_NOMCP(bp)) {
f2e0899f
DK
9939 bp->fw_seq =
9940 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9941 DRV_MSG_SEQ_NUMBER_MASK);
f8ef6e44
YG
9942 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9943 }
9944}
9945
493adb1f
WX
9946/**
9947 * bnx2x_io_error_detected - called when PCI error is detected
9948 * @pdev: Pointer to PCI device
9949 * @state: The current pci connection state
9950 *
9951 * This function is called after a PCI bus error affecting
9952 * this device has been detected.
9953 */
9954static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9955 pci_channel_state_t state)
9956{
9957 struct net_device *dev = pci_get_drvdata(pdev);
9958 struct bnx2x *bp = netdev_priv(dev);
9959
9960 rtnl_lock();
9961
9962 netif_device_detach(dev);
9963
07ce50e4
DN
9964 if (state == pci_channel_io_perm_failure) {
9965 rtnl_unlock();
9966 return PCI_ERS_RESULT_DISCONNECT;
9967 }
9968
493adb1f 9969 if (netif_running(dev))
f8ef6e44 9970 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
9971
9972 pci_disable_device(pdev);
9973
9974 rtnl_unlock();
9975
9976 /* Request a slot reset */
9977 return PCI_ERS_RESULT_NEED_RESET;
9978}
9979
9980/**
9981 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9982 * @pdev: Pointer to PCI device
9983 *
9984 * Restart the card from scratch, as if from a cold-boot.
9985 */
9986static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9987{
9988 struct net_device *dev = pci_get_drvdata(pdev);
9989 struct bnx2x *bp = netdev_priv(dev);
9990
9991 rtnl_lock();
9992
9993 if (pci_enable_device(pdev)) {
9994 dev_err(&pdev->dev,
9995 "Cannot re-enable PCI device after reset\n");
9996 rtnl_unlock();
9997 return PCI_ERS_RESULT_DISCONNECT;
9998 }
9999
10000 pci_set_master(pdev);
10001 pci_restore_state(pdev);
10002
10003 if (netif_running(dev))
10004 bnx2x_set_power_state(bp, PCI_D0);
10005
10006 rtnl_unlock();
10007
10008 return PCI_ERS_RESULT_RECOVERED;
10009}
10010
10011/**
10012 * bnx2x_io_resume - called when traffic can start flowing again
10013 * @pdev: Pointer to PCI device
10014 *
10015 * This callback is called when the error recovery driver tells us that
10016 * it's OK to resume normal operation.
10017 */
10018static void bnx2x_io_resume(struct pci_dev *pdev)
10019{
10020 struct net_device *dev = pci_get_drvdata(pdev);
10021 struct bnx2x *bp = netdev_priv(dev);
10022
72fd0718 10023 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
f2e0899f
DK
10024 printk(KERN_ERR "Handling parity error recovery. "
10025 "Try again later\n");
72fd0718
VZ
10026 return;
10027 }
10028
493adb1f
WX
10029 rtnl_lock();
10030
f8ef6e44
YG
10031 bnx2x_eeh_recover(bp);
10032
493adb1f 10033 if (netif_running(dev))
f8ef6e44 10034 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
10035
10036 netif_device_attach(dev);
10037
10038 rtnl_unlock();
10039}
10040
10041static struct pci_error_handlers bnx2x_err_handler = {
10042 .error_detected = bnx2x_io_error_detected,
356e2385
EG
10043 .slot_reset = bnx2x_io_slot_reset,
10044 .resume = bnx2x_io_resume,
493adb1f
WX
10045};
10046
a2fbb9ea 10047static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
10048 .name = DRV_MODULE_NAME,
10049 .id_table = bnx2x_pci_tbl,
10050 .probe = bnx2x_init_one,
10051 .remove = __devexit_p(bnx2x_remove_one),
10052 .suspend = bnx2x_suspend,
10053 .resume = bnx2x_resume,
10054 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
10055};
10056
10057static int __init bnx2x_init(void)
10058{
dd21ca6d
SG
10059 int ret;
10060
7995c64e 10061 pr_info("%s", version);
938cf541 10062
1cf167f2
EG
10063 bnx2x_wq = create_singlethread_workqueue("bnx2x");
10064 if (bnx2x_wq == NULL) {
7995c64e 10065 pr_err("Cannot create workqueue\n");
1cf167f2
EG
10066 return -ENOMEM;
10067 }
10068
dd21ca6d
SG
10069 ret = pci_register_driver(&bnx2x_pci_driver);
10070 if (ret) {
7995c64e 10071 pr_err("Cannot register driver\n");
dd21ca6d
SG
10072 destroy_workqueue(bnx2x_wq);
10073 }
10074 return ret;
a2fbb9ea
ET
10075}
10076
10077static void __exit bnx2x_cleanup(void)
10078{
10079 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
10080
10081 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
10082}
10083
10084module_init(bnx2x_init);
10085module_exit(bnx2x_cleanup);
10086
993ac7b5
MC
10087#ifdef BCM_CNIC
10088
10089/* count denotes the number of new completions we have seen */
10090static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
10091{
10092 struct eth_spe *spe;
10093
10094#ifdef BNX2X_STOP_ON_ERROR
10095 if (unlikely(bp->panic))
10096 return;
10097#endif
10098
10099 spin_lock_bh(&bp->spq_lock);
c2bff63f 10100 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
10101 bp->cnic_spq_pending -= count;
10102
993ac7b5 10103
c2bff63f
DK
10104 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
10105 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
10106 & SPE_HDR_CONN_TYPE) >>
10107 SPE_HDR_CONN_TYPE_SHIFT;
10108
10109 /* Set validation for iSCSI L2 client before sending SETUP
10110 * ramrod
10111 */
10112 if (type == ETH_CONNECTION_TYPE) {
10113 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
10114 hdr.conn_and_cmd_data) >>
10115 SPE_HDR_CMD_ID_SHIFT) & 0xff;
10116
10117 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
10118 bnx2x_set_ctx_validation(&bp->context.
10119 vcxt[BNX2X_ISCSI_ETH_CID].eth,
10120 HW_CID(bp, BNX2X_ISCSI_ETH_CID));
10121 }
10122
6e30dd4e
VZ
10123		/* There may be no more than 8 L2 and no more than 8 L5 SPEs.
10124 * We also check that the number of outstanding
10125 * COMMON ramrods is not more than the EQ and SPQ can
10126 * accommodate.
c2bff63f 10127 */
6e30dd4e
VZ
10128 if (type == ETH_CONNECTION_TYPE) {
10129 if (!atomic_read(&bp->cq_spq_left))
10130 break;
10131 else
10132 atomic_dec(&bp->cq_spq_left);
10133 } else if (type == NONE_CONNECTION_TYPE) {
10134 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
10135 break;
10136 else
6e30dd4e 10137 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
10138 } else if ((type == ISCSI_CONNECTION_TYPE) ||
10139 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
10140 if (bp->cnic_spq_pending >=
10141 bp->cnic_eth_dev.max_kwqe_pending)
10142 break;
10143 else
10144 bp->cnic_spq_pending++;
10145 } else {
10146 BNX2X_ERR("Unknown SPE type: %d\n", type);
10147 bnx2x_panic();
993ac7b5 10148 break;
c2bff63f 10149 }
993ac7b5
MC
10150
10151 spe = bnx2x_sp_get_next(bp);
10152 *spe = *bp->cnic_kwq_cons;
10153
993ac7b5
MC
10154 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
10155 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
10156
10157 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
10158 bp->cnic_kwq_cons = bp->cnic_kwq;
10159 else
10160 bp->cnic_kwq_cons++;
10161 }
10162 bnx2x_sp_prod_update(bp);
10163 spin_unlock_bh(&bp->spq_lock);
10164}
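/* Credit accounting, as implemented above: ETH SPEs draw from
 * cq_spq_left, COMMON SPEs from eq_spq_left, and iSCSI/FCoE kwqes from
 * the cnic_spq_pending budget; when credit is returned (see
 * DRV_CTL_RET_L5_SPQ_CREDIT_CMD below) bnx2x_cnic_sp_post() is invoked
 * again to drain any kwqes that were waiting.
 */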
10165
10166static int bnx2x_cnic_sp_queue(struct net_device *dev,
10167 struct kwqe_16 *kwqes[], u32 count)
10168{
10169 struct bnx2x *bp = netdev_priv(dev);
10170 int i;
10171
10172#ifdef BNX2X_STOP_ON_ERROR
10173 if (unlikely(bp->panic))
10174 return -EIO;
10175#endif
10176
10177 spin_lock_bh(&bp->spq_lock);
10178
10179 for (i = 0; i < count; i++) {
10180 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
10181
10182 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
10183 break;
10184
10185 *bp->cnic_kwq_prod = *spe;
10186
10187 bp->cnic_kwq_pending++;
10188
10189 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
10190 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
10191 spe->data.update_data_addr.hi,
10192 spe->data.update_data_addr.lo,
993ac7b5
MC
10193 bp->cnic_kwq_pending);
10194
10195 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
10196 bp->cnic_kwq_prod = bp->cnic_kwq;
10197 else
10198 bp->cnic_kwq_prod++;
10199 }
10200
10201 spin_unlock_bh(&bp->spq_lock);
10202
10203 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
10204 bnx2x_cnic_sp_post(bp, 0);
10205
10206 return i;
10207}
10208
10209static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10210{
10211 struct cnic_ops *c_ops;
10212 int rc = 0;
10213
10214 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
10215 c_ops = rcu_dereference_protected(bp->cnic_ops,
10216 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
10217 if (c_ops)
10218 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10219 mutex_unlock(&bp->cnic_mutex);
10220
10221 return rc;
10222}
10223
10224static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
10225{
10226 struct cnic_ops *c_ops;
10227 int rc = 0;
10228
10229 rcu_read_lock();
10230 c_ops = rcu_dereference(bp->cnic_ops);
10231 if (c_ops)
10232 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
10233 rcu_read_unlock();
10234
10235 return rc;
10236}
10237
10238/*
10239 * for commands that have no data
10240 */
9f6c9258 10241int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
10242{
10243 struct cnic_ctl_info ctl = {0};
10244
10245 ctl.cmd = cmd;
10246
10247 return bnx2x_cnic_ctl_send(bp, &ctl);
10248}
10249
10250static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
10251{
10252 struct cnic_ctl_info ctl;
10253
10254 /* first we tell CNIC and only then we count this as a completion */
10255 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
10256 ctl.data.comp.cid = cid;
10257
10258 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 10259 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
10260}
10261
10262static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
10263{
10264 struct bnx2x *bp = netdev_priv(dev);
10265 int rc = 0;
10266
10267 switch (ctl->cmd) {
10268 case DRV_CTL_CTXTBL_WR_CMD: {
10269 u32 index = ctl->data.io.offset;
10270 dma_addr_t addr = ctl->data.io.dma_addr;
10271
10272 bnx2x_ilt_wr(bp, index, addr);
10273 break;
10274 }
10275
c2bff63f
DK
10276 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
10277 int count = ctl->data.credit.credit_count;
993ac7b5
MC
10278
10279 bnx2x_cnic_sp_post(bp, count);
10280 break;
10281 }
10282
10283 /* rtnl_lock is held. */
10284 case DRV_CTL_START_L2_CMD: {
10285 u32 cli = ctl->data.ring.client_id;
10286
ec6ba945
VZ
10287 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
10288 bnx2x_del_fcoe_eth_macs(bp);
10289
523224a3
DK
10290 /* Set iSCSI MAC address */
10291 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
10292
10293 mmiowb();
10294 barrier();
10295
10296 /* Start accepting on iSCSI L2 ring. Accept all multicasts
10297 * because it's the only way for UIO Client to accept
10298 * multicasts (in non-promiscuous mode only one Client per
10299 * function will receive multicast packets (leading in our
10300 * case).
10301 */
10302 bnx2x_rxq_set_mac_filters(bp, cli,
10303 BNX2X_ACCEPT_UNICAST |
10304 BNX2X_ACCEPT_BROADCAST |
10305 BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->cq_spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	case DRV_CTL_ISCSI_STOPPED_CMD: {
		bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_ISCSI_STOPPED);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

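/* Describe the IRQ resources CNIC may use: slot 0 is the dedicated
 * CNIC status block (driven by bp->msix_table[1] when MSI-X is
 * enabled), slot 1 is the default status block that also carries
 * slowpath events.
 */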
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

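/* Registration: allocate and reset the kernel work-queue (kwq) ring
 * first, then publish the ops pointer with rcu_assign_pointer() as the
 * very last step, so a concurrent reader never sees cnic_ops non-NULL
 * while the rest of the state is still uninitialized.
 */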
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

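/* Teardown mirrors registration in reverse: clear the ops pointer
 * under cnic_mutex, then wait for all RCU readers (the _bh send path)
 * to drain via synchronize_rcu() before freeing the kwq ring.
 */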
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

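/* Entry point for the CNIC module.  A simplified consumer-side sketch
 * (an assumption for illustration, not the actual cnic code) of how
 * the returned descriptor is used:
 *
 *	struct cnic_eth_dev *ethdev = bnx2x_cnic_probe(netdev);
 *
 *	if (ethdev && !(ethdev->drv_state & CNIC_DRV_STATE_NO_ISCSI))
 *		ethdev->drv_register_cnic(netdev, &my_cnic_ops, my_data);
 *
 * where my_cnic_ops / my_data are hypothetical names for the caller's
 * cnic_ops table and opaque context.
 */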
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	/* If both iSCSI and FCoE are disabled, return NULL to
	 * indicate to CNIC that it should not try to work with
	 * this device.
	 */
	if (NO_ISCSI(bp) && NO_FCOE(bp))
		return NULL;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
				 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	if (NO_ISCSI_OOO(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;

	if (NO_ISCSI(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;

	if (NO_FCOE(bp))
		cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
	   "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */