bnx2x: use XPS if possible for bnx2x_select_queue instead of pure hash
[linux-2.6-block.git] / drivers / net / ethernet / broadcom / bnx2x / bnx2x_main.c
CommitLineData
34f80b04 1/* bnx2x_main.c: Broadcom Everest network driver.
a2fbb9ea 2 *
247fa82b 3 * Copyright (c) 2007-2013 Broadcom Corporation
a2fbb9ea
ET
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
24e3fcef
EG
9 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
a2fbb9ea
ET
11 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
ca00392c 13 * Slowpath and fastpath rework by Vladislav Zolotarov
c14423fe 14 * Statistics and Link management by Yitchak Gertner
a2fbb9ea
ET
15 *
16 */
17
f1deab50
JP
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
a2fbb9ea
ET
20#include <linux/module.h>
21#include <linux/moduleparam.h>
22#include <linux/kernel.h>
23#include <linux/device.h> /* for dev_info() */
24#include <linux/timer.h>
25#include <linux/errno.h>
26#include <linux/ioport.h>
27#include <linux/slab.h>
a2fbb9ea
ET
28#include <linux/interrupt.h>
29#include <linux/pci.h>
30#include <linux/init.h>
31#include <linux/netdevice.h>
32#include <linux/etherdevice.h>
33#include <linux/skbuff.h>
34#include <linux/dma-mapping.h>
35#include <linux/bitops.h>
36#include <linux/irq.h>
37#include <linux/delay.h>
38#include <asm/byteorder.h>
39#include <linux/time.h>
40#include <linux/ethtool.h>
41#include <linux/mii.h>
0c6671b0 42#include <linux/if_vlan.h>
a2fbb9ea 43#include <net/ip.h>
619c5cb6 44#include <net/ipv6.h>
a2fbb9ea
ET
45#include <net/tcp.h>
46#include <net/checksum.h>
34f80b04 47#include <net/ip6_checksum.h>
a2fbb9ea
ET
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
34f80b04 50#include <linux/crc32c.h>
a2fbb9ea
ET
51#include <linux/prefetch.h>
52#include <linux/zlib.h>
a2fbb9ea 53#include <linux/io.h>
452427b0 54#include <linux/semaphore.h>
45229b42 55#include <linux/stringify.h>
7ab24bfd 56#include <linux/vmalloc.h>
a2fbb9ea 57
a2fbb9ea
ET
58#include "bnx2x.h"
59#include "bnx2x_init.h"
94a78b79 60#include "bnx2x_init_ops.h"
9f6c9258 61#include "bnx2x_cmn.h"
1ab4434c 62#include "bnx2x_vfpf.h"
e4901dde 63#include "bnx2x_dcb.h"
042181f5 64#include "bnx2x_sp.h"
a2fbb9ea 65
94a78b79
VZ
66#include <linux/firmware.h>
67#include "bnx2x_fw_file_hdr.h"
68/* FW files */
45229b42
BH
69#define FW_FILE_VERSION \
70 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
71 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
72 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
73 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
560131f3
DK
74#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
75#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
f2e0899f 76#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
94a78b79 77
34f80b04
EG
78/* Time in jiffies before concluding the transmitter is hung */
79#define TX_TIMEOUT (5*HZ)
a2fbb9ea 80
0329aba1 81static char version[] =
619c5cb6 82 "Broadcom NetXtreme II 5771x/578xx 10/20-Gigabit Ethernet Driver "
a2fbb9ea
ET
83 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
84
24e3fcef 85MODULE_AUTHOR("Eliezer Tamir");
f2e0899f 86MODULE_DESCRIPTION("Broadcom NetXtreme II "
619c5cb6
VZ
87 "BCM57710/57711/57711E/"
88 "57712/57712_MF/57800/57800_MF/57810/57810_MF/"
89 "57840/57840_MF Driver");
a2fbb9ea
ET
90MODULE_LICENSE("GPL");
91MODULE_VERSION(DRV_MODULE_VERSION);
45229b42
BH
92MODULE_FIRMWARE(FW_FILE_NAME_E1);
93MODULE_FIRMWARE(FW_FILE_NAME_E1H);
f2e0899f 94MODULE_FIRMWARE(FW_FILE_NAME_E2);
a2fbb9ea 95
ca00392c 96
d6214d7a 97int num_queues;
54b9ddaa 98module_param(num_queues, int, 0);
96305234
DK
99MODULE_PARM_DESC(num_queues,
100 " Set number of queues (default is as a number of CPUs)");
555f6c78 101
19680c48 102static int disable_tpa;
19680c48 103module_param(disable_tpa, int, 0);
9898f86d 104MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
8badd27a 105
9ee3d37b
DK
106#define INT_MODE_INTx 1
107#define INT_MODE_MSI 2
0e8d2ec5 108int int_mode;
8badd27a 109module_param(int_mode, int, 0);
619c5cb6 110MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
cdaa7cb8 111 "(1 INT#x; 2 MSI)");
8badd27a 112
a18f5128
EG
113static int dropless_fc;
114module_param(dropless_fc, int, 0);
115MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
116
8d5726c4
EG
117static int mrrs = -1;
118module_param(mrrs, int, 0);
119MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
120
9898f86d 121static int debug;
a2fbb9ea 122module_param(debug, int, 0);
9898f86d
EG
123MODULE_PARM_DESC(debug, " Default debug msglevel");
124
a2fbb9ea 125
619c5cb6
VZ
126
127struct workqueue_struct *bnx2x_wq;
ec6ba945 128
1ef1d45a
BW
129struct bnx2x_mac_vals {
130 u32 xmac_addr;
131 u32 xmac_val;
132 u32 emac_addr;
133 u32 emac_val;
134 u32 umac_addr;
135 u32 umac_val;
136 u32 bmac_addr;
137 u32 bmac_val[2];
138};
139
a2fbb9ea
ET
140enum bnx2x_board_type {
141 BCM57710 = 0,
619c5cb6
VZ
142 BCM57711,
143 BCM57711E,
144 BCM57712,
145 BCM57712_MF,
1ab4434c 146 BCM57712_VF,
619c5cb6
VZ
147 BCM57800,
148 BCM57800_MF,
1ab4434c 149 BCM57800_VF,
619c5cb6
VZ
150 BCM57810,
151 BCM57810_MF,
1ab4434c 152 BCM57810_VF,
c3def943
YM
153 BCM57840_4_10,
154 BCM57840_2_20,
7e8e02df 155 BCM57840_MF,
1ab4434c 156 BCM57840_VF,
7e8e02df 157 BCM57811,
1ab4434c
AE
158 BCM57811_MF,
159 BCM57840_O,
160 BCM57840_MFO,
161 BCM57811_VF
a2fbb9ea
ET
162};
163
34f80b04 164/* indexed by board_type, above */
53a10565 165static struct {
a2fbb9ea 166 char *name;
0329aba1 167} board_info[] = {
1ab4434c
AE
168 [BCM57710] = { "Broadcom NetXtreme II BCM57710 10 Gigabit PCIe [Everest]" },
169 [BCM57711] = { "Broadcom NetXtreme II BCM57711 10 Gigabit PCIe" },
170 [BCM57711E] = { "Broadcom NetXtreme II BCM57711E 10 Gigabit PCIe" },
171 [BCM57712] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet" },
172 [BCM57712_MF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Multi Function" },
173 [BCM57712_VF] = { "Broadcom NetXtreme II BCM57712 10 Gigabit Ethernet Virtual Function" },
174 [BCM57800] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet" },
175 [BCM57800_MF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Multi Function" },
176 [BCM57800_VF] = { "Broadcom NetXtreme II BCM57800 10 Gigabit Ethernet Virtual Function" },
177 [BCM57810] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet" },
178 [BCM57810_MF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Multi Function" },
179 [BCM57810_VF] = { "Broadcom NetXtreme II BCM57810 10 Gigabit Ethernet Virtual Function" },
180 [BCM57840_4_10] = { "Broadcom NetXtreme II BCM57840 10 Gigabit Ethernet" },
181 [BCM57840_2_20] = { "Broadcom NetXtreme II BCM57840 20 Gigabit Ethernet" },
182 [BCM57840_MF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
183 [BCM57840_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" },
184 [BCM57811] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet" },
185 [BCM57811_MF] = { "Broadcom NetXtreme II BCM57811 10 Gigabit Ethernet Multi Function" },
186 [BCM57840_O] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet" },
187 [BCM57840_MFO] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Multi Function" },
188 [BCM57811_VF] = { "Broadcom NetXtreme II BCM57840 10/20 Gigabit Ethernet Virtual Function" }
a2fbb9ea
ET
189};
190
619c5cb6
VZ
191#ifndef PCI_DEVICE_ID_NX2_57710
192#define PCI_DEVICE_ID_NX2_57710 CHIP_NUM_57710
193#endif
194#ifndef PCI_DEVICE_ID_NX2_57711
195#define PCI_DEVICE_ID_NX2_57711 CHIP_NUM_57711
196#endif
197#ifndef PCI_DEVICE_ID_NX2_57711E
198#define PCI_DEVICE_ID_NX2_57711E CHIP_NUM_57711E
199#endif
200#ifndef PCI_DEVICE_ID_NX2_57712
201#define PCI_DEVICE_ID_NX2_57712 CHIP_NUM_57712
202#endif
203#ifndef PCI_DEVICE_ID_NX2_57712_MF
204#define PCI_DEVICE_ID_NX2_57712_MF CHIP_NUM_57712_MF
205#endif
8395be5e
AE
206#ifndef PCI_DEVICE_ID_NX2_57712_VF
207#define PCI_DEVICE_ID_NX2_57712_VF CHIP_NUM_57712_VF
208#endif
619c5cb6
VZ
209#ifndef PCI_DEVICE_ID_NX2_57800
210#define PCI_DEVICE_ID_NX2_57800 CHIP_NUM_57800
211#endif
212#ifndef PCI_DEVICE_ID_NX2_57800_MF
213#define PCI_DEVICE_ID_NX2_57800_MF CHIP_NUM_57800_MF
214#endif
8395be5e
AE
215#ifndef PCI_DEVICE_ID_NX2_57800_VF
216#define PCI_DEVICE_ID_NX2_57800_VF CHIP_NUM_57800_VF
217#endif
619c5cb6
VZ
218#ifndef PCI_DEVICE_ID_NX2_57810
219#define PCI_DEVICE_ID_NX2_57810 CHIP_NUM_57810
220#endif
221#ifndef PCI_DEVICE_ID_NX2_57810_MF
222#define PCI_DEVICE_ID_NX2_57810_MF CHIP_NUM_57810_MF
223#endif
c3def943
YM
224#ifndef PCI_DEVICE_ID_NX2_57840_O
225#define PCI_DEVICE_ID_NX2_57840_O CHIP_NUM_57840_OBSOLETE
226#endif
8395be5e
AE
227#ifndef PCI_DEVICE_ID_NX2_57810_VF
228#define PCI_DEVICE_ID_NX2_57810_VF CHIP_NUM_57810_VF
229#endif
c3def943
YM
230#ifndef PCI_DEVICE_ID_NX2_57840_4_10
231#define PCI_DEVICE_ID_NX2_57840_4_10 CHIP_NUM_57840_4_10
232#endif
233#ifndef PCI_DEVICE_ID_NX2_57840_2_20
234#define PCI_DEVICE_ID_NX2_57840_2_20 CHIP_NUM_57840_2_20
235#endif
236#ifndef PCI_DEVICE_ID_NX2_57840_MFO
237#define PCI_DEVICE_ID_NX2_57840_MFO CHIP_NUM_57840_MF_OBSOLETE
619c5cb6
VZ
238#endif
239#ifndef PCI_DEVICE_ID_NX2_57840_MF
240#define PCI_DEVICE_ID_NX2_57840_MF CHIP_NUM_57840_MF
241#endif
8395be5e
AE
242#ifndef PCI_DEVICE_ID_NX2_57840_VF
243#define PCI_DEVICE_ID_NX2_57840_VF CHIP_NUM_57840_VF
244#endif
7e8e02df
BW
245#ifndef PCI_DEVICE_ID_NX2_57811
246#define PCI_DEVICE_ID_NX2_57811 CHIP_NUM_57811
247#endif
248#ifndef PCI_DEVICE_ID_NX2_57811_MF
249#define PCI_DEVICE_ID_NX2_57811_MF CHIP_NUM_57811_MF
250#endif
8395be5e
AE
251#ifndef PCI_DEVICE_ID_NX2_57811_VF
252#define PCI_DEVICE_ID_NX2_57811_VF CHIP_NUM_57811_VF
253#endif
254
a3aa1884 255static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
e4ed7113
EG
256 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
257 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
258 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
f2e0899f 259 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
619c5cb6 260 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_MF), BCM57712_MF },
8395be5e 261 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712_VF), BCM57712_VF },
619c5cb6
VZ
262 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800), BCM57800 },
263 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_MF), BCM57800_MF },
8395be5e 264 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57800_VF), BCM57800_VF },
619c5cb6
VZ
265 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810), BCM57810 },
266 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_MF), BCM57810_MF },
c3def943
YM
267 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_O), BCM57840_O },
268 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_4_10), BCM57840_4_10 },
269 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_2_20), BCM57840_2_20 },
8395be5e 270 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57810_VF), BCM57810_VF },
c3def943 271 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MFO), BCM57840_MFO },
619c5cb6 272 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_MF), BCM57840_MF },
8395be5e 273 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57840_VF), BCM57840_VF },
7e8e02df
BW
274 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811), BCM57811 },
275 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_MF), BCM57811_MF },
8395be5e 276 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57811_VF), BCM57811_VF },
a2fbb9ea
ET
277 { 0 }
278};
279
280MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
281
452427b0
YM
282/* Global resources for unloading a previously loaded device */
283#define BNX2X_PREV_WAIT_NEEDED 1
284static DEFINE_SEMAPHORE(bnx2x_prev_sem);
285static LIST_HEAD(bnx2x_prev_list);
a2fbb9ea
ET
286/****************************************************************************
287* General service functions
288****************************************************************************/
289
1191cb83 290static void __storm_memset_dma_mapping(struct bnx2x *bp,
619c5cb6
VZ
291 u32 addr, dma_addr_t mapping)
292{
293 REG_WR(bp, addr, U64_LO(mapping));
294 REG_WR(bp, addr + 4, U64_HI(mapping));
295}
296
1191cb83
ED
297static void storm_memset_spq_addr(struct bnx2x *bp,
298 dma_addr_t mapping, u16 abs_fid)
619c5cb6
VZ
299{
300 u32 addr = XSEM_REG_FAST_MEMORY +
301 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
302
303 __storm_memset_dma_mapping(bp, addr, mapping);
304}
305
1191cb83
ED
306static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
307 u16 pf_id)
523224a3 308{
619c5cb6
VZ
309 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
310 pf_id);
311 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
312 pf_id);
313 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
314 pf_id);
315 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
316 pf_id);
523224a3
DK
317}
318
1191cb83
ED
319static void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
320 u8 enable)
619c5cb6
VZ
321{
322 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
323 enable);
324 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
325 enable);
326 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
327 enable);
328 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
329 enable);
330}
523224a3 331
1191cb83
ED
332static void storm_memset_eq_data(struct bnx2x *bp,
333 struct event_ring_data *eq_data,
523224a3
DK
334 u16 pfid)
335{
336 size_t size = sizeof(struct event_ring_data);
337
338 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
339
340 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
341}
342
1191cb83
ED
343static void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
344 u16 pfid)
523224a3
DK
345{
346 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
347 REG_WR16(bp, addr, eq_prod);
348}
349
a2fbb9ea
ET
350/* used only at init
351 * locking is done by mcp
352 */
8d96286a 353static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
a2fbb9ea
ET
354{
355 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
356 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
357 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
358 PCICFG_VENDOR_ID_OFFSET);
359}
360
a2fbb9ea
ET
361static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
362{
363 u32 val;
364
365 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
366 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
367 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
368 PCICFG_VENDOR_ID_OFFSET);
369
370 return val;
371}
a2fbb9ea 372
f2e0899f
DK
373#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
374#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
375#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
376#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
377#define DMAE_DP_DST_NONE "dst_addr [none]"
378
fd1fc79d
AE
379void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
380{
381 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
382
383 switch (dmae->opcode & DMAE_COMMAND_DST) {
384 case DMAE_CMD_DST_PCI:
385 if (src_type == DMAE_CMD_SRC_PCI)
386 DP(msglvl, "DMAE: opcode 0x%08x\n"
387 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
388 "comp_addr [%x:%08x], comp_val 0x%08x\n",
389 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
390 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
391 dmae->comp_addr_hi, dmae->comp_addr_lo,
392 dmae->comp_val);
393 else
394 DP(msglvl, "DMAE: opcode 0x%08x\n"
395 "src [%08x], len [%d*4], dst [%x:%08x]\n"
396 "comp_addr [%x:%08x], comp_val 0x%08x\n",
397 dmae->opcode, dmae->src_addr_lo >> 2,
398 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
399 dmae->comp_addr_hi, dmae->comp_addr_lo,
400 dmae->comp_val);
401 break;
402 case DMAE_CMD_DST_GRC:
403 if (src_type == DMAE_CMD_SRC_PCI)
404 DP(msglvl, "DMAE: opcode 0x%08x\n"
405 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
406 "comp_addr [%x:%08x], comp_val 0x%08x\n",
407 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
408 dmae->len, dmae->dst_addr_lo >> 2,
409 dmae->comp_addr_hi, dmae->comp_addr_lo,
410 dmae->comp_val);
411 else
412 DP(msglvl, "DMAE: opcode 0x%08x\n"
413 "src [%08x], len [%d*4], dst [%08x]\n"
414 "comp_addr [%x:%08x], comp_val 0x%08x\n",
415 dmae->opcode, dmae->src_addr_lo >> 2,
416 dmae->len, dmae->dst_addr_lo >> 2,
417 dmae->comp_addr_hi, dmae->comp_addr_lo,
418 dmae->comp_val);
419 break;
420 default:
421 if (src_type == DMAE_CMD_SRC_PCI)
422 DP(msglvl, "DMAE: opcode 0x%08x\n"
423 "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n"
424 "comp_addr [%x:%08x] comp_val 0x%08x\n",
425 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
426 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
427 dmae->comp_val);
428 else
429 DP(msglvl, "DMAE: opcode 0x%08x\n"
430 "src_addr [%08x] len [%d * 4] dst_addr [none]\n"
431 "comp_addr [%x:%08x] comp_val 0x%08x\n",
432 dmae->opcode, dmae->src_addr_lo >> 2,
433 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
434 dmae->comp_val);
435 break;
436 }
437}
f2e0899f 438
a2fbb9ea 439/* copy command into DMAE command memory and set DMAE command go */
6c719d00 440void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
a2fbb9ea
ET
441{
442 u32 cmd_offset;
443 int i;
444
445 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
446 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
447 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
a2fbb9ea
ET
448 }
449 REG_WR(bp, dmae_reg_go_c[idx], 1);
450}
451
f2e0899f 452u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
a2fbb9ea 453{
f2e0899f
DK
454 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
455 DMAE_CMD_C_ENABLE);
456}
ad8d3948 457
f2e0899f
DK
458u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
459{
460 return opcode & ~DMAE_CMD_SRC_RESET;
461}
ad8d3948 462
f2e0899f
DK
463u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
464 bool with_comp, u8 comp_type)
465{
466 u32 opcode = 0;
467
468 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
469 (dst_type << DMAE_COMMAND_DST_SHIFT));
ad8d3948 470
f2e0899f
DK
471 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
472
473 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
3395a033
DK
474 opcode |= ((BP_VN(bp) << DMAE_CMD_E1HVN_SHIFT) |
475 (BP_VN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
f2e0899f 476 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
a2fbb9ea 477
a2fbb9ea 478#ifdef __BIG_ENDIAN
f2e0899f 479 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
a2fbb9ea 480#else
f2e0899f 481 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
a2fbb9ea 482#endif
f2e0899f
DK
483 if (with_comp)
484 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
485 return opcode;
486}
487
fd1fc79d 488void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
8d96286a 489 struct dmae_command *dmae,
490 u8 src_type, u8 dst_type)
f2e0899f
DK
491{
492 memset(dmae, 0, sizeof(struct dmae_command));
493
494 /* set the opcode */
495 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
496 true, DMAE_COMP_PCI);
497
498 /* fill in the completion parameters */
499 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
500 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
501 dmae->comp_val = DMAE_COMP_VAL;
502}
503
fd1fc79d
AE
504/* issue a dmae command over the init-channel and wait for completion */
505int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
f2e0899f
DK
506{
507 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
5e374b5a 508 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 4000;
f2e0899f
DK
509 int rc = 0;
510
619c5cb6
VZ
511 /*
512 * Lock the dmae channel. Disable BHs to prevent a dead-lock
513 * as long as this code is called both from syscall context and
514 * from ndo_set_rx_mode() flow that may be called from BH.
515 */
6e30dd4e 516 spin_lock_bh(&bp->dmae_lock);
5ff7b6d4 517
f2e0899f 518 /* reset completion */
a2fbb9ea
ET
519 *wb_comp = 0;
520
f2e0899f
DK
521 /* post the command on the channel used for initializations */
522 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
a2fbb9ea 523
f2e0899f 524 /* wait for completion */
a2fbb9ea 525 udelay(5);
f2e0899f 526 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
ad8d3948 527
95c6c616
AE
528 if (!cnt ||
529 (bp->recovery_state != BNX2X_RECOVERY_DONE &&
530 bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
c3eefaf6 531 BNX2X_ERR("DMAE timeout!\n");
f2e0899f
DK
532 rc = DMAE_TIMEOUT;
533 goto unlock;
a2fbb9ea 534 }
ad8d3948 535 cnt--;
f2e0899f 536 udelay(50);
a2fbb9ea 537 }
f2e0899f
DK
538 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
539 BNX2X_ERR("DMAE PCI error!\n");
540 rc = DMAE_PCI_ERROR;
541 }
542
f2e0899f 543unlock:
6e30dd4e 544 spin_unlock_bh(&bp->dmae_lock);
f2e0899f
DK
545 return rc;
546}
547
548void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
549 u32 len32)
550{
551 struct dmae_command dmae;
552
553 if (!bp->dmae_ready) {
554 u32 *data = bnx2x_sp(bp, wb_data[0]);
555
127a425e
AE
556 if (CHIP_IS_E1(bp))
557 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
558 else
559 bnx2x_init_str_wr(bp, dst_addr, data, len32);
f2e0899f
DK
560 return;
561 }
562
563 /* set opcode and fixed command fields */
564 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
565
566 /* fill in addresses and len */
567 dmae.src_addr_lo = U64_LO(dma_addr);
568 dmae.src_addr_hi = U64_HI(dma_addr);
569 dmae.dst_addr_lo = dst_addr >> 2;
570 dmae.dst_addr_hi = 0;
571 dmae.len = len32;
572
f2e0899f
DK
573 /* issue the command and wait for completion */
574 bnx2x_issue_dmae_with_comp(bp, &dmae);
a2fbb9ea
ET
575}
576
c18487ee 577void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
a2fbb9ea 578{
5ff7b6d4 579 struct dmae_command dmae;
ad8d3948
EG
580
581 if (!bp->dmae_ready) {
582 u32 *data = bnx2x_sp(bp, wb_data[0]);
583 int i;
584
51c1a580 585 if (CHIP_IS_E1(bp))
127a425e
AE
586 for (i = 0; i < len32; i++)
587 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
51c1a580 588 else
127a425e
AE
589 for (i = 0; i < len32; i++)
590 data[i] = REG_RD(bp, src_addr + i*4);
591
ad8d3948
EG
592 return;
593 }
594
f2e0899f
DK
595 /* set opcode and fixed command fields */
596 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
a2fbb9ea 597
f2e0899f 598 /* fill in addresses and len */
5ff7b6d4
EG
599 dmae.src_addr_lo = src_addr >> 2;
600 dmae.src_addr_hi = 0;
601 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
602 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
603 dmae.len = len32;
ad8d3948 604
f2e0899f
DK
605 /* issue the command and wait for completion */
606 bnx2x_issue_dmae_with_comp(bp, &dmae);
ad8d3948
EG
607}
608
8d96286a 609static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
610 u32 addr, u32 len)
573f2035 611{
02e3c6cb 612 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
573f2035
EG
613 int offset = 0;
614
02e3c6cb 615 while (len > dmae_wr_max) {
573f2035 616 bnx2x_write_dmae(bp, phys_addr + offset,
02e3c6cb
VZ
617 addr + offset, dmae_wr_max);
618 offset += dmae_wr_max * 4;
619 len -= dmae_wr_max;
573f2035
EG
620 }
621
622 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
623}
624
a2fbb9ea
ET
625static int bnx2x_mc_assert(struct bnx2x *bp)
626{
a2fbb9ea 627 char last_idx;
34f80b04
EG
628 int i, rc = 0;
629 u32 row0, row1, row2, row3;
630
631 /* XSTORM */
632 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
633 XSTORM_ASSERT_LIST_INDEX_OFFSET);
634 if (last_idx)
635 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
636
637 /* print the asserts */
638 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
639
640 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
641 XSTORM_ASSERT_LIST_OFFSET(i));
642 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
643 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
644 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
645 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
646 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
647 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
648
649 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
51c1a580 650 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
34f80b04
EG
651 i, row3, row2, row1, row0);
652 rc++;
653 } else {
654 break;
655 }
656 }
657
658 /* TSTORM */
659 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
660 TSTORM_ASSERT_LIST_INDEX_OFFSET);
661 if (last_idx)
662 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
663
664 /* print the asserts */
665 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
666
667 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
668 TSTORM_ASSERT_LIST_OFFSET(i));
669 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
670 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
671 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
672 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
673 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
674 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
675
676 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
51c1a580 677 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
34f80b04
EG
678 i, row3, row2, row1, row0);
679 rc++;
680 } else {
681 break;
682 }
683 }
684
685 /* CSTORM */
686 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
687 CSTORM_ASSERT_LIST_INDEX_OFFSET);
688 if (last_idx)
689 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
690
691 /* print the asserts */
692 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
693
694 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
695 CSTORM_ASSERT_LIST_OFFSET(i));
696 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
697 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
698 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
699 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
700 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
701 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
702
703 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
51c1a580 704 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
34f80b04
EG
705 i, row3, row2, row1, row0);
706 rc++;
707 } else {
708 break;
709 }
710 }
711
712 /* USTORM */
713 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
714 USTORM_ASSERT_LIST_INDEX_OFFSET);
715 if (last_idx)
716 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
717
718 /* print the asserts */
719 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
720
721 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
722 USTORM_ASSERT_LIST_OFFSET(i));
723 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
724 USTORM_ASSERT_LIST_OFFSET(i) + 4);
725 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
726 USTORM_ASSERT_LIST_OFFSET(i) + 8);
727 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
728 USTORM_ASSERT_LIST_OFFSET(i) + 12);
729
730 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
51c1a580 731 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x 0x%08x 0x%08x 0x%08x\n",
34f80b04
EG
732 i, row3, row2, row1, row0);
733 rc++;
734 } else {
735 break;
a2fbb9ea
ET
736 }
737 }
34f80b04 738
a2fbb9ea
ET
739 return rc;
740}
c14423fe 741
7a25cc73 742void bnx2x_fw_dump_lvl(struct bnx2x *bp, const char *lvl)
a2fbb9ea 743{
7a25cc73 744 u32 addr, val;
a2fbb9ea 745 u32 mark, offset;
4781bfad 746 __be32 data[9];
a2fbb9ea 747 int word;
f2e0899f 748 u32 trace_shmem_base;
2145a920
VZ
749 if (BP_NOMCP(bp)) {
750 BNX2X_ERR("NO MCP - can not dump\n");
751 return;
752 }
7a25cc73
DK
753 netdev_printk(lvl, bp->dev, "bc %d.%d.%d\n",
754 (bp->common.bc_ver & 0xff0000) >> 16,
755 (bp->common.bc_ver & 0xff00) >> 8,
756 (bp->common.bc_ver & 0xff));
757
758 val = REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER);
759 if (val == REG_RD(bp, MCP_REG_MCPR_CPU_PROGRAM_COUNTER))
51c1a580 760 BNX2X_ERR("%s" "MCP PC at 0x%x\n", lvl, val);
cdaa7cb8 761
f2e0899f
DK
762 if (BP_PATH(bp) == 0)
763 trace_shmem_base = bp->common.shmem_base;
764 else
765 trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
de128804
DK
766 addr = trace_shmem_base - 0x800;
767
768 /* validate TRCB signature */
769 mark = REG_RD(bp, addr);
770 if (mark != MFW_TRACE_SIGNATURE) {
771 BNX2X_ERR("Trace buffer signature is missing.");
772 return ;
773 }
774
775 /* read cyclic buffer pointer */
776 addr += 4;
cdaa7cb8 777 mark = REG_RD(bp, addr);
f2e0899f
DK
778 mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
779 + ((mark + 0x3) & ~0x3) - 0x08000000;
7a25cc73 780 printk("%s" "begin fw dump (mark 0x%x)\n", lvl, mark);
a2fbb9ea 781
7a25cc73 782 printk("%s", lvl);
2de67439
YM
783
784 /* dump buffer after the mark */
f2e0899f 785 for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
a2fbb9ea 786 for (word = 0; word < 8; word++)
cdaa7cb8 787 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 788 data[8] = 0x0;
7995c64e 789 pr_cont("%s", (char *)data);
a2fbb9ea 790 }
2de67439
YM
791
792 /* dump buffer before the mark */
cdaa7cb8 793 for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
a2fbb9ea 794 for (word = 0; word < 8; word++)
cdaa7cb8 795 data[word] = htonl(REG_RD(bp, offset + 4*word));
a2fbb9ea 796 data[8] = 0x0;
7995c64e 797 pr_cont("%s", (char *)data);
a2fbb9ea 798 }
7a25cc73
DK
799 printk("%s" "end of fw dump\n", lvl);
800}
801
1191cb83 802static void bnx2x_fw_dump(struct bnx2x *bp)
7a25cc73
DK
803{
804 bnx2x_fw_dump_lvl(bp, KERN_ERR);
a2fbb9ea
ET
805}
806
823e1d90
YM
807static void bnx2x_hc_int_disable(struct bnx2x *bp)
808{
809 int port = BP_PORT(bp);
810 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
811 u32 val = REG_RD(bp, addr);
812
813 /* in E1 we must use only PCI configuration space to disable
814 * MSI/MSIX capablility
815 * It's forbitten to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
816 */
817 if (CHIP_IS_E1(bp)) {
818 /* Since IGU_PF_CONF_MSI_MSIX_EN still always on
819 * Use mask register to prevent from HC sending interrupts
820 * after we exit the function
821 */
822 REG_WR(bp, HC_REG_INT_MASK + port*4, 0);
823
824 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
825 HC_CONFIG_0_REG_INT_LINE_EN_0 |
826 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
827 } else
828 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
829 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
830 HC_CONFIG_0_REG_INT_LINE_EN_0 |
831 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
832
833 DP(NETIF_MSG_IFDOWN,
834 "write %x to HC %d (addr 0x%x)\n",
835 val, port, addr);
836
837 /* flush all outstanding writes */
838 mmiowb();
839
840 REG_WR(bp, addr, val);
841 if (REG_RD(bp, addr) != val)
842 BNX2X_ERR("BUG! proper val not read from IGU!\n");
843}
844
845static void bnx2x_igu_int_disable(struct bnx2x *bp)
846{
847 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
848
849 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
850 IGU_PF_CONF_INT_LINE_EN |
851 IGU_PF_CONF_ATTN_BIT_EN);
852
853 DP(NETIF_MSG_IFDOWN, "write %x to IGU\n", val);
854
855 /* flush all outstanding writes */
856 mmiowb();
857
858 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
859 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
860 BNX2X_ERR("BUG! proper val not read from IGU!\n");
861}
862
863static void bnx2x_int_disable(struct bnx2x *bp)
864{
865 if (bp->common.int_block == INT_BLOCK_HC)
866 bnx2x_hc_int_disable(bp);
867 else
868 bnx2x_igu_int_disable(bp);
869}
870
871void bnx2x_panic_dump(struct bnx2x *bp, bool disable_int)
a2fbb9ea
ET
872{
873 int i;
523224a3
DK
874 u16 j;
875 struct hc_sp_status_block_data sp_sb_data;
876 int func = BP_FUNC(bp);
877#ifdef BNX2X_STOP_ON_ERROR
878 u16 start = 0, end = 0;
6383c0b3 879 u8 cos;
523224a3 880#endif
823e1d90
YM
881 if (disable_int)
882 bnx2x_int_disable(bp);
a2fbb9ea 883
66e855f3 884 bp->stats_state = STATS_STATE_DISABLED;
7a752993 885 bp->eth_stats.unrecoverable_error++;
66e855f3
YG
886 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
887
a2fbb9ea
ET
888 BNX2X_ERR("begin crash dump -----------------\n");
889
8440d2b6
EG
890 /* Indices */
891 /* Common */
51c1a580 892 BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x) spq_prod_idx(0x%x) next_stats_cnt(0x%x)\n",
619c5cb6
VZ
893 bp->def_idx, bp->def_att_idx, bp->attn_state,
894 bp->spq_prod_idx, bp->stats_counter);
523224a3
DK
895 BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
896 bp->def_status_blk->atten_status_block.attn_bits,
897 bp->def_status_blk->atten_status_block.attn_bits_ack,
898 bp->def_status_blk->atten_status_block.status_block_id,
899 bp->def_status_blk->atten_status_block.attn_bits_index);
900 BNX2X_ERR(" def (");
901 for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
902 pr_cont("0x%x%s",
f1deab50
JP
903 bp->def_status_blk->sp_sb.index_values[i],
904 (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");
523224a3
DK
905
906 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
907 *((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
908 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
909 i*sizeof(u32));
910
f1deab50 911 pr_cont("igu_sb_id(0x%x) igu_seg_id(0x%x) pf_id(0x%x) vnic_id(0x%x) vf_id(0x%x) vf_valid (0x%x) state(0x%x)\n",
523224a3
DK
912 sp_sb_data.igu_sb_id,
913 sp_sb_data.igu_seg_id,
914 sp_sb_data.p_func.pf_id,
915 sp_sb_data.p_func.vnic_id,
916 sp_sb_data.p_func.vf_id,
619c5cb6
VZ
917 sp_sb_data.p_func.vf_valid,
918 sp_sb_data.state);
523224a3 919
8440d2b6 920
ec6ba945 921 for_each_eth_queue(bp, i) {
a2fbb9ea 922 struct bnx2x_fastpath *fp = &bp->fp[i];
523224a3 923 int loop;
f2e0899f 924 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
925 struct hc_status_block_data_e1x sb_data_e1x;
926 struct hc_status_block_sm *hc_sm_p =
619c5cb6
VZ
927 CHIP_IS_E1x(bp) ?
928 sb_data_e1x.common.state_machine :
929 sb_data_e2.common.state_machine;
523224a3 930 struct hc_index_data *hc_index_p =
619c5cb6
VZ
931 CHIP_IS_E1x(bp) ?
932 sb_data_e1x.index_data :
933 sb_data_e2.index_data;
6383c0b3 934 u8 data_size, cos;
523224a3 935 u32 *sb_data_p;
6383c0b3 936 struct bnx2x_fp_txdata txdata;
523224a3
DK
937
938 /* Rx */
51c1a580 939 BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x) rx_comp_prod(0x%x) rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
8440d2b6 940 i, fp->rx_bd_prod, fp->rx_bd_cons,
523224a3 941 fp->rx_comp_prod,
66e855f3 942 fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
51c1a580 943 BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x) fp_hc_idx(0x%x)\n",
8440d2b6 944 fp->rx_sge_prod, fp->last_max_sge,
523224a3 945 le16_to_cpu(fp->fp_hc_idx));
a2fbb9ea 946
523224a3 947 /* Tx */
6383c0b3
AE
948 for_each_cos_in_tx_queue(fp, cos)
949 {
65565884 950 txdata = *fp->txdata_ptr[cos];
51c1a580 951 BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x) tx_bd_prod(0x%x) tx_bd_cons(0x%x) *tx_cons_sb(0x%x)\n",
6383c0b3
AE
952 i, txdata.tx_pkt_prod,
953 txdata.tx_pkt_cons, txdata.tx_bd_prod,
954 txdata.tx_bd_cons,
955 le16_to_cpu(*txdata.tx_cons_sb));
956 }
523224a3 957
619c5cb6
VZ
958 loop = CHIP_IS_E1x(bp) ?
959 HC_SB_MAX_INDICES_E1X : HC_SB_MAX_INDICES_E2;
523224a3
DK
960
961 /* host sb data */
962
ec6ba945
VZ
963 if (IS_FCOE_FP(fp))
964 continue;
55c11941 965
523224a3
DK
966 BNX2X_ERR(" run indexes (");
967 for (j = 0; j < HC_SB_MAX_SM; j++)
968 pr_cont("0x%x%s",
969 fp->sb_running_index[j],
970 (j == HC_SB_MAX_SM - 1) ? ")" : " ");
971
972 BNX2X_ERR(" indexes (");
973 for (j = 0; j < loop; j++)
974 pr_cont("0x%x%s",
975 fp->sb_index_values[j],
976 (j == loop - 1) ? ")" : " ");
977 /* fw sb data */
619c5cb6
VZ
978 data_size = CHIP_IS_E1x(bp) ?
979 sizeof(struct hc_status_block_data_e1x) :
980 sizeof(struct hc_status_block_data_e2);
523224a3 981 data_size /= sizeof(u32);
619c5cb6
VZ
982 sb_data_p = CHIP_IS_E1x(bp) ?
983 (u32 *)&sb_data_e1x :
984 (u32 *)&sb_data_e2;
523224a3
DK
985 /* copy sb data in here */
986 for (j = 0; j < data_size; j++)
987 *(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
988 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
989 j * sizeof(u32));
990
619c5cb6 991 if (!CHIP_IS_E1x(bp)) {
51c1a580 992 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
f2e0899f
DK
993 sb_data_e2.common.p_func.pf_id,
994 sb_data_e2.common.p_func.vf_id,
995 sb_data_e2.common.p_func.vf_valid,
996 sb_data_e2.common.p_func.vnic_id,
619c5cb6
VZ
997 sb_data_e2.common.same_igu_sb_1b,
998 sb_data_e2.common.state);
f2e0899f 999 } else {
51c1a580 1000 pr_cont("pf_id(0x%x) vf_id(0x%x) vf_valid(0x%x) vnic_id(0x%x) same_igu_sb_1b(0x%x) state(0x%x)\n",
f2e0899f
DK
1001 sb_data_e1x.common.p_func.pf_id,
1002 sb_data_e1x.common.p_func.vf_id,
1003 sb_data_e1x.common.p_func.vf_valid,
1004 sb_data_e1x.common.p_func.vnic_id,
619c5cb6
VZ
1005 sb_data_e1x.common.same_igu_sb_1b,
1006 sb_data_e1x.common.state);
f2e0899f 1007 }
523224a3
DK
1008
1009 /* SB_SMs data */
1010 for (j = 0; j < HC_SB_MAX_SM; j++) {
51c1a580
MS
1011 pr_cont("SM[%d] __flags (0x%x) igu_sb_id (0x%x) igu_seg_id(0x%x) time_to_expire (0x%x) timer_value(0x%x)\n",
1012 j, hc_sm_p[j].__flags,
1013 hc_sm_p[j].igu_sb_id,
1014 hc_sm_p[j].igu_seg_id,
1015 hc_sm_p[j].time_to_expire,
1016 hc_sm_p[j].timer_value);
523224a3
DK
1017 }
1018
1019 /* Indecies data */
1020 for (j = 0; j < loop; j++) {
51c1a580 1021 pr_cont("INDEX[%d] flags (0x%x) timeout (0x%x)\n", j,
523224a3
DK
1022 hc_index_p[j].flags,
1023 hc_index_p[j].timeout);
1024 }
8440d2b6 1025 }
a2fbb9ea 1026
523224a3 1027#ifdef BNX2X_STOP_ON_ERROR
04c46736
YM
1028
1029 /* event queue */
1030 for (i = 0; i < NUM_EQ_DESC; i++) {
1031 u32 *data = (u32 *)&bp->eq_ring[i].message.data;
1032
1033 BNX2X_ERR("event queue [%d]: header: opcode %d, error %d\n",
1034 i, bp->eq_ring[i].message.opcode,
1035 bp->eq_ring[i].message.error);
1036 BNX2X_ERR("data: %x %x %x\n", data[0], data[1], data[2]);
1037 }
1038
8440d2b6
EG
1039 /* Rings */
1040 /* Rx */
55c11941 1041 for_each_valid_rx_queue(bp, i) {
8440d2b6 1042 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea
ET
1043
1044 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
1045 end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
8440d2b6 1046 for (j = start; j != end; j = RX_BD(j + 1)) {
a2fbb9ea
ET
1047 u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
1048 struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
1049
c3eefaf6 1050 BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
44151acb 1051 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
a2fbb9ea
ET
1052 }
1053
3196a88a
EG
1054 start = RX_SGE(fp->rx_sge_prod);
1055 end = RX_SGE(fp->last_max_sge);
8440d2b6 1056 for (j = start; j != end; j = RX_SGE(j + 1)) {
7a9b2557
VZ
1057 u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
1058 struct sw_rx_page *sw_page = &fp->rx_page_ring[j];
1059
c3eefaf6
EG
1060 BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
1061 i, j, rx_sge[1], rx_sge[0], sw_page->page);
7a9b2557
VZ
1062 }
1063
a2fbb9ea
ET
1064 start = RCQ_BD(fp->rx_comp_cons - 10);
1065 end = RCQ_BD(fp->rx_comp_cons + 503);
8440d2b6 1066 for (j = start; j != end; j = RCQ_BD(j + 1)) {
a2fbb9ea
ET
1067 u32 *cqe = (u32 *)&fp->rx_comp_ring[j];
1068
c3eefaf6
EG
1069 BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
1070 i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
a2fbb9ea
ET
1071 }
1072 }
1073
8440d2b6 1074 /* Tx */
55c11941 1075 for_each_valid_tx_queue(bp, i) {
8440d2b6 1076 struct bnx2x_fastpath *fp = &bp->fp[i];
6383c0b3 1077 for_each_cos_in_tx_queue(fp, cos) {
65565884 1078 struct bnx2x_fp_txdata *txdata = fp->txdata_ptr[cos];
6383c0b3
AE
1079
1080 start = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) - 10);
1081 end = TX_BD(le16_to_cpu(*txdata->tx_cons_sb) + 245);
1082 for (j = start; j != end; j = TX_BD(j + 1)) {
1083 struct sw_tx_bd *sw_bd =
1084 &txdata->tx_buf_ring[j];
1085
51c1a580 1086 BNX2X_ERR("fp%d: txdata %d, packet[%x]=[%p,%x]\n",
6383c0b3
AE
1087 i, cos, j, sw_bd->skb,
1088 sw_bd->first_bd);
1089 }
8440d2b6 1090
6383c0b3
AE
1091 start = TX_BD(txdata->tx_bd_cons - 10);
1092 end = TX_BD(txdata->tx_bd_cons + 254);
1093 for (j = start; j != end; j = TX_BD(j + 1)) {
1094 u32 *tx_bd = (u32 *)&txdata->tx_desc_ring[j];
8440d2b6 1095
51c1a580 1096 BNX2X_ERR("fp%d: txdata %d, tx_bd[%x]=[%x:%x:%x:%x]\n",
6383c0b3
AE
1097 i, cos, j, tx_bd[0], tx_bd[1],
1098 tx_bd[2], tx_bd[3]);
1099 }
8440d2b6
EG
1100 }
1101 }
523224a3 1102#endif
34f80b04 1103 bnx2x_fw_dump(bp);
a2fbb9ea
ET
1104 bnx2x_mc_assert(bp);
1105 BNX2X_ERR("end crash dump -----------------\n");
a2fbb9ea
ET
1106}
1107
619c5cb6
VZ
1108/*
1109 * FLR Support for E2
1110 *
1111 * bnx2x_pf_flr_clnup() is called during nic_load in the per function HW
1112 * initialization.
1113 */
1114#define FLR_WAIT_USEC 10000 /* 10 miliseconds */
89db4ad8
AE
1115#define FLR_WAIT_INTERVAL 50 /* usec */
1116#define FLR_POLL_CNT (FLR_WAIT_USEC/FLR_WAIT_INTERVAL) /* 200 */
619c5cb6
VZ
1117
1118struct pbf_pN_buf_regs {
1119 int pN;
1120 u32 init_crd;
1121 u32 crd;
1122 u32 crd_freed;
1123};
1124
1125struct pbf_pN_cmd_regs {
1126 int pN;
1127 u32 lines_occup;
1128 u32 lines_freed;
1129};
1130
1131static void bnx2x_pbf_pN_buf_flushed(struct bnx2x *bp,
1132 struct pbf_pN_buf_regs *regs,
1133 u32 poll_count)
1134{
1135 u32 init_crd, crd, crd_start, crd_freed, crd_freed_start;
1136 u32 cur_cnt = poll_count;
1137
1138 crd_freed = crd_freed_start = REG_RD(bp, regs->crd_freed);
1139 crd = crd_start = REG_RD(bp, regs->crd);
1140 init_crd = REG_RD(bp, regs->init_crd);
1141
1142 DP(BNX2X_MSG_SP, "INIT CREDIT[%d] : %x\n", regs->pN, init_crd);
1143 DP(BNX2X_MSG_SP, "CREDIT[%d] : s:%x\n", regs->pN, crd);
1144 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: s:%x\n", regs->pN, crd_freed);
1145
1146 while ((crd != init_crd) && ((u32)SUB_S32(crd_freed, crd_freed_start) <
1147 (init_crd - crd_start))) {
1148 if (cur_cnt--) {
89db4ad8 1149 udelay(FLR_WAIT_INTERVAL);
619c5cb6
VZ
1150 crd = REG_RD(bp, regs->crd);
1151 crd_freed = REG_RD(bp, regs->crd_freed);
1152 } else {
1153 DP(BNX2X_MSG_SP, "PBF tx buffer[%d] timed out\n",
1154 regs->pN);
1155 DP(BNX2X_MSG_SP, "CREDIT[%d] : c:%x\n",
1156 regs->pN, crd);
1157 DP(BNX2X_MSG_SP, "CREDIT_FREED[%d]: c:%x\n",
1158 regs->pN, crd_freed);
1159 break;
1160 }
1161 }
1162 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF tx buffer[%d]\n",
89db4ad8 1163 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
619c5cb6
VZ
1164}
1165
1166static void bnx2x_pbf_pN_cmd_flushed(struct bnx2x *bp,
1167 struct pbf_pN_cmd_regs *regs,
1168 u32 poll_count)
1169{
1170 u32 occup, to_free, freed, freed_start;
1171 u32 cur_cnt = poll_count;
1172
1173 occup = to_free = REG_RD(bp, regs->lines_occup);
1174 freed = freed_start = REG_RD(bp, regs->lines_freed);
1175
1176 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n", regs->pN, occup);
1177 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n", regs->pN, freed);
1178
1179 while (occup && ((u32)SUB_S32(freed, freed_start) < to_free)) {
1180 if (cur_cnt--) {
89db4ad8 1181 udelay(FLR_WAIT_INTERVAL);
619c5cb6
VZ
1182 occup = REG_RD(bp, regs->lines_occup);
1183 freed = REG_RD(bp, regs->lines_freed);
1184 } else {
1185 DP(BNX2X_MSG_SP, "PBF cmd queue[%d] timed out\n",
1186 regs->pN);
1187 DP(BNX2X_MSG_SP, "OCCUPANCY[%d] : s:%x\n",
1188 regs->pN, occup);
1189 DP(BNX2X_MSG_SP, "LINES_FREED[%d] : s:%x\n",
1190 regs->pN, freed);
1191 break;
1192 }
1193 }
1194 DP(BNX2X_MSG_SP, "Waited %d*%d usec for PBF cmd queue[%d]\n",
89db4ad8 1195 poll_count-cur_cnt, FLR_WAIT_INTERVAL, regs->pN);
619c5cb6
VZ
1196}
1197
1191cb83
ED
1198static u32 bnx2x_flr_clnup_reg_poll(struct bnx2x *bp, u32 reg,
1199 u32 expected, u32 poll_count)
619c5cb6
VZ
1200{
1201 u32 cur_cnt = poll_count;
1202 u32 val;
1203
1204 while ((val = REG_RD(bp, reg)) != expected && cur_cnt--)
89db4ad8 1205 udelay(FLR_WAIT_INTERVAL);
619c5cb6
VZ
1206
1207 return val;
1208}
1209
d16132ce
AE
1210int bnx2x_flr_clnup_poll_hw_counter(struct bnx2x *bp, u32 reg,
1211 char *msg, u32 poll_cnt)
619c5cb6
VZ
1212{
1213 u32 val = bnx2x_flr_clnup_reg_poll(bp, reg, 0, poll_cnt);
1214 if (val != 0) {
1215 BNX2X_ERR("%s usage count=%d\n", msg, val);
1216 return 1;
1217 }
1218 return 0;
1219}
1220
d16132ce
AE
1221/* Common routines with VF FLR cleanup */
1222u32 bnx2x_flr_clnup_poll_count(struct bnx2x *bp)
619c5cb6
VZ
1223{
1224 /* adjust polling timeout */
1225 if (CHIP_REV_IS_EMUL(bp))
1226 return FLR_POLL_CNT * 2000;
1227
1228 if (CHIP_REV_IS_FPGA(bp))
1229 return FLR_POLL_CNT * 120;
1230
1231 return FLR_POLL_CNT;
1232}
1233
d16132ce 1234void bnx2x_tx_hw_flushed(struct bnx2x *bp, u32 poll_count)
619c5cb6
VZ
1235{
1236 struct pbf_pN_cmd_regs cmd_regs[] = {
1237 {0, (CHIP_IS_E3B0(bp)) ?
1238 PBF_REG_TQ_OCCUPANCY_Q0 :
1239 PBF_REG_P0_TQ_OCCUPANCY,
1240 (CHIP_IS_E3B0(bp)) ?
1241 PBF_REG_TQ_LINES_FREED_CNT_Q0 :
1242 PBF_REG_P0_TQ_LINES_FREED_CNT},
1243 {1, (CHIP_IS_E3B0(bp)) ?
1244 PBF_REG_TQ_OCCUPANCY_Q1 :
1245 PBF_REG_P1_TQ_OCCUPANCY,
1246 (CHIP_IS_E3B0(bp)) ?
1247 PBF_REG_TQ_LINES_FREED_CNT_Q1 :
1248 PBF_REG_P1_TQ_LINES_FREED_CNT},
1249 {4, (CHIP_IS_E3B0(bp)) ?
1250 PBF_REG_TQ_OCCUPANCY_LB_Q :
1251 PBF_REG_P4_TQ_OCCUPANCY,
1252 (CHIP_IS_E3B0(bp)) ?
1253 PBF_REG_TQ_LINES_FREED_CNT_LB_Q :
1254 PBF_REG_P4_TQ_LINES_FREED_CNT}
1255 };
1256
1257 struct pbf_pN_buf_regs buf_regs[] = {
1258 {0, (CHIP_IS_E3B0(bp)) ?
1259 PBF_REG_INIT_CRD_Q0 :
1260 PBF_REG_P0_INIT_CRD ,
1261 (CHIP_IS_E3B0(bp)) ?
1262 PBF_REG_CREDIT_Q0 :
1263 PBF_REG_P0_CREDIT,
1264 (CHIP_IS_E3B0(bp)) ?
1265 PBF_REG_INTERNAL_CRD_FREED_CNT_Q0 :
1266 PBF_REG_P0_INTERNAL_CRD_FREED_CNT},
1267 {1, (CHIP_IS_E3B0(bp)) ?
1268 PBF_REG_INIT_CRD_Q1 :
1269 PBF_REG_P1_INIT_CRD,
1270 (CHIP_IS_E3B0(bp)) ?
1271 PBF_REG_CREDIT_Q1 :
1272 PBF_REG_P1_CREDIT,
1273 (CHIP_IS_E3B0(bp)) ?
1274 PBF_REG_INTERNAL_CRD_FREED_CNT_Q1 :
1275 PBF_REG_P1_INTERNAL_CRD_FREED_CNT},
1276 {4, (CHIP_IS_E3B0(bp)) ?
1277 PBF_REG_INIT_CRD_LB_Q :
1278 PBF_REG_P4_INIT_CRD,
1279 (CHIP_IS_E3B0(bp)) ?
1280 PBF_REG_CREDIT_LB_Q :
1281 PBF_REG_P4_CREDIT,
1282 (CHIP_IS_E3B0(bp)) ?
1283 PBF_REG_INTERNAL_CRD_FREED_CNT_LB_Q :
1284 PBF_REG_P4_INTERNAL_CRD_FREED_CNT},
1285 };
1286
1287 int i;
1288
1289 /* Verify the command queues are flushed P0, P1, P4 */
1290 for (i = 0; i < ARRAY_SIZE(cmd_regs); i++)
1291 bnx2x_pbf_pN_cmd_flushed(bp, &cmd_regs[i], poll_count);
1292
1293
1294 /* Verify the transmission buffers are flushed P0, P1, P4 */
1295 for (i = 0; i < ARRAY_SIZE(buf_regs); i++)
1296 bnx2x_pbf_pN_buf_flushed(bp, &buf_regs[i], poll_count);
1297}
1298
1299#define OP_GEN_PARAM(param) \
1300 (((param) << SDM_OP_GEN_COMP_PARAM_SHIFT) & SDM_OP_GEN_COMP_PARAM)
1301
1302#define OP_GEN_TYPE(type) \
1303 (((type) << SDM_OP_GEN_COMP_TYPE_SHIFT) & SDM_OP_GEN_COMP_TYPE)
1304
1305#define OP_GEN_AGG_VECT(index) \
1306 (((index) << SDM_OP_GEN_AGG_VECT_IDX_SHIFT) & SDM_OP_GEN_AGG_VECT_IDX)
1307
1308
d16132ce 1309int bnx2x_send_final_clnup(struct bnx2x *bp, u8 clnup_func, u32 poll_cnt)
619c5cb6 1310{
86564c3f 1311 u32 op_gen_command = 0;
619c5cb6
VZ
1312
1313 u32 comp_addr = BAR_CSTRORM_INTMEM +
1314 CSTORM_FINAL_CLEANUP_COMPLETE_OFFSET(clnup_func);
1315 int ret = 0;
1316
1317 if (REG_RD(bp, comp_addr)) {
89db4ad8 1318 BNX2X_ERR("Cleanup complete was not 0 before sending\n");
619c5cb6
VZ
1319 return 1;
1320 }
1321
86564c3f
YM
1322 op_gen_command |= OP_GEN_PARAM(XSTORM_AGG_INT_FINAL_CLEANUP_INDEX);
1323 op_gen_command |= OP_GEN_TYPE(XSTORM_AGG_INT_FINAL_CLEANUP_COMP_TYPE);
1324 op_gen_command |= OP_GEN_AGG_VECT(clnup_func);
1325 op_gen_command |= 1 << SDM_OP_GEN_AGG_VECT_IDX_VALID_SHIFT;
619c5cb6 1326
89db4ad8 1327 DP(BNX2X_MSG_SP, "sending FW Final cleanup\n");
86564c3f 1328 REG_WR(bp, XSDM_REG_OPERATION_GEN, op_gen_command);
619c5cb6
VZ
1329
1330 if (bnx2x_flr_clnup_reg_poll(bp, comp_addr, 1, poll_cnt) != 1) {
1331 BNX2X_ERR("FW final cleanup did not succeed\n");
51c1a580
MS
1332 DP(BNX2X_MSG_SP, "At timeout completion address contained %x\n",
1333 (REG_RD(bp, comp_addr)));
d16132ce
AE
1334 bnx2x_panic();
1335 return 1;
619c5cb6
VZ
1336 }
1337 /* Zero completion for nxt FLR */
1338 REG_WR(bp, comp_addr, 0);
1339
1340 return ret;
1341}
1342
b56e9670 1343u8 bnx2x_is_pcie_pending(struct pci_dev *dev)
619c5cb6 1344{
619c5cb6
VZ
1345 u16 status;
1346
2a80eebc 1347 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
619c5cb6
VZ
1348 return status & PCI_EXP_DEVSTA_TRPND;
1349}
1350
1351/* PF FLR specific routines
1352*/
1353static int bnx2x_poll_hw_usage_counters(struct bnx2x *bp, u32 poll_cnt)
1354{
1355
1356 /* wait for CFC PF usage-counter to zero (includes all the VFs) */
1357 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1358 CFC_REG_NUM_LCIDS_INSIDE_PF,
1359 "CFC PF usage counter timed out",
1360 poll_cnt))
1361 return 1;
1362
1363
1364 /* Wait for DQ PF usage-counter to zero (until DQ cleanup) */
1365 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1366 DORQ_REG_PF_USAGE_CNT,
1367 "DQ PF usage counter timed out",
1368 poll_cnt))
1369 return 1;
1370
1371 /* Wait for QM PF usage-counter to zero (until DQ cleanup) */
1372 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1373 QM_REG_PF_USG_CNT_0 + 4*BP_FUNC(bp),
1374 "QM PF usage counter timed out",
1375 poll_cnt))
1376 return 1;
1377
1378 /* Wait for Timer PF usage-counters to zero (until DQ cleanup) */
1379 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1380 TM_REG_LIN0_VNIC_UC + 4*BP_PORT(bp),
1381 "Timers VNIC usage counter timed out",
1382 poll_cnt))
1383 return 1;
1384 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1385 TM_REG_LIN0_NUM_SCANS + 4*BP_PORT(bp),
1386 "Timers NUM_SCANS usage counter timed out",
1387 poll_cnt))
1388 return 1;
1389
1390 /* Wait DMAE PF usage counter to zero */
1391 if (bnx2x_flr_clnup_poll_hw_counter(bp,
1392 dmae_reg_go_c[INIT_DMAE_C(bp)],
1393 "DMAE dommand register timed out",
1394 poll_cnt))
1395 return 1;
1396
1397 return 0;
1398}
1399
1400static void bnx2x_hw_enable_status(struct bnx2x *bp)
1401{
1402 u32 val;
1403
1404 val = REG_RD(bp, CFC_REG_WEAK_ENABLE_PF);
1405 DP(BNX2X_MSG_SP, "CFC_REG_WEAK_ENABLE_PF is 0x%x\n", val);
1406
1407 val = REG_RD(bp, PBF_REG_DISABLE_PF);
1408 DP(BNX2X_MSG_SP, "PBF_REG_DISABLE_PF is 0x%x\n", val);
1409
1410 val = REG_RD(bp, IGU_REG_PCI_PF_MSI_EN);
1411 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSI_EN is 0x%x\n", val);
1412
1413 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_EN);
1414 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_EN is 0x%x\n", val);
1415
1416 val = REG_RD(bp, IGU_REG_PCI_PF_MSIX_FUNC_MASK);
1417 DP(BNX2X_MSG_SP, "IGU_REG_PCI_PF_MSIX_FUNC_MASK is 0x%x\n", val);
1418
1419 val = REG_RD(bp, PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR);
1420 DP(BNX2X_MSG_SP, "PGLUE_B_REG_SHADOW_BME_PF_7_0_CLR is 0x%x\n", val);
1421
1422 val = REG_RD(bp, PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR);
1423 DP(BNX2X_MSG_SP, "PGLUE_B_REG_FLR_REQUEST_PF_7_0_CLR is 0x%x\n", val);
1424
1425 val = REG_RD(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER);
1426 DP(BNX2X_MSG_SP, "PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER is 0x%x\n",
1427 val);
1428}
1429
1430static int bnx2x_pf_flr_clnup(struct bnx2x *bp)
1431{
1432 u32 poll_cnt = bnx2x_flr_clnup_poll_count(bp);
1433
1434 DP(BNX2X_MSG_SP, "Cleanup after FLR PF[%d]\n", BP_ABS_FUNC(bp));
1435
1436 /* Re-enable PF target read access */
1437 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
1438
1439 /* Poll HW usage counters */
89db4ad8 1440 DP(BNX2X_MSG_SP, "Polling usage counters\n");
619c5cb6
VZ
1441 if (bnx2x_poll_hw_usage_counters(bp, poll_cnt))
1442 return -EBUSY;
1443
1444 /* Zero the igu 'trailing edge' and 'leading edge' */
1445
1446 /* Send the FW cleanup command */
1447 if (bnx2x_send_final_clnup(bp, (u8)BP_FUNC(bp), poll_cnt))
1448 return -EBUSY;
1449
1450 /* ATC cleanup */
1451
1452 /* Verify TX hw is flushed */
1453 bnx2x_tx_hw_flushed(bp, poll_cnt);
1454
1455 /* Wait 100ms (not adjusted according to platform) */
1456 msleep(100);
1457
1458 /* Verify no pending pci transactions */
1459 if (bnx2x_is_pcie_pending(bp->pdev))
1460 BNX2X_ERR("PCIE Transactions still pending\n");
1461
1462 /* Debug */
1463 bnx2x_hw_enable_status(bp);
1464
1465 /*
1466 * Master enable - Due to WB DMAE writes performed before this
1467 * register is re-initialized as part of the regular function init
1468 */
1469 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
1470
1471 return 0;
1472}
1473
f2e0899f 1474static void bnx2x_hc_int_enable(struct bnx2x *bp)
a2fbb9ea 1475{
34f80b04 1476 int port = BP_PORT(bp);
a2fbb9ea
ET
1477 u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
1478 u32 val = REG_RD(bp, addr);
69c326b3
DK
1479 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1480 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1481 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
a2fbb9ea
ET
1482
1483 if (msix) {
8badd27a
EG
1484 val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1485 HC_CONFIG_0_REG_INT_LINE_EN_0);
a2fbb9ea
ET
1486 val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1487 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
69c326b3
DK
1488 if (single_msix)
1489 val |= HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
8badd27a
EG
1490 } else if (msi) {
1491 val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
1492 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
1493 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
1494 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
a2fbb9ea
ET
1495 } else {
1496 val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
615f8fd9 1497 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
a2fbb9ea
ET
1498 HC_CONFIG_0_REG_INT_LINE_EN_0 |
1499 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
615f8fd9 1500
a0fd065c 1501 if (!CHIP_IS_E1(bp)) {
51c1a580
MS
1502 DP(NETIF_MSG_IFUP,
1503 "write %x to HC %d (addr 0x%x)\n", val, port, addr);
615f8fd9 1504
a0fd065c 1505 REG_WR(bp, addr, val);
615f8fd9 1506
a0fd065c
DK
1507 val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
1508 }
a2fbb9ea
ET
1509 }
1510
a0fd065c
DK
1511 if (CHIP_IS_E1(bp))
1512 REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);
1513
51c1a580
MS
1514 DP(NETIF_MSG_IFUP,
1515 "write %x to HC %d (addr 0x%x) mode %s\n", val, port, addr,
1516 (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
a2fbb9ea
ET
1517
1518 REG_WR(bp, addr, val);
37dbbf32
EG
1519 /*
1520 * Ensure that HC_CONFIG is written before leading/trailing edge config
1521 */
1522 mmiowb();
1523 barrier();
34f80b04 1524
f2e0899f 1525 if (!CHIP_IS_E1(bp)) {
34f80b04 1526 /* init leading/trailing edge */
fb3bff17 1527 if (IS_MF(bp)) {
3395a033 1528 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
34f80b04 1529 if (bp->port.pmf)
4acac6a5
EG
1530 /* enable nig and gpio3 attention */
1531 val |= 0x1100;
34f80b04
EG
1532 } else
1533 val = 0xffff;
1534
1535 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1536 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1537 }
37dbbf32
EG
1538
1539 /* Make sure that interrupts are indeed enabled from here on */
1540 mmiowb();
a2fbb9ea
ET
1541}
1542
f2e0899f
DK
1543static void bnx2x_igu_int_enable(struct bnx2x *bp)
1544{
1545 u32 val;
30a5de77
DK
1546 bool msix = (bp->flags & USING_MSIX_FLAG) ? true : false;
1547 bool single_msix = (bp->flags & USING_SINGLE_MSIX_FLAG) ? true : false;
1548 bool msi = (bp->flags & USING_MSI_FLAG) ? true : false;
f2e0899f
DK
1549
1550 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1551
1552 if (msix) {
1553 val &= ~(IGU_PF_CONF_INT_LINE_EN |
1554 IGU_PF_CONF_SINGLE_ISR_EN);
ebe61d80 1555 val |= (IGU_PF_CONF_MSI_MSIX_EN |
f2e0899f 1556 IGU_PF_CONF_ATTN_BIT_EN);
30a5de77
DK
1557
1558 if (single_msix)
1559 val |= IGU_PF_CONF_SINGLE_ISR_EN;
f2e0899f
DK
1560 } else if (msi) {
1561 val &= ~IGU_PF_CONF_INT_LINE_EN;
ebe61d80 1562 val |= (IGU_PF_CONF_MSI_MSIX_EN |
f2e0899f
DK
1563 IGU_PF_CONF_ATTN_BIT_EN |
1564 IGU_PF_CONF_SINGLE_ISR_EN);
1565 } else {
1566 val &= ~IGU_PF_CONF_MSI_MSIX_EN;
ebe61d80 1567 val |= (IGU_PF_CONF_INT_LINE_EN |
f2e0899f
DK
1568 IGU_PF_CONF_ATTN_BIT_EN |
1569 IGU_PF_CONF_SINGLE_ISR_EN);
1570 }
1571
ebe61d80
YM
1572 /* Clean previous status - need to configure igu prior to ack*/
1573 if ((!msix) || single_msix) {
1574 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1575 bnx2x_ack_int(bp);
1576 }
1577
1578 val |= IGU_PF_CONF_FUNC_EN;
1579
51c1a580 1580 DP(NETIF_MSG_IFUP, "write 0x%x to IGU mode %s\n",
f2e0899f
DK
1581 val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));
1582
1583 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1584
79a8557a
YM
1585 if (val & IGU_PF_CONF_INT_LINE_EN)
1586 pci_intx(bp->pdev, true);
1587
f2e0899f
DK
1588 barrier();
1589
1590 /* init leading/trailing edge */
1591 if (IS_MF(bp)) {
3395a033 1592 val = (0xee0f | (1 << (BP_VN(bp) + 4)));
f2e0899f
DK
1593 if (bp->port.pmf)
1594 /* enable nig and gpio3 attention */
1595 val |= 0x1100;
1596 } else
1597 val = 0xffff;
1598
1599 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
1600 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
1601
1602 /* Make sure that interrupts are indeed enabled from here on */
1603 mmiowb();
1604}
1605
1606void bnx2x_int_enable(struct bnx2x *bp)
1607{
1608 if (bp->common.int_block == INT_BLOCK_HC)
1609 bnx2x_hc_int_enable(bp);
1610 else
1611 bnx2x_igu_int_enable(bp);
1612}
1613
9f6c9258 1614void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
a2fbb9ea 1615{
a2fbb9ea 1616 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8badd27a 1617 int i, offset;
a2fbb9ea 1618
f8ef6e44
YG
1619 if (disable_hw)
1620 /* prevent the HW from sending interrupts */
1621 bnx2x_int_disable(bp);
a2fbb9ea
ET
1622
1623 /* make sure all ISRs are done */
1624 if (msix) {
8badd27a
EG
1625 synchronize_irq(bp->msix_table[0].vector);
1626 offset = 1;
55c11941
MS
1627 if (CNIC_SUPPORT(bp))
1628 offset++;
ec6ba945 1629 for_each_eth_queue(bp, i)
754a2f52 1630 synchronize_irq(bp->msix_table[offset++].vector);
a2fbb9ea
ET
1631 } else
1632 synchronize_irq(bp->pdev->irq);
1633
1634 /* make sure sp_task is not running */
1cf167f2 1635 cancel_delayed_work(&bp->sp_task);
3deb8167 1636 cancel_delayed_work(&bp->period_task);
1cf167f2 1637 flush_workqueue(bnx2x_wq);
a2fbb9ea
ET
1638}
1639
34f80b04 1640/* fast path */
a2fbb9ea
ET
1641
1642/*
34f80b04 1643 * General service functions
a2fbb9ea
ET
1644 */
1645
72fd0718
VZ
 1646/* Return true if the lock was successfully acquired */
1647static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1648{
1649 u32 lock_status;
1650 u32 resource_bit = (1 << resource);
1651 int func = BP_FUNC(bp);
1652 u32 hw_lock_control_reg;
1653
51c1a580
MS
1654 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1655 "Trying to take a lock on resource %d\n", resource);
72fd0718
VZ
1656
1657 /* Validating that the resource is within range */
1658 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
51c1a580 1659 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
72fd0718
VZ
1660 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1661 resource, HW_LOCK_MAX_RESOURCE_VALUE);
0fdf4d09 1662 return false;
72fd0718
VZ
1663 }
1664
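	/* note (editor's illustration of the selection below): functions 0-5 each
	 * get a lock register in the MISC_REG_DRIVER_CONTROL_1 block, spaced 8
	 * bytes apart; functions 6-7 use the MISC_REG_DRIVER_CONTROL_7 block
	 */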
1665 if (func <= 5)
1666 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1667 else
1668 hw_lock_control_reg =
1669 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1670
1671 /* Try to acquire the lock */
1672 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1673 lock_status = REG_RD(bp, hw_lock_control_reg);
1674 if (lock_status & resource_bit)
1675 return true;
1676
51c1a580
MS
1677 DP(NETIF_MSG_HW | NETIF_MSG_IFUP,
1678 "Failed to get a lock on resource %d\n", resource);
72fd0718
VZ
1679 return false;
1680}
1681
c9ee9206
VZ
1682/**
1683 * bnx2x_get_leader_lock_resource - get the recovery leader resource id
1684 *
1685 * @bp: driver handle
1686 *
1687 * Returns the recovery leader resource id according to the engine this function
 1688 * belongs to. Currently only 2 engines are supported.
1689 */
1191cb83 1690static int bnx2x_get_leader_lock_resource(struct bnx2x *bp)
c9ee9206
VZ
1691{
1692 if (BP_PATH(bp))
1693 return HW_LOCK_RESOURCE_RECOVERY_LEADER_1;
1694 else
1695 return HW_LOCK_RESOURCE_RECOVERY_LEADER_0;
1696}
1697
1698/**
2de67439 1699 * bnx2x_trylock_leader_lock - try to acquire a leader lock.
c9ee9206
VZ
1700 *
1701 * @bp: driver handle
1702 *
2de67439 1703 * Tries to acquire a leader lock for the current engine.
c9ee9206 1704 */
1191cb83 1705static bool bnx2x_trylock_leader_lock(struct bnx2x *bp)
c9ee9206
VZ
1706{
1707 return bnx2x_trylock_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1708}
1709
619c5cb6 1710static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err);
55c11941 1711
fd1fc79d
AE
1712/* schedule the sp task and mark that interrupt occurred (runs from ISR) */
1713static int bnx2x_schedule_sp_task(struct bnx2x *bp)
1714{
1715 /* Set the interrupt occurred bit for the sp-task to recognize it
1716 * must ack the interrupt and transition according to the IGU
1717 * state machine.
1718 */
1719 atomic_set(&bp->interrupt_occurred, 1);
1720
1721 /* The sp_task must execute only after this bit
1722 * is set, otherwise we will get out of sync and miss all
1723 * further interrupts. Hence, the barrier.
1724 */
1725 smp_wmb();
1726
1727 /* schedule sp_task to workqueue */
1728 return queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
1729}
3196a88a 1730
619c5cb6 1731void bnx2x_sp_event(struct bnx2x_fastpath *fp, union eth_rx_cqe *rr_cqe)
a2fbb9ea
ET
1732{
1733 struct bnx2x *bp = fp->bp;
1734 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1735 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
619c5cb6 1736 enum bnx2x_queue_cmd drv_cmd = BNX2X_Q_CMD_MAX;
15192a8c 1737 struct bnx2x_queue_sp_obj *q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
a2fbb9ea 1738
34f80b04 1739 DP(BNX2X_MSG_SP,
a2fbb9ea 1740 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
0626b899 1741 fp->index, cid, command, bp->state,
34f80b04 1742 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea 1743
fd1fc79d
AE
1744 /* If cid is within VF range, replace the slowpath object with the
1745 * one corresponding to this VF
1746 */
1747 if (cid >= BNX2X_FIRST_VF_CID &&
1748 cid < BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)
1749 bnx2x_iov_set_queue_sp_obj(bp, cid, &q_obj);
1750
619c5cb6
VZ
1751 switch (command) {
1752 case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE):
d6cae238 1753 DP(BNX2X_MSG_SP, "got UPDATE ramrod. CID %d\n", cid);
619c5cb6
VZ
1754 drv_cmd = BNX2X_Q_CMD_UPDATE;
1755 break;
d6cae238 1756
619c5cb6 1757 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP):
d6cae238 1758 DP(BNX2X_MSG_SP, "got MULTI[%d] setup ramrod\n", cid);
619c5cb6 1759 drv_cmd = BNX2X_Q_CMD_SETUP;
a2fbb9ea
ET
1760 break;
1761
6383c0b3 1762 case (RAMROD_CMD_ID_ETH_TX_QUEUE_SETUP):
51c1a580 1763 DP(BNX2X_MSG_SP, "got MULTI[%d] tx-only setup ramrod\n", cid);
6383c0b3
AE
1764 drv_cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
1765 break;
1766
619c5cb6 1767 case (RAMROD_CMD_ID_ETH_HALT):
d6cae238 1768 DP(BNX2X_MSG_SP, "got MULTI[%d] halt ramrod\n", cid);
619c5cb6 1769 drv_cmd = BNX2X_Q_CMD_HALT;
a2fbb9ea
ET
1770 break;
1771
619c5cb6 1772 case (RAMROD_CMD_ID_ETH_TERMINATE):
d6cae238 1773 		DP(BNX2X_MSG_SP, "got MULTI[%d] terminate ramrod\n", cid);
619c5cb6 1774 drv_cmd = BNX2X_Q_CMD_TERMINATE;
a2fbb9ea
ET
1775 break;
1776
619c5cb6 1777 case (RAMROD_CMD_ID_ETH_EMPTY):
d6cae238 1778 DP(BNX2X_MSG_SP, "got MULTI[%d] empty ramrod\n", cid);
619c5cb6 1779 drv_cmd = BNX2X_Q_CMD_EMPTY;
993ac7b5 1780 break;
619c5cb6
VZ
1781
1782 default:
1783 BNX2X_ERR("unexpected MC reply (%d) on fp[%d]\n",
1784 command, fp->index);
1785 return;
523224a3 1786 }
3196a88a 1787
619c5cb6
VZ
1788 if ((drv_cmd != BNX2X_Q_CMD_MAX) &&
1789 q_obj->complete_cmd(bp, q_obj, drv_cmd))
1790 /* q_obj->complete_cmd() failure means that this was
1791 * an unexpected completion.
1792 *
1793 * In this case we don't want to increase the bp->spq_left
 1794		 * because apparently we haven't sent this command in the first
1795 * place.
1796 */
1797#ifdef BNX2X_STOP_ON_ERROR
1798 bnx2x_panic();
1799#else
1800 return;
1801#endif
fd1fc79d
AE
1802 /* SRIOV: reschedule any 'in_progress' operations */
1803 bnx2x_iov_sp_event(bp, cid, true);
619c5cb6 1804
8fe23fbd 1805 smp_mb__before_atomic_inc();
6e30dd4e 1806 atomic_inc(&bp->cq_spq_left);
619c5cb6
VZ
 1807	/* push the change in bp->cq_spq_left towards the memory */
1808 smp_mb__after_atomic_inc();
49d66772 1809
d6cae238
VZ
1810 DP(BNX2X_MSG_SP, "bp->cq_spq_left %x\n", atomic_read(&bp->cq_spq_left));
1811
a3348722
BW
1812 if ((drv_cmd == BNX2X_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) &&
1813 (!!test_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state))) {
1814 /* if Q update ramrod is completed for last Q in AFEX vif set
1815 * flow, then ACK MCP at the end
1816 *
 1817		 * mark the pending ACK-to-MCP bit and prevent the case where
 1818		 * both bits are cleared.
 1819		 * At the end of load/unload the driver checks that
2de67439 1820 * sp_state is cleared, and this order prevents
a3348722
BW
1821 * races
1822 */
1823 smp_mb__before_clear_bit();
1824 set_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK, &bp->sp_state);
1825 wmb();
1826 clear_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
1827 smp_mb__after_clear_bit();
1828
fd1fc79d
AE
1829 /* schedule the sp task as mcp ack is required */
1830 bnx2x_schedule_sp_task(bp);
a3348722
BW
1831 }
1832
523224a3 1833 return;
a2fbb9ea
ET
1834}
1835
9f6c9258 1836irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
a2fbb9ea 1837{
555f6c78 1838 struct bnx2x *bp = netdev_priv(dev_instance);
a2fbb9ea 1839 u16 status = bnx2x_ack_int(bp);
34f80b04 1840 u16 mask;
ca00392c 1841 int i;
6383c0b3 1842 u8 cos;
a2fbb9ea 1843
34f80b04 1844 /* Return here if interrupt is shared and it's not for us */
a2fbb9ea
ET
1845 if (unlikely(status == 0)) {
1846 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1847 return IRQ_NONE;
1848 }
f5372251 1849 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
a2fbb9ea 1850
3196a88a
EG
1851#ifdef BNX2X_STOP_ON_ERROR
1852 if (unlikely(bp->panic))
1853 return IRQ_HANDLED;
1854#endif
1855
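	/* note (editor's illustration of the masks below): bit 0 of the status
	 * word belongs to the default (slowpath) SB, bit 1 to the CNIC SB when
	 * CNIC is supported, and the following bits map to the ethernet
	 * fastpath SBs in queue order
	 */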
ec6ba945 1856 for_each_eth_queue(bp, i) {
ca00392c 1857 struct bnx2x_fastpath *fp = &bp->fp[i];
a2fbb9ea 1858
55c11941 1859 mask = 0x2 << (fp->index + CNIC_SUPPORT(bp));
ca00392c 1860 if (status & mask) {
619c5cb6 1861 /* Handle Rx or Tx according to SB id */
54b9ddaa 1862 prefetch(fp->rx_cons_sb);
6383c0b3 1863 for_each_cos_in_tx_queue(fp, cos)
65565884 1864 prefetch(fp->txdata_ptr[cos]->tx_cons_sb);
523224a3 1865 prefetch(&fp->sb_running_index[SM_RX_ID]);
54b9ddaa 1866 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
ca00392c
EG
1867 status &= ~mask;
1868 }
a2fbb9ea
ET
1869 }
1870
55c11941
MS
1871 if (CNIC_SUPPORT(bp)) {
1872 mask = 0x2;
1873 if (status & (mask | 0x1)) {
1874 struct cnic_ops *c_ops = NULL;
993ac7b5 1875
ad9b4359
MC
1876 rcu_read_lock();
1877 c_ops = rcu_dereference(bp->cnic_ops);
1878 if (c_ops && (bp->cnic_eth_dev.drv_state &
1879 CNIC_DRV_STATE_HANDLES_IRQ))
1880 c_ops->cnic_handler(bp->cnic_data, NULL);
1881 rcu_read_unlock();
993ac7b5 1882
55c11941
MS
1883 status &= ~mask;
1884 }
993ac7b5 1885 }
a2fbb9ea 1886
34f80b04 1887 if (unlikely(status & 0x1)) {
fd1fc79d
AE
1888
1889 /* schedule sp task to perform default status block work, ack
1890 * attentions and enable interrupts.
1891 */
1892 bnx2x_schedule_sp_task(bp);
a2fbb9ea
ET
1893
1894 status &= ~0x1;
1895 if (!status)
1896 return IRQ_HANDLED;
1897 }
1898
cdaa7cb8
VZ
1899 if (unlikely(status))
1900 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
34f80b04 1901 status);
a2fbb9ea 1902
c18487ee 1903 return IRQ_HANDLED;
a2fbb9ea
ET
1904}
1905
c18487ee
YR
1906/* Link */
1907
1908/*
1909 * General service functions
1910 */
a2fbb9ea 1911
9f6c9258 1912int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1913{
1914 u32 lock_status;
1915 u32 resource_bit = (1 << resource);
4a37fb66
YG
1916 int func = BP_FUNC(bp);
1917 u32 hw_lock_control_reg;
c18487ee 1918 int cnt;
a2fbb9ea 1919
c18487ee
YR
1920 /* Validating that the resource is within range */
1921 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
51c1a580 1922 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
c18487ee
YR
1923 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1924 return -EINVAL;
1925 }
a2fbb9ea 1926
4a37fb66
YG
1927 if (func <= 5) {
1928 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1929 } else {
1930 hw_lock_control_reg =
1931 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1932 }
1933
c18487ee 1934 /* Validating that the resource is not already taken */
4a37fb66 1935 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee 1936 if (lock_status & resource_bit) {
51c1a580 1937 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x\n",
c18487ee
YR
1938 lock_status, resource_bit);
1939 return -EEXIST;
1940 }
a2fbb9ea 1941
46230476
EG
 1942	/* Try for 5 seconds every 5ms */
1943 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1944 /* Try to acquire the lock */
4a37fb66
YG
1945 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1946 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1947 if (lock_status & resource_bit)
1948 return 0;
a2fbb9ea 1949
c18487ee 1950 msleep(5);
a2fbb9ea 1951 }
51c1a580 1952 BNX2X_ERR("Timeout\n");
c18487ee
YR
1953 return -EAGAIN;
1954}
a2fbb9ea 1955
c9ee9206
VZ
1956int bnx2x_release_leader_lock(struct bnx2x *bp)
1957{
1958 return bnx2x_release_hw_lock(bp, bnx2x_get_leader_lock_resource(bp));
1959}
1960
9f6c9258 1961int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1962{
1963 u32 lock_status;
1964 u32 resource_bit = (1 << resource);
4a37fb66
YG
1965 int func = BP_FUNC(bp);
1966 u32 hw_lock_control_reg;
a2fbb9ea 1967
c18487ee
YR
1968 /* Validating that the resource is within range */
1969 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
51c1a580 1970 BNX2X_ERR("resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
c18487ee
YR
1971 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1972 return -EINVAL;
1973 }
1974
4a37fb66
YG
1975 if (func <= 5) {
1976 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1977 } else {
1978 hw_lock_control_reg =
1979 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1980 }
1981
c18487ee 1982 /* Validating that the resource is currently taken */
4a37fb66 1983 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee 1984 if (!(lock_status & resource_bit)) {
51c1a580 1985 BNX2X_ERR("lock_status 0x%x resource_bit 0x%x. unlock was called but lock wasn't taken!\n",
c18487ee
YR
1986 lock_status, resource_bit);
1987 return -EFAULT;
a2fbb9ea
ET
1988 }
1989
9f6c9258
DK
1990 REG_WR(bp, hw_lock_control_reg, resource_bit);
1991 return 0;
c18487ee 1992}
a2fbb9ea 1993
9f6c9258 1994
4acac6a5
EG
1995int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1996{
1997 /* The GPIO should be swapped if swap register is set and active */
1998 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1999 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2000 int gpio_shift = gpio_num +
2001 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
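	/* note (editor's illustration): the XOR above flips the port when both
	 * the port-swap and strap-override registers read non-zero, so the
	 * swapped port's GPIO bank (offset by MISC_REGISTERS_GPIO_PORT_SHIFT
	 * bits) is used instead
	 */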
2002 u32 gpio_mask = (1 << gpio_shift);
2003 u32 gpio_reg;
2004 int value;
2005
2006 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2007 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2008 return -EINVAL;
2009 }
2010
2011 /* read GPIO value */
2012 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2013
2014 /* get the requested pin value */
2015 if ((gpio_reg & gpio_mask) == gpio_mask)
2016 value = 1;
2017 else
2018 value = 0;
2019
2020 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
2021
2022 return value;
2023}
2024
17de50b7 2025int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
2026{
2027 /* The GPIO should be swapped if swap register is set and active */
2028 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 2029 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
2030 int gpio_shift = gpio_num +
2031 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2032 u32 gpio_mask = (1 << gpio_shift);
2033 u32 gpio_reg;
a2fbb9ea 2034
c18487ee
YR
2035 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2036 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2037 return -EINVAL;
2038 }
a2fbb9ea 2039
4a37fb66 2040 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
2041 /* read GPIO and mask except the float bits */
2042 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 2043
c18487ee
YR
2044 switch (mode) {
2045 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
51c1a580
MS
2046 DP(NETIF_MSG_LINK,
2047 "Set GPIO %d (shift %d) -> output low\n",
c18487ee
YR
2048 gpio_num, gpio_shift);
2049 /* clear FLOAT and set CLR */
2050 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2051 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
2052 break;
a2fbb9ea 2053
c18487ee 2054 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
51c1a580
MS
2055 DP(NETIF_MSG_LINK,
2056 "Set GPIO %d (shift %d) -> output high\n",
c18487ee
YR
2057 gpio_num, gpio_shift);
2058 /* clear FLOAT and set SET */
2059 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2060 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
2061 break;
a2fbb9ea 2062
17de50b7 2063 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
51c1a580
MS
2064 DP(NETIF_MSG_LINK,
2065 "Set GPIO %d (shift %d) -> input\n",
c18487ee
YR
2066 gpio_num, gpio_shift);
2067 /* set FLOAT */
2068 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
2069 break;
a2fbb9ea 2070
c18487ee
YR
2071 default:
2072 break;
a2fbb9ea
ET
2073 }
2074
c18487ee 2075 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 2076 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 2077
c18487ee 2078 return 0;
a2fbb9ea
ET
2079}
2080
0d40f0d4
YR
2081int bnx2x_set_mult_gpio(struct bnx2x *bp, u8 pins, u32 mode)
2082{
2083 u32 gpio_reg = 0;
2084 int rc = 0;
2085
2086 /* Any port swapping should be handled by caller. */
2087
2088 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2089 /* read GPIO and mask except the float bits */
2090 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
2091 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2092 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_CLR_POS);
2093 gpio_reg &= ~(pins << MISC_REGISTERS_GPIO_SET_POS);
2094
2095 switch (mode) {
2096 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
2097 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output low\n", pins);
2098 /* set CLR */
2099 gpio_reg |= (pins << MISC_REGISTERS_GPIO_CLR_POS);
2100 break;
2101
2102 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
2103 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> output high\n", pins);
2104 /* set SET */
2105 gpio_reg |= (pins << MISC_REGISTERS_GPIO_SET_POS);
2106 break;
2107
2108 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
2109 DP(NETIF_MSG_LINK, "Set GPIO 0x%x -> input\n", pins);
2110 /* set FLOAT */
2111 gpio_reg |= (pins << MISC_REGISTERS_GPIO_FLOAT_POS);
2112 break;
2113
2114 default:
2115 BNX2X_ERR("Invalid GPIO mode assignment %d\n", mode);
2116 rc = -EINVAL;
2117 break;
2118 }
2119
2120 if (rc == 0)
2121 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
2122
2123 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2124
2125 return rc;
2126}
2127
4acac6a5
EG
2128int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
2129{
2130 /* The GPIO should be swapped if swap register is set and active */
2131 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
2132 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
2133 int gpio_shift = gpio_num +
2134 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
2135 u32 gpio_mask = (1 << gpio_shift);
2136 u32 gpio_reg;
2137
2138 if (gpio_num > MISC_REGISTERS_GPIO_3) {
2139 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
2140 return -EINVAL;
2141 }
2142
2143 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2144 /* read GPIO int */
2145 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
2146
2147 switch (mode) {
2148 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
51c1a580
MS
2149 DP(NETIF_MSG_LINK,
2150 "Clear GPIO INT %d (shift %d) -> output low\n",
2151 gpio_num, gpio_shift);
4acac6a5
EG
2152 /* clear SET and set CLR */
2153 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2154 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2155 break;
2156
2157 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
51c1a580
MS
2158 DP(NETIF_MSG_LINK,
2159 "Set GPIO INT %d (shift %d) -> output high\n",
2160 gpio_num, gpio_shift);
4acac6a5
EG
2161 /* clear CLR and set SET */
2162 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
2163 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
2164 break;
2165
2166 default:
2167 break;
2168 }
2169
2170 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
2171 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
2172
2173 return 0;
2174}
2175
d6d99a3f 2176static int bnx2x_set_spio(struct bnx2x *bp, int spio, u32 mode)
a2fbb9ea 2177{
c18487ee 2178 u32 spio_reg;
a2fbb9ea 2179
d6d99a3f
YM
2180 /* Only 2 SPIOs are configurable */
2181 if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) {
2182 BNX2X_ERR("Invalid SPIO 0x%x\n", spio);
c18487ee 2183 return -EINVAL;
a2fbb9ea
ET
2184 }
2185
4a37fb66 2186 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2187 /* read SPIO and mask except the float bits */
d6d99a3f 2188 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_SPIO_FLOAT);
a2fbb9ea 2189
c18487ee 2190 switch (mode) {
d6d99a3f
YM
2191 case MISC_SPIO_OUTPUT_LOW:
2192 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output low\n", spio);
c18487ee 2193 /* clear FLOAT and set CLR */
d6d99a3f
YM
2194 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2195 spio_reg |= (spio << MISC_SPIO_CLR_POS);
c18487ee 2196 break;
a2fbb9ea 2197
d6d99a3f
YM
2198 case MISC_SPIO_OUTPUT_HIGH:
2199 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> output high\n", spio);
c18487ee 2200 /* clear FLOAT and set SET */
d6d99a3f
YM
2201 spio_reg &= ~(spio << MISC_SPIO_FLOAT_POS);
2202 spio_reg |= (spio << MISC_SPIO_SET_POS);
c18487ee 2203 break;
a2fbb9ea 2204
d6d99a3f
YM
2205 case MISC_SPIO_INPUT_HI_Z:
2206 DP(NETIF_MSG_HW, "Set SPIO 0x%x -> input\n", spio);
c18487ee 2207 /* set FLOAT */
d6d99a3f 2208 spio_reg |= (spio << MISC_SPIO_FLOAT_POS);
c18487ee 2209 break;
a2fbb9ea 2210
c18487ee
YR
2211 default:
2212 break;
a2fbb9ea
ET
2213 }
2214
c18487ee 2215 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 2216 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 2217
a2fbb9ea
ET
2218 return 0;
2219}
2220
9f6c9258 2221void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 2222{
a22f0788 2223 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
ad33ea3a
EG
2224 switch (bp->link_vars.ieee_fc &
2225 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 2226 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
a22f0788 2227 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 2228 ADVERTISED_Pause);
c18487ee 2229 break;
356e2385 2230
c18487ee 2231 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
a22f0788 2232 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
f85582f8 2233 ADVERTISED_Pause);
c18487ee 2234 break;
356e2385 2235
c18487ee 2236 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
a22f0788 2237 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
c18487ee 2238 break;
356e2385 2239
c18487ee 2240 default:
a22f0788 2241 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
f85582f8 2242 ADVERTISED_Pause);
c18487ee
YR
2243 break;
2244 }
2245}
f1410647 2246
cd1dfce2 2247static void bnx2x_set_requested_fc(struct bnx2x *bp)
c18487ee 2248{
cd1dfce2
YM
2249 /* Initialize link parameters structure variables
2250 * It is recommended to turn off RX FC for jumbo frames
2251 * for better performance
2252 */
2253 if (CHIP_IS_E1x(bp) && (bp->dev->mtu > 5000))
2254 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
2255 else
2256 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
2257}
a2fbb9ea 2258
cd1dfce2
YM
2259int bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
2260{
2261 int rc, cfx_idx = bnx2x_get_link_cfg_idx(bp);
2262 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
2263
2264 if (!BP_NOMCP(bp)) {
2265 bnx2x_set_requested_fc(bp);
4a37fb66 2266 bnx2x_acquire_phy_lock(bp);
b5bf9068 2267
a22f0788 2268 if (load_mode == LOAD_DIAG) {
1cb0c788
YR
2269 struct link_params *lp = &bp->link_params;
2270 lp->loopback_mode = LOOPBACK_XGXS;
2271 /* do PHY loopback at 10G speed, if possible */
2272 if (lp->req_line_speed[cfx_idx] < SPEED_10000) {
2273 if (lp->speed_cap_mask[cfx_idx] &
2274 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
2275 lp->req_line_speed[cfx_idx] =
2276 SPEED_10000;
2277 else
2278 lp->req_line_speed[cfx_idx] =
2279 SPEED_1000;
2280 }
a22f0788 2281 }
b5bf9068 2282
8970b2e4
MS
2283 if (load_mode == LOAD_LOOPBACK_EXT) {
2284 struct link_params *lp = &bp->link_params;
2285 lp->loopback_mode = LOOPBACK_EXT;
2286 }
2287
19680c48 2288 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
b5bf9068 2289
4a37fb66 2290 bnx2x_release_phy_lock(bp);
a2fbb9ea 2291
3c96c68b
EG
2292 bnx2x_calc_fc_adv(bp);
2293
cd1dfce2 2294 if (bp->link_vars.link_up) {
b5bf9068 2295 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
19680c48 2296 bnx2x_link_report(bp);
cd1dfce2
YM
2297 }
2298 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
a22f0788 2299 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
19680c48
EG
2300 return rc;
2301 }
f5372251 2302 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
19680c48 2303 return -EINVAL;
a2fbb9ea
ET
2304}
2305
9f6c9258 2306void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2307{
19680c48 2308 if (!BP_NOMCP(bp)) {
4a37fb66 2309 bnx2x_acquire_phy_lock(bp);
19680c48 2310 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2311 bnx2x_release_phy_lock(bp);
a2fbb9ea 2312
19680c48
EG
2313 bnx2x_calc_fc_adv(bp);
2314 } else
f5372251 2315 BNX2X_ERR("Bootcode is missing - can not set link\n");
c18487ee 2316}
a2fbb9ea 2317
c18487ee
YR
2318static void bnx2x__link_reset(struct bnx2x *bp)
2319{
19680c48 2320 if (!BP_NOMCP(bp)) {
4a37fb66 2321 bnx2x_acquire_phy_lock(bp);
5d07d868 2322 bnx2x_lfa_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2323 bnx2x_release_phy_lock(bp);
19680c48 2324 } else
f5372251 2325 BNX2X_ERR("Bootcode is missing - can not reset link\n");
c18487ee 2326}
a2fbb9ea 2327
5d07d868
YM
2328void bnx2x_force_link_reset(struct bnx2x *bp)
2329{
2330 bnx2x_acquire_phy_lock(bp);
2331 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
2332 bnx2x_release_phy_lock(bp);
2333}
2334
a22f0788 2335u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
c18487ee 2336{
2145a920 2337 u8 rc = 0;
a2fbb9ea 2338
2145a920
VZ
2339 if (!BP_NOMCP(bp)) {
2340 bnx2x_acquire_phy_lock(bp);
a22f0788
YR
2341 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
2342 is_serdes);
2145a920
VZ
2343 bnx2x_release_phy_lock(bp);
2344 } else
2345 BNX2X_ERR("Bootcode is missing - can not test link\n");
a2fbb9ea 2346
c18487ee
YR
2347 return rc;
2348}
a2fbb9ea 2349
34f80b04 2350
2691d51d
EG
2351/* Calculates the sum of vn_min_rates.
2352 It's needed for further normalizing of the min_rates.
2353 Returns:
2354 sum of vn_min_rates.
2355 or
2356 0 - if all the min_rates are 0.
 2357   In the latter case the fairness algorithm should be deactivated.
 2358   If not all min_rates are zero then those that are zero will be set to 1.
2359 */
b475d78f
YM
2360static void bnx2x_calc_vn_min(struct bnx2x *bp,
2361 struct cmng_init_input *input)
2691d51d
EG
2362{
2363 int all_zero = 1;
2691d51d
EG
2364 int vn;
2365
3395a033 2366 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
f2e0899f 2367 u32 vn_cfg = bp->mf_config[vn];
2691d51d
EG
2368 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2369 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2370
2371 /* Skip hidden vns */
2372 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
b475d78f 2373 vn_min_rate = 0;
2691d51d 2374 /* If min rate is zero - set it to 1 */
b475d78f 2375 else if (!vn_min_rate)
2691d51d
EG
2376 vn_min_rate = DEF_MIN_RATE;
2377 else
2378 all_zero = 0;
2379
b475d78f 2380 input->vnic_min_rate[vn] = vn_min_rate;
2691d51d
EG
2381 }
2382
30ae438b
DK
2383 /* if ETS or all min rates are zeros - disable fairness */
2384 if (BNX2X_IS_ETS_ENABLED(bp)) {
b475d78f 2385 input->flags.cmng_enables &=
30ae438b
DK
2386 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2387 DP(NETIF_MSG_IFUP, "Fairness will be disabled due to ETS\n");
2388 } else if (all_zero) {
b475d78f 2389 input->flags.cmng_enables &=
b015e3d1 2390 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
b475d78f
YM
2391 DP(NETIF_MSG_IFUP,
2392 "All MIN values are zeroes fairness will be disabled\n");
b015e3d1 2393 } else
b475d78f 2394 input->flags.cmng_enables |=
b015e3d1 2395 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2691d51d
EG
2396}
2397
b475d78f
YM
2398static void bnx2x_calc_vn_max(struct bnx2x *bp, int vn,
2399 struct cmng_init_input *input)
34f80b04 2400{
b475d78f 2401 u16 vn_max_rate;
f2e0899f 2402 u32 vn_cfg = bp->mf_config[vn];
34f80b04 2403
b475d78f 2404 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
34f80b04 2405 vn_max_rate = 0;
b475d78f 2406 else {
faa6fcbb
DK
2407 u32 maxCfg = bnx2x_extract_max_cfg(bp, vn_cfg);
2408
b475d78f 2409 if (IS_MF_SI(bp)) {
faa6fcbb
DK
2410 /* maxCfg in percents of linkspeed */
2411 vn_max_rate = (bp->link_vars.line_speed * maxCfg) / 100;
b475d78f 2412 } else /* SD modes */
faa6fcbb
DK
2413 /* maxCfg is absolute in 100Mb units */
2414 vn_max_rate = maxCfg * 100;
34f80b04 2415 }
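	/* illustration (hypothetical values): maxCfg = 50 on a 20G link gives
	 * vn_max_rate = 10000 in SI mode (50% of line speed) but 5000 in SD
	 * mode (50 * 100 Mb)
	 */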
f85582f8 2416
b475d78f 2417 DP(NETIF_MSG_IFUP, "vn %d: vn_max_rate %d\n", vn, vn_max_rate);
34f80b04 2418
b475d78f 2419 input->vnic_max_rate[vn] = vn_max_rate;
34f80b04 2420}
f85582f8 2421
b475d78f 2422
523224a3
DK
2423static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2424{
2425 if (CHIP_REV_IS_SLOW(bp))
2426 return CMNG_FNS_NONE;
fb3bff17 2427 if (IS_MF(bp))
523224a3
DK
2428 return CMNG_FNS_MINMAX;
2429
2430 return CMNG_FNS_NONE;
2431}
2432
2ae17f66 2433void bnx2x_read_mf_cfg(struct bnx2x *bp)
523224a3 2434{
0793f83f 2435 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
523224a3
DK
2436
2437 if (BP_NOMCP(bp))
 2438		return; /* what should be the default value in this case */
2439
0793f83f
DK
2440 /* For 2 port configuration the absolute function number formula
2441 * is:
2442 * abs_func = 2 * vn + BP_PORT + BP_PATH
2443 *
2444 * and there are 4 functions per port
2445 *
2446 * For 4 port configuration it is
2447 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2448 *
2449 * and there are 2 functions per port
2450 */
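	/* illustration (hypothetical values): in a 2-port configuration, vn = 1
	 * on port 1 of path 0 gives abs_func = 2 * 1 + 1 + 0 = 3; in a 4-port
	 * configuration the same vn/port/path gives abs_func = 4 * 1 + 2 * 1 + 0 = 6
	 */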
3395a033 2451 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
0793f83f
DK
2452 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2453
2454 if (func >= E1H_FUNC_MAX)
2455 break;
2456
f2e0899f 2457 bp->mf_config[vn] =
523224a3
DK
2458 MF_CFG_RD(bp, func_mf_config[func].config);
2459 }
a3348722
BW
2460 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
2461 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
2462 bp->flags |= MF_FUNC_DIS;
2463 } else {
2464 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2465 bp->flags &= ~MF_FUNC_DIS;
2466 }
523224a3
DK
2467}
2468
2469static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2470{
b475d78f
YM
2471 struct cmng_init_input input;
2472 memset(&input, 0, sizeof(struct cmng_init_input));
2473
2474 input.port_rate = bp->link_vars.line_speed;
523224a3
DK
2475
2476 if (cmng_type == CMNG_FNS_MINMAX) {
2477 int vn;
2478
523224a3
DK
2479 /* read mf conf from shmem */
2480 if (read_cfg)
2481 bnx2x_read_mf_cfg(bp);
2482
523224a3 2483 		/* calculate vn_weight_sum and enable fairness if not 0 */
b475d78f 2484 bnx2x_calc_vn_min(bp, &input);
523224a3
DK
2485
2486 /* calculate and set min-max rate for each vn */
c4154f25 2487 if (bp->port.pmf)
3395a033 2488 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++)
b475d78f 2489 bnx2x_calc_vn_max(bp, vn, &input);
523224a3
DK
2490
2491 /* always enable rate shaping and fairness */
b475d78f 2492 input.flags.cmng_enables |=
523224a3 2493 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
b475d78f
YM
2494
2495 bnx2x_init_cmng(&input, &bp->cmng);
523224a3
DK
2496 return;
2497 }
2498
2499 /* rate shaping and fairness are disabled */
2500 DP(NETIF_MSG_IFUP,
2501 "rate shaping and fairness are disabled\n");
2502}
34f80b04 2503
1191cb83
ED
2504static void storm_memset_cmng(struct bnx2x *bp,
2505 struct cmng_init *cmng,
2506 u8 port)
2507{
2508 int vn;
2509 size_t size = sizeof(struct cmng_struct_per_port);
2510
2511 u32 addr = BAR_XSTRORM_INTMEM +
2512 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port);
2513
2514 __storm_memset_struct(bp, addr, size, (u32 *)&cmng->port);
2515
2516 for (vn = VN_0; vn < BP_MAX_VN_NUM(bp); vn++) {
2517 int func = func_by_vn(bp, vn);
2518
2519 addr = BAR_XSTRORM_INTMEM +
2520 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func);
2521 size = sizeof(struct rate_shaping_vars_per_vn);
2522 __storm_memset_struct(bp, addr, size,
2523 (u32 *)&cmng->vnic.vnic_max_rate[vn]);
2524
2525 addr = BAR_XSTRORM_INTMEM +
2526 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func);
2527 size = sizeof(struct fairness_vars_per_vn);
2528 __storm_memset_struct(bp, addr, size,
2529 (u32 *)&cmng->vnic.vnic_min_rate[vn]);
2530 }
2531}
2532
c18487ee
YR
2533/* This function is called upon link interrupt */
2534static void bnx2x_link_attn(struct bnx2x *bp)
2535{
bb2a0f7a
YG
2536 /* Make sure that we are synced with the current statistics */
2537 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2538
c18487ee 2539 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2540
bb2a0f7a
YG
2541 if (bp->link_vars.link_up) {
2542
1c06328c 2543 /* dropless flow control */
f2e0899f 2544 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
1c06328c
EG
2545 int port = BP_PORT(bp);
2546 u32 pause_enabled = 0;
2547
2548 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2549 pause_enabled = 1;
2550
2551 REG_WR(bp, BAR_USTRORM_INTMEM +
ca00392c 2552 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1c06328c
EG
2553 pause_enabled);
2554 }
2555
619c5cb6 2556 if (bp->link_vars.mac_type != MAC_TYPE_EMAC) {
bb2a0f7a
YG
2557 struct host_port_stats *pstats;
2558
2559 pstats = bnx2x_sp(bp, port_stats);
619c5cb6 2560 /* reset old mac stats */
bb2a0f7a
YG
2561 memset(&(pstats->mac_stx[0]), 0,
2562 sizeof(struct mac_stx));
2563 }
f34d28ea 2564 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a
YG
2565 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2566 }
2567
f2e0899f
DK
2568 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2569 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
8a1c38d1 2570
f2e0899f
DK
2571 if (cmng_fns != CMNG_FNS_NONE) {
2572 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2573 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2574 } else
2575 /* rate shaping and fairness are disabled */
2576 DP(NETIF_MSG_IFUP,
2577 "single function mode without fairness\n");
34f80b04 2578 }
9fdc3e95 2579
2ae17f66
VZ
2580 __bnx2x_link_report(bp);
2581
9fdc3e95
DK
2582 if (IS_MF(bp))
2583 bnx2x_link_sync_notify(bp);
c18487ee 2584}
a2fbb9ea 2585
9f6c9258 2586void bnx2x__link_status_update(struct bnx2x *bp)
c18487ee 2587{
2ae17f66 2588 if (bp->state != BNX2X_STATE_OPEN)
c18487ee 2589 return;
a2fbb9ea 2590
00253a8c 2591 /* read updated dcb configuration */
ad5afc89
AE
2592 if (IS_PF(bp)) {
2593 bnx2x_dcbx_pmf_update(bp);
2594 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2595 if (bp->link_vars.link_up)
2596 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2597 else
2598 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2599 /* indicate link status */
2600 bnx2x_link_report(bp);
a2fbb9ea 2601
ad5afc89
AE
2602 } else { /* VF */
2603 bp->port.supported[0] |= (SUPPORTED_10baseT_Half |
2604 SUPPORTED_10baseT_Full |
2605 SUPPORTED_100baseT_Half |
2606 SUPPORTED_100baseT_Full |
2607 SUPPORTED_1000baseT_Full |
2608 SUPPORTED_2500baseX_Full |
2609 SUPPORTED_10000baseT_Full |
2610 SUPPORTED_TP |
2611 SUPPORTED_FIBRE |
2612 SUPPORTED_Autoneg |
2613 SUPPORTED_Pause |
2614 SUPPORTED_Asym_Pause);
2615 bp->port.advertising[0] = bp->port.supported[0];
2616
2617 bp->link_params.bp = bp;
2618 bp->link_params.port = BP_PORT(bp);
2619 bp->link_params.req_duplex[0] = DUPLEX_FULL;
2620 bp->link_params.req_flow_ctrl[0] = BNX2X_FLOW_CTRL_NONE;
2621 bp->link_params.req_line_speed[0] = SPEED_10000;
2622 bp->link_params.speed_cap_mask[0] = 0x7f0000;
2623 bp->link_params.switch_cfg = SWITCH_CFG_10G;
2624 bp->link_vars.mac_type = MAC_TYPE_BMAC;
2625 bp->link_vars.line_speed = SPEED_10000;
2626 bp->link_vars.link_status =
2627 (LINK_STATUS_LINK_UP |
2628 LINK_STATUS_SPEED_AND_DUPLEX_10GTFD);
2629 bp->link_vars.link_up = 1;
2630 bp->link_vars.duplex = DUPLEX_FULL;
2631 bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
2632 __bnx2x_link_report(bp);
bb2a0f7a 2633 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
ad5afc89 2634 }
a2fbb9ea 2635}
a2fbb9ea 2636
a3348722
BW
2637static int bnx2x_afex_func_update(struct bnx2x *bp, u16 vifid,
2638 u16 vlan_val, u8 allowed_prio)
2639{
86564c3f 2640 struct bnx2x_func_state_params func_params = {NULL};
a3348722
BW
2641 struct bnx2x_func_afex_update_params *f_update_params =
2642 &func_params.params.afex_update;
2643
2644 func_params.f_obj = &bp->func_obj;
2645 func_params.cmd = BNX2X_F_CMD_AFEX_UPDATE;
2646
2647 /* no need to wait for RAMROD completion, so don't
2648 * set RAMROD_COMP_WAIT flag
2649 */
2650
2651 f_update_params->vif_id = vifid;
2652 f_update_params->afex_default_vlan = vlan_val;
2653 f_update_params->allowed_priorities = allowed_prio;
2654
 2655	/* if the ramrod cannot be sent, respond to MCP immediately */
2656 if (bnx2x_func_state_change(bp, &func_params) < 0)
2657 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
2658
2659 return 0;
2660}
2661
2662static int bnx2x_afex_handle_vif_list_cmd(struct bnx2x *bp, u8 cmd_type,
2663 u16 vif_index, u8 func_bit_map)
2664{
86564c3f 2665 struct bnx2x_func_state_params func_params = {NULL};
a3348722
BW
2666 struct bnx2x_func_afex_viflists_params *update_params =
2667 &func_params.params.afex_viflists;
2668 int rc;
2669 u32 drv_msg_code;
2670
2671 /* validate only LIST_SET and LIST_GET are received from switch */
2672 if ((cmd_type != VIF_LIST_RULE_GET) && (cmd_type != VIF_LIST_RULE_SET))
2673 BNX2X_ERR("BUG! afex_handle_vif_list_cmd invalid type 0x%x\n",
2674 cmd_type);
2675
2676 func_params.f_obj = &bp->func_obj;
2677 func_params.cmd = BNX2X_F_CMD_AFEX_VIFLISTS;
2678
2679 /* set parameters according to cmd_type */
2680 update_params->afex_vif_list_command = cmd_type;
86564c3f 2681 update_params->vif_list_index = vif_index;
a3348722
BW
2682 update_params->func_bit_map =
2683 (cmd_type == VIF_LIST_RULE_GET) ? 0 : func_bit_map;
2684 update_params->func_to_clear = 0;
2685 drv_msg_code =
2686 (cmd_type == VIF_LIST_RULE_GET) ?
2687 DRV_MSG_CODE_AFEX_LISTGET_ACK :
2688 DRV_MSG_CODE_AFEX_LISTSET_ACK;
2689
2690 /* if ramrod can not be sent, respond to MCP immediately for
 2691	 * SET and GET requests (others are not triggered from MCP)
2692 */
2693 rc = bnx2x_func_state_change(bp, &func_params);
2694 if (rc < 0)
2695 bnx2x_fw_command(bp, drv_msg_code, 0);
2696
2697 return 0;
2698}
2699
2700static void bnx2x_handle_afex_cmd(struct bnx2x *bp, u32 cmd)
2701{
2702 struct afex_stats afex_stats;
2703 u32 func = BP_ABS_FUNC(bp);
2704 u32 mf_config;
2705 u16 vlan_val;
2706 u32 vlan_prio;
2707 u16 vif_id;
2708 u8 allowed_prio;
2709 u8 vlan_mode;
2710 u32 addr_to_write, vifid, addrs, stats_type, i;
2711
2712 if (cmd & DRV_STATUS_AFEX_LISTGET_REQ) {
2713 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2714 DP(BNX2X_MSG_MCP,
2715 "afex: got MCP req LISTGET_REQ for vifid 0x%x\n", vifid);
2716 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_GET, vifid, 0);
2717 }
2718
2719 if (cmd & DRV_STATUS_AFEX_LISTSET_REQ) {
2720 vifid = SHMEM2_RD(bp, afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2721 addrs = SHMEM2_RD(bp, afex_param2_to_driver[BP_FW_MB_IDX(bp)]);
2722 DP(BNX2X_MSG_MCP,
2723 "afex: got MCP req LISTSET_REQ for vifid 0x%x addrs 0x%x\n",
2724 vifid, addrs);
2725 bnx2x_afex_handle_vif_list_cmd(bp, VIF_LIST_RULE_SET, vifid,
2726 addrs);
2727 }
2728
2729 if (cmd & DRV_STATUS_AFEX_STATSGET_REQ) {
2730 addr_to_write = SHMEM2_RD(bp,
2731 afex_scratchpad_addr_to_write[BP_FW_MB_IDX(bp)]);
2732 stats_type = SHMEM2_RD(bp,
2733 afex_param1_to_driver[BP_FW_MB_IDX(bp)]);
2734
2735 DP(BNX2X_MSG_MCP,
2736 "afex: got MCP req STATSGET_REQ, write to addr 0x%x\n",
2737 addr_to_write);
2738
2739 bnx2x_afex_collect_stats(bp, (void *)&afex_stats, stats_type);
2740
2741 /* write response to scratchpad, for MCP */
2742 for (i = 0; i < (sizeof(struct afex_stats)/sizeof(u32)); i++)
2743 REG_WR(bp, addr_to_write + i*sizeof(u32),
2744 *(((u32 *)(&afex_stats))+i));
2745
2746 /* send ack message to MCP */
2747 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_STATSGET_ACK, 0);
2748 }
2749
2750 if (cmd & DRV_STATUS_AFEX_VIFSET_REQ) {
2751 mf_config = MF_CFG_RD(bp, func_mf_config[func].config);
2752 bp->mf_config[BP_VN(bp)] = mf_config;
2753 DP(BNX2X_MSG_MCP,
2754 "afex: got MCP req VIFSET_REQ, mf_config 0x%x\n",
2755 mf_config);
2756
2757 /* if VIF_SET is "enabled" */
2758 if (!(mf_config & FUNC_MF_CFG_FUNC_DISABLED)) {
2759 /* set rate limit directly to internal RAM */
2760 struct cmng_init_input cmng_input;
2761 struct rate_shaping_vars_per_vn m_rs_vn;
2762 size_t size = sizeof(struct rate_shaping_vars_per_vn);
2763 u32 addr = BAR_XSTRORM_INTMEM +
2764 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(BP_FUNC(bp));
2765
2766 bp->mf_config[BP_VN(bp)] = mf_config;
2767
2768 bnx2x_calc_vn_max(bp, BP_VN(bp), &cmng_input);
2769 m_rs_vn.vn_counter.rate =
2770 cmng_input.vnic_max_rate[BP_VN(bp)];
2771 m_rs_vn.vn_counter.quota =
2772 (m_rs_vn.vn_counter.rate *
2773 RS_PERIODIC_TIMEOUT_USEC) / 8;
2774
2775 __storm_memset_struct(bp, addr, size, (u32 *)&m_rs_vn);
2776
2777 /* read relevant values from mf_cfg struct in shmem */
2778 vif_id =
2779 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2780 FUNC_MF_CFG_E1HOV_TAG_MASK) >>
2781 FUNC_MF_CFG_E1HOV_TAG_SHIFT;
2782 vlan_val =
2783 (MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
2784 FUNC_MF_CFG_AFEX_VLAN_MASK) >>
2785 FUNC_MF_CFG_AFEX_VLAN_SHIFT;
2786 vlan_prio = (mf_config &
2787 FUNC_MF_CFG_TRANSMIT_PRIORITY_MASK) >>
2788 FUNC_MF_CFG_TRANSMIT_PRIORITY_SHIFT;
2789 vlan_val |= (vlan_prio << VLAN_PRIO_SHIFT);
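			/* note (editor's illustration): vlan_val now carries the
			 * VID read from e1hov_tag with the transmit priority
			 * folded into the PCP bits via VLAN_PRIO_SHIFT
			 */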
2790 vlan_mode =
2791 (MF_CFG_RD(bp,
2792 func_mf_config[func].afex_config) &
2793 FUNC_MF_CFG_AFEX_VLAN_MODE_MASK) >>
2794 FUNC_MF_CFG_AFEX_VLAN_MODE_SHIFT;
2795 allowed_prio =
2796 (MF_CFG_RD(bp,
2797 func_mf_config[func].afex_config) &
2798 FUNC_MF_CFG_AFEX_COS_FILTER_MASK) >>
2799 FUNC_MF_CFG_AFEX_COS_FILTER_SHIFT;
2800
2801 /* send ramrod to FW, return in case of failure */
2802 if (bnx2x_afex_func_update(bp, vif_id, vlan_val,
2803 allowed_prio))
2804 return;
2805
2806 bp->afex_def_vlan_tag = vlan_val;
2807 bp->afex_vlan_mode = vlan_mode;
2808 } else {
 2809			/* notify link down because the function has been disabled */
2810 bnx2x_link_report(bp);
2811
2812 /* send INVALID VIF ramrod to FW */
2813 bnx2x_afex_func_update(bp, 0xFFFF, 0, 0);
2814
2815 /* Reset the default afex VLAN */
2816 bp->afex_def_vlan_tag = -1;
2817 }
2818 }
2819}
2820
34f80b04
EG
2821static void bnx2x_pmf_update(struct bnx2x *bp)
2822{
2823 int port = BP_PORT(bp);
2824 u32 val;
2825
2826 bp->port.pmf = 1;
51c1a580 2827 DP(BNX2X_MSG_MCP, "pmf %d\n", bp->port.pmf);
34f80b04 2828
3deb8167
YR
2829 /*
2830 * We need the mb() to ensure the ordering between the writing to
2831 * bp->port.pmf here and reading it from the bnx2x_periodic_task().
2832 */
2833 smp_mb();
2834
2835 /* queue a periodic task */
2836 queue_delayed_work(bnx2x_wq, &bp->period_task, 0);
2837
ef01854e
DK
2838 bnx2x_dcbx_pmf_update(bp);
2839
34f80b04 2840 /* enable nig attention */
3395a033 2841 val = (0xff0f | (1 << (BP_VN(bp) + 4)));
f2e0899f
DK
2842 if (bp->common.int_block == INT_BLOCK_HC) {
2843 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2844 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
619c5cb6 2845 } else if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
2846 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2847 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2848 }
bb2a0f7a
YG
2849
2850 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2851}
2852
c18487ee 2853/* end of Link */
a2fbb9ea
ET
2854
2855/* slow path */
2856
2857/*
2858 * General service functions
2859 */
2860
2691d51d 2861/* send the MCP a request, block until there is a reply */
a22f0788 2862u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2691d51d 2863{
f2e0899f 2864 int mb_idx = BP_FW_MB_IDX(bp);
a5971d43 2865 u32 seq;
2691d51d
EG
2866 u32 rc = 0;
2867 u32 cnt = 1;
2868 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2869
c4ff7cbf 2870 mutex_lock(&bp->fw_mb_mutex);
a5971d43 2871 seq = ++bp->fw_seq;
f2e0899f
DK
2872 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
2873 SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));
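	/* note (editor's illustration): the command code and sequence number
	 * share the mailbox header word; the FW echoes the sequence back in
	 * fw_mb_header, which is what the FW_MSG_SEQ_NUMBER_MASK check below
	 * matches against
	 */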
2874
754a2f52
DK
2875 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB param 0x%08x\n",
2876 (command | seq), param);
2691d51d
EG
2877
2878 do {
 2879		/* let the FW do its magic ... */
2880 msleep(delay);
2881
f2e0899f 2882 rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);
2691d51d 2883
c4ff7cbf
EG
 2884	/* Give the FW up to 5 seconds (500*10ms) */
2885 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2691d51d
EG
2886
2887 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2888 cnt*delay, rc, seq);
2889
2890 /* is this a reply to our command? */
2891 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
2892 rc &= FW_MSG_CODE_MASK;
2893 else {
2894 /* FW BUG! */
2895 BNX2X_ERR("FW failed to respond!\n");
2896 bnx2x_fw_dump(bp);
2897 rc = 0;
2898 }
c4ff7cbf 2899 mutex_unlock(&bp->fw_mb_mutex);
2691d51d
EG
2900
2901 return rc;
2902}
2903
ec6ba945 2904
1191cb83
ED
2905static void storm_memset_func_cfg(struct bnx2x *bp,
2906 struct tstorm_eth_function_common_config *tcfg,
2907 u16 abs_fid)
2908{
2909 size_t size = sizeof(struct tstorm_eth_function_common_config);
2910
2911 u32 addr = BAR_TSTRORM_INTMEM +
2912 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
2913
2914 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
2915}
2916
619c5cb6
VZ
2917void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2918{
2919 if (CHIP_IS_E1x(bp)) {
2920 struct tstorm_eth_function_common_config tcfg = {0};
2921
2922 storm_memset_func_cfg(bp, &tcfg, p->func_id);
2923 }
2924
2925 /* Enable the function in the FW */
2926 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2927 storm_memset_func_en(bp, p->func_id, 1);
2928
2929 /* spq */
2930 if (p->func_flgs & FUNC_FLG_SPQ) {
2931 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2932 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2933 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2934 }
2935}
2936
6383c0b3
AE
2937/**
 2938 * bnx2x_get_common_flags - Return common flags
 2939 *
 2940 * @bp:		device handle
 2941 * @fp:		queue handle
 2942 * @zero_stats:	TRUE if statistics zeroing is needed
 2943 *
 2944 * Return the flags that are common for the Tx-only and regular connections.
2945 */
1191cb83
ED
2946static unsigned long bnx2x_get_common_flags(struct bnx2x *bp,
2947 struct bnx2x_fastpath *fp,
2948 bool zero_stats)
28912902 2949{
619c5cb6
VZ
2950 unsigned long flags = 0;
2951
2952 /* PF driver will always initialize the Queue to an ACTIVE state */
2953 __set_bit(BNX2X_Q_FLG_ACTIVE, &flags);
28912902 2954
6383c0b3 2955 /* tx only connections collect statistics (on the same index as the
91226790
DK
2956 * parent connection). The statistics are zeroed when the parent
2957 * connection is initialized.
6383c0b3 2958 */
50f0a562
BW
2959
2960 __set_bit(BNX2X_Q_FLG_STATS, &flags);
2961 if (zero_stats)
2962 __set_bit(BNX2X_Q_FLG_ZERO_STATS, &flags);
2963
91226790 2964 __set_bit(BNX2X_Q_FLG_PCSUM_ON_PKT, &flags);
e287a75c 2965 __set_bit(BNX2X_Q_FLG_TUN_INC_INNER_IP_ID, &flags);
6383c0b3 2966
823e1d90
YM
2967#ifdef BNX2X_STOP_ON_ERROR
2968 __set_bit(BNX2X_Q_FLG_TX_SEC, &flags);
2969#endif
2970
6383c0b3
AE
2971 return flags;
2972}
2973
1191cb83
ED
2974static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
2975 struct bnx2x_fastpath *fp,
2976 bool leading)
6383c0b3
AE
2977{
2978 unsigned long flags = 0;
2979
619c5cb6
VZ
2980 /* calculate other queue flags */
2981 if (IS_MF_SD(bp))
2982 __set_bit(BNX2X_Q_FLG_OV, &flags);
28912902 2983
a3348722 2984 if (IS_FCOE_FP(fp)) {
619c5cb6 2985 __set_bit(BNX2X_Q_FLG_FCOE, &flags);
a3348722
BW
2986 /* For FCoE - force usage of default priority (for afex) */
2987 __set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
2988 }
523224a3 2989
f5219d8e 2990 if (!fp->disable_tpa) {
619c5cb6 2991 __set_bit(BNX2X_Q_FLG_TPA, &flags);
f5219d8e 2992 __set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
621b4d66
DK
2993 if (fp->mode == TPA_MODE_GRO)
2994 __set_bit(BNX2X_Q_FLG_TPA_GRO, &flags);
f5219d8e 2995 }
619c5cb6 2996
619c5cb6
VZ
2997 if (leading) {
2998 __set_bit(BNX2X_Q_FLG_LEADING_RSS, &flags);
2999 __set_bit(BNX2X_Q_FLG_MCAST, &flags);
3000 }
523224a3 3001
619c5cb6
VZ
3002 /* Always set HW VLAN stripping */
3003 __set_bit(BNX2X_Q_FLG_VLAN, &flags);
523224a3 3004
a3348722
BW
3005 /* configure silent vlan removal */
3006 if (IS_MF_AFEX(bp))
3007 __set_bit(BNX2X_Q_FLG_SILENT_VLAN_REM, &flags);
3008
6383c0b3
AE
3009
3010 return flags | bnx2x_get_common_flags(bp, fp, true);
523224a3
DK
3011}
3012
619c5cb6 3013static void bnx2x_pf_q_prep_general(struct bnx2x *bp,
6383c0b3
AE
3014 struct bnx2x_fastpath *fp, struct bnx2x_general_setup_params *gen_init,
3015 u8 cos)
619c5cb6
VZ
3016{
3017 gen_init->stat_id = bnx2x_stats_id(fp);
3018 gen_init->spcl_id = fp->cl_id;
3019
3020 /* Always use mini-jumbo MTU for FCoE L2 ring */
3021 if (IS_FCOE_FP(fp))
3022 gen_init->mtu = BNX2X_FCOE_MINI_JUMBO_MTU;
3023 else
3024 gen_init->mtu = bp->dev->mtu;
6383c0b3
AE
3025
3026 gen_init->cos = cos;
619c5cb6
VZ
3027}
3028
3029static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
523224a3 3030 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
619c5cb6 3031 struct bnx2x_rxq_setup_params *rxq_init)
523224a3 3032{
619c5cb6 3033 u8 max_sge = 0;
523224a3
DK
3034 u16 sge_sz = 0;
3035 u16 tpa_agg_size = 0;
3036
523224a3 3037 if (!fp->disable_tpa) {
dfacf138
DK
3038 pause->sge_th_lo = SGE_TH_LO(bp);
3039 pause->sge_th_hi = SGE_TH_HI(bp);
3040
3041 /* validate SGE ring has enough to cross high threshold */
3042 WARN_ON(bp->dropless_fc &&
3043 pause->sge_th_hi + FW_PREFETCH_CNT >
3044 MAX_RX_SGE_CNT * NUM_RX_SGE_PAGES);
3045
924d75ab 3046 tpa_agg_size = TPA_AGG_SIZE;
523224a3
DK
3047 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
3048 SGE_PAGE_SHIFT;
3049 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
3050 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
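		/* note (editor's illustration): max_sge was the SGE-page count for
		 * one MTU; the statement above rounds it up to a whole multiple of
		 * PAGES_PER_SGE and converts it to a number of SGE elements
		 * (assuming PAGES_PER_SGE == 1 << PAGES_PER_SGE_SHIFT)
		 */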
924d75ab 3051 sge_sz = (u16)min_t(u32, SGE_PAGES, 0xffff);
523224a3
DK
3052 }
3053
3054 /* pause - not for e1 */
3055 if (!CHIP_IS_E1(bp)) {
dfacf138
DK
3056 pause->bd_th_lo = BD_TH_LO(bp);
3057 pause->bd_th_hi = BD_TH_HI(bp);
3058
3059 pause->rcq_th_lo = RCQ_TH_LO(bp);
3060 pause->rcq_th_hi = RCQ_TH_HI(bp);
3061 /*
3062 * validate that rings have enough entries to cross
3063 * high thresholds
3064 */
3065 WARN_ON(bp->dropless_fc &&
3066 pause->bd_th_hi + FW_PREFETCH_CNT >
3067 bp->rx_ring_size);
3068 WARN_ON(bp->dropless_fc &&
3069 pause->rcq_th_hi + FW_PREFETCH_CNT >
3070 NUM_RCQ_RINGS * MAX_RCQ_DESC_CNT);
619c5cb6 3071
523224a3
DK
3072 pause->pri_map = 1;
3073 }
3074
3075 /* rxq setup */
523224a3
DK
3076 rxq_init->dscr_map = fp->rx_desc_mapping;
3077 rxq_init->sge_map = fp->rx_sge_mapping;
3078 rxq_init->rcq_map = fp->rx_comp_mapping;
3079 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
a8c94b91 3080
619c5cb6
VZ
 3081	/* This should be the maximum number of data bytes that may be
 3082	 * placed on the BD (not including padding).
3083 */
e52fcb24
ED
3084 rxq_init->buf_sz = fp->rx_buf_size - BNX2X_FW_RX_ALIGN_START -
3085 BNX2X_FW_RX_ALIGN_END - IP_HEADER_ALIGNMENT_PADDING;
a8c94b91 3086
523224a3 3087 rxq_init->cl_qzone_id = fp->cl_qzone_id;
523224a3
DK
3088 rxq_init->tpa_agg_sz = tpa_agg_size;
3089 rxq_init->sge_buf_sz = sge_sz;
3090 rxq_init->max_sges_pkt = max_sge;
619c5cb6 3091 rxq_init->rss_engine_id = BP_FUNC(bp);
259afa1f 3092 rxq_init->mcast_engine_id = BP_FUNC(bp);
619c5cb6
VZ
3093
 3094	/* Maximum number of simultaneous TPA aggregations for this Queue.
3095 *
2de67439 3096 * For PF Clients it should be the maximum available number.
619c5cb6
VZ
3097 * VF driver(s) may want to define it to a smaller value.
3098 */
dfacf138 3099 rxq_init->max_tpa_queues = MAX_AGG_QS(bp);
619c5cb6 3100
523224a3
DK
3101 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
3102 rxq_init->fw_sb_id = fp->fw_sb_id;
3103
ec6ba945
VZ
3104 if (IS_FCOE_FP(fp))
3105 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
3106 else
6383c0b3 3107 rxq_init->sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
a3348722
BW
3108 /* configure silent vlan removal
3109 * if multi function mode is afex, then mask default vlan
3110 */
3111 if (IS_MF_AFEX(bp)) {
3112 rxq_init->silent_removal_value = bp->afex_def_vlan_tag;
3113 rxq_init->silent_removal_mask = VLAN_VID_MASK;
3114 }
523224a3
DK
3115}
3116
619c5cb6 3117static void bnx2x_pf_tx_q_prep(struct bnx2x *bp,
6383c0b3
AE
3118 struct bnx2x_fastpath *fp, struct bnx2x_txq_setup_params *txq_init,
3119 u8 cos)
523224a3 3120{
65565884 3121 txq_init->dscr_map = fp->txdata_ptr[cos]->tx_desc_mapping;
6383c0b3 3122 txq_init->sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS + cos;
523224a3
DK
3123 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
3124 txq_init->fw_sb_id = fp->fw_sb_id;
ec6ba945 3125
619c5cb6
VZ
3126 /*
 3127	 * set the tss leading client id for TX classification ==
3128 * leading RSS client id
3129 */
3130 txq_init->tss_leading_cl_id = bnx2x_fp(bp, 0, cl_id);
3131
ec6ba945
VZ
3132 if (IS_FCOE_FP(fp)) {
3133 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
3134 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
3135 }
523224a3
DK
3136}
3137
8d96286a 3138static void bnx2x_pf_init(struct bnx2x *bp)
523224a3
DK
3139{
3140 struct bnx2x_func_init_params func_init = {0};
523224a3
DK
3141 struct event_ring_data eq_data = { {0} };
3142 u16 flags;
3143
619c5cb6 3144 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
3145 /* reset IGU PF statistics: MSIX + ATTN */
3146 /* PF */
3147 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3148 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3149 (CHIP_MODE_IS_4_PORT(bp) ?
3150 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3151 /* ATTN */
3152 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
3153 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
3154 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
3155 (CHIP_MODE_IS_4_PORT(bp) ?
3156 BP_FUNC(bp) : BP_VN(bp))*4, 0);
3157 }
3158
523224a3
DK
3159 /* function setup flags */
3160 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
3161
619c5cb6
VZ
3162 /* This flag is relevant for E1x only.
 3163	 * E2 doesn't have a TPA configuration at the function level.
523224a3 3164 */
619c5cb6 3165 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
523224a3
DK
3166
3167 func_init.func_flgs = flags;
3168 func_init.pf_id = BP_FUNC(bp);
3169 func_init.func_id = BP_FUNC(bp);
523224a3
DK
3170 func_init.spq_map = bp->spq_mapping;
3171 func_init.spq_prod = bp->spq_prod_idx;
3172
3173 bnx2x_func_init(bp, &func_init);
3174
3175 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3176
3177 /*
619c5cb6
VZ
 3178	 * Congestion management values depend on the link rate.
 3179	 * There is no active link, so the initial link rate is set to 10 Gbps.
 3180	 * When the link comes up, the congestion management values are
 3181	 * re-calculated according to the actual link rate.
3182 */
523224a3
DK
3183 bp->link_vars.line_speed = SPEED_10000;
3184 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
3185
3186 /* Only the PMF sets the HW */
3187 if (bp->port.pmf)
3188 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3189
86564c3f 3190 	/* init Event Queue - PCI bus guarantees correct endianness */
523224a3
DK
3191 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
3192 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
3193 eq_data.producer = bp->eq_prod;
3194 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
3195 eq_data.sb_id = DEF_SB_ID;
3196 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
3197}
3198
3199
3200static void bnx2x_e1h_disable(struct bnx2x *bp)
3201{
3202 int port = BP_PORT(bp);
3203
619c5cb6 3204 bnx2x_tx_disable(bp);
523224a3
DK
3205
3206 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
523224a3
DK
3207}
3208
3209static void bnx2x_e1h_enable(struct bnx2x *bp)
3210{
3211 int port = BP_PORT(bp);
3212
3213 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
3214
 3215	/* Tx queues should only be re-enabled */
3216 netif_tx_wake_all_queues(bp->dev);
3217
3218 /*
3219 * Should not call netif_carrier_on since it will be called if the link
3220 * is up when checking for link state
3221 */
3222}
3223
1d187b34
BW
3224#define DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED 3
3225
3226static void bnx2x_drv_info_ether_stat(struct bnx2x *bp)
3227{
3228 struct eth_stats_info *ether_stat =
3229 &bp->slowpath->drv_info_to_mcp.ether_stat;
3ec9f9ca
AE
3230 struct bnx2x_vlan_mac_obj *mac_obj =
3231 &bp->sp_objs->mac_obj;
3232 int i;
1d187b34 3233
786fdf0b
DC
3234 strlcpy(ether_stat->version, DRV_MODULE_VERSION,
3235 ETH_STAT_INFO_VERSION_LEN);
1d187b34 3236
3ec9f9ca
AE
3237 /* get DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED macs, placing them in the
3238 * mac_local field in ether_stat struct. The base address is offset by 2
3239 * bytes to account for the field being 8 bytes but a mac address is
3240 * only 6 bytes. Likewise, the stride for the get_n_elements function is
 3241	 * 2 bytes, padding each 6-byte mac out to the 8 bytes
3242 * allocated by the ether_stat struct, so the macs will land in their
3243 * proper positions.
3244 */
3245 for (i = 0; i < DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED; i++)
3246 memset(ether_stat->mac_local + i, 0,
3247 sizeof(ether_stat->mac_local[0]));
3248 mac_obj->get_n_elements(bp, &bp->sp_objs[0].mac_obj,
3249 DRV_INFO_ETH_STAT_NUM_MACS_REQUIRED,
3250 ether_stat->mac_local + MAC_PAD, MAC_PAD,
3251 ETH_ALEN);
1d187b34 3252 ether_stat->mtu_size = bp->dev->mtu;
1d187b34
BW
3253 if (bp->dev->features & NETIF_F_RXCSUM)
3254 ether_stat->feature_flags |= FEATURE_ETH_CHKSUM_OFFLOAD_MASK;
3255 if (bp->dev->features & NETIF_F_TSO)
3256 ether_stat->feature_flags |= FEATURE_ETH_LSO_MASK;
3257 ether_stat->feature_flags |= bp->common.boot_mode;
3258
3259 ether_stat->promiscuous_mode = (bp->dev->flags & IFF_PROMISC) ? 1 : 0;
3260
3261 ether_stat->txq_size = bp->tx_ring_size;
3262 ether_stat->rxq_size = bp->rx_ring_size;
3263}
3264
3265static void bnx2x_drv_info_fcoe_stat(struct bnx2x *bp)
3266{
3267 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3268 struct fcoe_stats_info *fcoe_stat =
3269 &bp->slowpath->drv_info_to_mcp.fcoe_stat;
3270
55c11941
MS
3271 if (!CNIC_LOADED(bp))
3272 return;
3273
3ec9f9ca 3274 memcpy(fcoe_stat->mac_local + MAC_PAD, bp->fip_mac, ETH_ALEN);
1d187b34
BW
3275
3276 fcoe_stat->qos_priority =
3277 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_FCOE];
3278
3279 /* insert FCoE stats from ramrod response */
3280 if (!NO_FCOE(bp)) {
3281 struct tstorm_per_queue_stats *fcoe_q_tstorm_stats =
65565884 3282 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
1d187b34
BW
3283 tstorm_queue_statistics;
3284
3285 struct xstorm_per_queue_stats *fcoe_q_xstorm_stats =
65565884 3286 &bp->fw_stats_data->queue_stats[FCOE_IDX(bp)].
1d187b34
BW
3287 xstorm_queue_statistics;
3288
3289 struct fcoe_statistics_params *fw_fcoe_stat =
3290 &bp->fw_stats_data->fcoe;
3291
86564c3f
YM
3292 ADD_64_LE(fcoe_stat->rx_bytes_hi, LE32_0,
3293 fcoe_stat->rx_bytes_lo,
3294 fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt);
1d187b34 3295
86564c3f
YM
3296 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3297 fcoe_q_tstorm_stats->rcv_ucast_bytes.hi,
3298 fcoe_stat->rx_bytes_lo,
3299 fcoe_q_tstorm_stats->rcv_ucast_bytes.lo);
1d187b34 3300
86564c3f
YM
3301 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3302 fcoe_q_tstorm_stats->rcv_bcast_bytes.hi,
3303 fcoe_stat->rx_bytes_lo,
3304 fcoe_q_tstorm_stats->rcv_bcast_bytes.lo);
1d187b34 3305
86564c3f
YM
3306 ADD_64_LE(fcoe_stat->rx_bytes_hi,
3307 fcoe_q_tstorm_stats->rcv_mcast_bytes.hi,
3308 fcoe_stat->rx_bytes_lo,
3309 fcoe_q_tstorm_stats->rcv_mcast_bytes.lo);
1d187b34 3310
86564c3f
YM
3311 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3312 fcoe_stat->rx_frames_lo,
3313 fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt);
1d187b34 3314
86564c3f
YM
3315 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3316 fcoe_stat->rx_frames_lo,
3317 fcoe_q_tstorm_stats->rcv_ucast_pkts);
1d187b34 3318
86564c3f
YM
3319 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3320 fcoe_stat->rx_frames_lo,
3321 fcoe_q_tstorm_stats->rcv_bcast_pkts);
1d187b34 3322
86564c3f
YM
3323 ADD_64_LE(fcoe_stat->rx_frames_hi, LE32_0,
3324 fcoe_stat->rx_frames_lo,
3325 fcoe_q_tstorm_stats->rcv_mcast_pkts);
1d187b34 3326
86564c3f
YM
3327 ADD_64_LE(fcoe_stat->tx_bytes_hi, LE32_0,
3328 fcoe_stat->tx_bytes_lo,
3329 fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt);
1d187b34 3330
86564c3f
YM
3331 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3332 fcoe_q_xstorm_stats->ucast_bytes_sent.hi,
3333 fcoe_stat->tx_bytes_lo,
3334 fcoe_q_xstorm_stats->ucast_bytes_sent.lo);
1d187b34 3335
86564c3f
YM
3336 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3337 fcoe_q_xstorm_stats->bcast_bytes_sent.hi,
3338 fcoe_stat->tx_bytes_lo,
3339 fcoe_q_xstorm_stats->bcast_bytes_sent.lo);
1d187b34 3340
86564c3f
YM
3341 ADD_64_LE(fcoe_stat->tx_bytes_hi,
3342 fcoe_q_xstorm_stats->mcast_bytes_sent.hi,
3343 fcoe_stat->tx_bytes_lo,
3344 fcoe_q_xstorm_stats->mcast_bytes_sent.lo);
1d187b34 3345
86564c3f
YM
3346 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3347 fcoe_stat->tx_frames_lo,
3348 fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt);
1d187b34 3349
86564c3f
YM
3350 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3351 fcoe_stat->tx_frames_lo,
3352 fcoe_q_xstorm_stats->ucast_pkts_sent);
1d187b34 3353
86564c3f
YM
3354 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3355 fcoe_stat->tx_frames_lo,
3356 fcoe_q_xstorm_stats->bcast_pkts_sent);
1d187b34 3357
86564c3f
YM
3358 ADD_64_LE(fcoe_stat->tx_frames_hi, LE32_0,
3359 fcoe_stat->tx_frames_lo,
3360 fcoe_q_xstorm_stats->mcast_pkts_sent);
1d187b34
BW
3361 }
3362
1d187b34
BW
3363 /* ask L5 driver to add data to the struct */
3364 bnx2x_cnic_notify(bp, CNIC_CTL_FCOE_STATS_GET_CMD);
1d187b34
BW
3365}
3366
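/* Illustrative sketch (not the driver's ADD_64_LE macro) of the accumulation
 * pattern used above: a 64-bit counter kept as two 32-bit halves is advanced
 * by adding the low words first and propagating the carry into the high word.
 * Plain stdint types stand in for the kernel's u32.
 */
#include <stdint.h>

static void sketch_add_64(uint32_t *hi, uint32_t *lo,
			  uint32_t add_hi, uint32_t add_lo)
{
	uint32_t old_lo = *lo;

	*lo += add_lo;
	*hi += add_hi + (*lo < old_lo);	/* carry out of the low word */
}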
3367static void bnx2x_drv_info_iscsi_stat(struct bnx2x *bp)
3368{
3369 struct bnx2x_dcbx_app_params *app = &bp->dcbx_port_params.app;
3370 struct iscsi_stats_info *iscsi_stat =
3371 &bp->slowpath->drv_info_to_mcp.iscsi_stat;
3372
55c11941
MS
3373 if (!CNIC_LOADED(bp))
3374 return;
3375
3ec9f9ca
AE
3376 memcpy(iscsi_stat->mac_local + MAC_PAD, bp->cnic_eth_dev.iscsi_mac,
3377 ETH_ALEN);
1d187b34
BW
3378
3379 iscsi_stat->qos_priority =
3380 app->traffic_type_priority[LLFC_TRAFFIC_TYPE_ISCSI];
3381
1d187b34
BW
3382 /* ask L5 driver to add data to the struct */
3383 bnx2x_cnic_notify(bp, CNIC_CTL_ISCSI_STATS_GET_CMD);
1d187b34
BW
3384}
3385
0793f83f
DK
3386/* called due to MCP event (on pmf):
3387 * reread new bandwidth configuration
3388 * configure FW
3389 * notify others function about the change
3390 */
1191cb83 3391static void bnx2x_config_mf_bw(struct bnx2x *bp)
0793f83f
DK
3392{
3393 if (bp->link_vars.link_up) {
3394 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
3395 bnx2x_link_sync_notify(bp);
3396 }
3397 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
3398}
3399
1191cb83 3400static void bnx2x_set_mf_bw(struct bnx2x *bp)
0793f83f
DK
3401{
3402 bnx2x_config_mf_bw(bp);
3403 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
3404}
3405
c8c60d88
YM
3406static void bnx2x_handle_eee_event(struct bnx2x *bp)
3407{
3408 DP(BNX2X_MSG_MCP, "EEE - LLDP event\n");
3409 bnx2x_fw_command(bp, DRV_MSG_CODE_EEE_RESULTS_ACK, 0);
3410}
3411
1d187b34
BW
3412static void bnx2x_handle_drv_info_req(struct bnx2x *bp)
3413{
3414 enum drv_info_opcode op_code;
3415 u32 drv_info_ctl = SHMEM2_RD(bp, drv_info_control);
3416
3417 /* if drv_info version supported by MFW doesn't match - send NACK */
3418 if ((drv_info_ctl & DRV_INFO_CONTROL_VER_MASK) != DRV_INFO_CUR_VER) {
3419 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3420 return;
3421 }
3422
3423 op_code = (drv_info_ctl & DRV_INFO_CONTROL_OP_CODE_MASK) >>
3424 DRV_INFO_CONTROL_OP_CODE_SHIFT;
3425
3426 memset(&bp->slowpath->drv_info_to_mcp, 0,
3427 sizeof(union drv_info_to_mcp));
3428
3429 switch (op_code) {
3430 case ETH_STATS_OPCODE:
3431 bnx2x_drv_info_ether_stat(bp);
3432 break;
3433 case FCOE_STATS_OPCODE:
3434 bnx2x_drv_info_fcoe_stat(bp);
3435 break;
3436 case ISCSI_STATS_OPCODE:
3437 bnx2x_drv_info_iscsi_stat(bp);
3438 break;
3439 default:
3440 /* if op code isn't supported - send NACK */
3441 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_NACK, 0);
3442 return;
3443 }
3444
3445 /* if we got drv_info attn from MFW then these fields are defined in
3446 * shmem2 for sure
3447 */
3448 SHMEM2_WR(bp, drv_info_host_addr_lo,
3449 U64_LO(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3450 SHMEM2_WR(bp, drv_info_host_addr_hi,
3451 U64_HI(bnx2x_sp_mapping(bp, drv_info_to_mcp)));
3452
3453 bnx2x_fw_command(bp, DRV_MSG_CODE_DRV_INFO_ACK, 0);
3454}
3455
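/* Small sketch of the address split used above when publishing the
 * drv_info_to_mcp buffer through the two 32-bit shmem fields; the helpers
 * below merely mirror what U64_LO()/U64_HI() are expected to do.
 */
#include <stdint.h>

static inline uint32_t sketch_u64_lo(uint64_t addr)
{
	return (uint32_t)(addr & 0xffffffffu);	/* low 32 bits */
}

static inline uint32_t sketch_u64_hi(uint64_t addr)
{
	return (uint32_t)(addr >> 32);		/* high 32 bits */
}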
523224a3
DK
3456static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
3457{
3458 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
3459
3460 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
3461
3462 /*
3463 * This is the only place besides the function initialization
3464 * where the bp->flags can change so it is done without any
3465 * locks
3466 */
f2e0899f 3467 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
51c1a580 3468 DP(BNX2X_MSG_MCP, "mf_cfg function disabled\n");
523224a3
DK
3469 bp->flags |= MF_FUNC_DIS;
3470
3471 bnx2x_e1h_disable(bp);
3472 } else {
51c1a580 3473 DP(BNX2X_MSG_MCP, "mf_cfg function enabled\n");
523224a3
DK
3474 bp->flags &= ~MF_FUNC_DIS;
3475
3476 bnx2x_e1h_enable(bp);
3477 }
3478 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
3479 }
3480 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
0793f83f 3481 bnx2x_config_mf_bw(bp);
523224a3
DK
3482 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
3483 }
3484
3485 /* Report results to MCP */
3486 if (dcc_event)
3487 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
3488 else
3489 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
3490}
3491
3492/* must be called under the spq lock */
1191cb83 3493static struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
523224a3
DK
3494{
3495 struct eth_spe *next_spe = bp->spq_prod_bd;
3496
3497 if (bp->spq_prod_bd == bp->spq_last_bd) {
3498 bp->spq_prod_bd = bp->spq;
3499 bp->spq_prod_idx = 0;
51c1a580 3500 DP(BNX2X_MSG_SP, "end of spq\n");
523224a3
DK
3501 } else {
3502 bp->spq_prod_bd++;
3503 bp->spq_prod_idx++;
3504 }
3505 return next_spe;
3506}
3507
3508/* must be called under the spq lock */
1191cb83 3509static void bnx2x_sp_prod_update(struct bnx2x *bp)
28912902
MC
3510{
3511 int func = BP_FUNC(bp);
3512
53e51e2f
VZ
3513 /*
3514 * Make sure that BD data is updated before writing the producer:
3515 * BD data is written to the memory, the producer is read from the
3516 * memory, thus we need a full memory barrier to ensure the ordering.
3517 */
3518 mb();
28912902 3519
523224a3 3520 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
f85582f8 3521 bp->spq_prod_idx);
28912902
MC
3522 mmiowb();
3523}
3524
619c5cb6
VZ
3525/**
3526 * bnx2x_is_contextless_ramrod - check if the current command ends on EQ
3527 *
3528 * @cmd: command to check
3529 * @cmd_type: command type
3530 */
1191cb83 3531static bool bnx2x_is_contextless_ramrod(int cmd, int cmd_type)
619c5cb6
VZ
3532{
3533 if ((cmd_type == NONE_CONNECTION_TYPE) ||
6383c0b3 3534 (cmd == RAMROD_CMD_ID_ETH_FORWARD_SETUP) ||
619c5cb6
VZ
3535 (cmd == RAMROD_CMD_ID_ETH_CLASSIFICATION_RULES) ||
3536 (cmd == RAMROD_CMD_ID_ETH_FILTER_RULES) ||
3537 (cmd == RAMROD_CMD_ID_ETH_MULTICAST_RULES) ||
3538 (cmd == RAMROD_CMD_ID_ETH_SET_MAC) ||
3539 (cmd == RAMROD_CMD_ID_ETH_RSS_UPDATE))
3540 return true;
3541 else
3542 return false;
3543
3544}
3545
3546
3547/**
3548 * bnx2x_sp_post - place a single command on an SP ring
3549 *
3550 * @bp: driver handle
3551 * @command: command to place (e.g. SETUP, FILTER_RULES, etc.)
3552 * @cid: SW CID the command is related to
3553 * @data_hi: command private data address (high 32 bits)
3554 * @data_lo: command private data address (low 32 bits)
3555 * @cmd_type: command type (e.g. NONE, ETH)
3556 *
3557 * SP data is handled as if it's always an address pair, thus data fields are
3558 * not swapped to little endian in upper functions. Instead this function swaps
3559 * data as if it's two u32 fields.
3560 */
9f6c9258 3561int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
619c5cb6 3562 u32 data_hi, u32 data_lo, int cmd_type)
a2fbb9ea 3563{
28912902 3564 struct eth_spe *spe;
523224a3 3565 u16 type;
619c5cb6 3566 bool common = bnx2x_is_contextless_ramrod(command, cmd_type);
a2fbb9ea 3567
a2fbb9ea 3568#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
3569 if (unlikely(bp->panic)) {
3570 BNX2X_ERR("Can't post SP when there is panic\n");
a2fbb9ea 3571 return -EIO;
51c1a580 3572 }
a2fbb9ea
ET
3573#endif
3574
34f80b04 3575 spin_lock_bh(&bp->spq_lock);
a2fbb9ea 3576
6e30dd4e
VZ
3577 if (common) {
3578 if (!atomic_read(&bp->eq_spq_left)) {
3579 BNX2X_ERR("BUG! EQ ring full!\n");
3580 spin_unlock_bh(&bp->spq_lock);
3581 bnx2x_panic();
3582 return -EBUSY;
3583 }
3584 } else if (!atomic_read(&bp->cq_spq_left)) {
3585 BNX2X_ERR("BUG! SPQ ring full!\n");
3586 spin_unlock_bh(&bp->spq_lock);
3587 bnx2x_panic();
3588 return -EBUSY;
a2fbb9ea 3589 }
f1410647 3590
28912902
MC
3591 spe = bnx2x_sp_get_next(bp);
3592
a2fbb9ea 3593 /* CID needs the port number to be encoded in it */
28912902 3594 spe->hdr.conn_and_cmd_data =
cdaa7cb8
VZ
3595 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
3596 HW_CID(bp, cid));
523224a3 3597
619c5cb6 3598 type = (cmd_type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
a2fbb9ea 3599
523224a3
DK
3600 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
3601 SPE_HDR_FUNCTION_ID);
a2fbb9ea 3602
523224a3
DK
3603 spe->hdr.type = cpu_to_le16(type);
3604
3605 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
3606 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
3607
d6cae238
VZ
3608 /*
3609 * It's ok if the actual decrement is issued towards the memory
3610 * somewhere between the spin_lock and spin_unlock. Thus no
3611 * more explicit memory barrier is needed.
3612 */
3613 if (common)
3614 atomic_dec(&bp->eq_spq_left);
3615 else
3616 atomic_dec(&bp->cq_spq_left);
6e30dd4e 3617
a2fbb9ea 3618
51c1a580
MS
3619 DP(BNX2X_MSG_SP,
3620 "SPQE[%x] (%x:%x) (cmd, common?) (%d,%d) hw_cid %x data (%x:%x) type(0x%x) left (CQ, EQ) (%x,%x)\n",
cdaa7cb8
VZ
3621 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
3622 (u32)(U64_LO(bp->spq_mapping) +
d6cae238 3623 (void *)bp->spq_prod_bd - (void *)bp->spq), command, common,
6e30dd4e
VZ
3624 HW_CID(bp, cid), data_hi, data_lo, type,
3625 atomic_read(&bp->cq_spq_left), atomic_read(&bp->eq_spq_left));
cdaa7cb8 3626
28912902 3627 bnx2x_sp_prod_update(bp);
34f80b04 3628 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
3629 return 0;
3630}
3631
3632/* acquire split MCP access lock register */
4a37fb66 3633static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 3634{
72fd0718 3635 u32 j, val;
34f80b04 3636 int rc = 0;
a2fbb9ea
ET
3637
3638 might_sleep();
72fd0718 3639 for (j = 0; j < 1000; j++) {
a2fbb9ea
ET
3640 val = (1UL << 31);
3641 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
3642 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
3643 if (val & (1L << 31))
3644 break;
3645
3646 msleep(5);
3647 }
a2fbb9ea 3648 if (!(val & (1L << 31))) {
19680c48 3649 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
3650 rc = -EBUSY;
3651 }
3652
3653 return rc;
3654}
3655
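/* Standalone sketch (not driver code) of the acquire-by-polling pattern used
 * in bnx2x_acquire_alr() above: request the lock by writing bit 31, then read
 * back until the hardware reports the bit set or the attempts run out.  The
 * register is modelled as a plain pointer here.
 */
#include <stdint.h>
#include <stdbool.h>

static bool sketch_acquire_lock_bit(volatile uint32_t *reg, int attempts)
{
	int i;

	for (i = 0; i < attempts; i++) {
		*reg = 1u << 31;		/* request the lock */
		if (*reg & (1u << 31))		/* granted when the bit reads back set */
			return true;
		/* the real code sleeps 5 ms between attempts (msleep(5)) */
	}
	return false;
}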
4a37fb66
YG
3656/* release split MCP access lock register */
3657static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea 3658{
72fd0718 3659 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
a2fbb9ea
ET
3660}
3661
523224a3
DK
3662#define BNX2X_DEF_SB_ATT_IDX 0x0001
3663#define BNX2X_DEF_SB_IDX 0x0002
3664
1191cb83 3665static u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
a2fbb9ea 3666{
523224a3 3667 struct host_sp_status_block *def_sb = bp->def_status_blk;
a2fbb9ea
ET
3668 u16 rc = 0;
3669
3670 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
3671 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
3672 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
523224a3 3673 rc |= BNX2X_DEF_SB_ATT_IDX;
a2fbb9ea 3674 }
523224a3
DK
3675
3676 if (bp->def_idx != def_sb->sp_sb.running_index) {
3677 bp->def_idx = def_sb->sp_sb.running_index;
3678 rc |= BNX2X_DEF_SB_IDX;
a2fbb9ea 3679 }
523224a3
DK
3680
3681 /* Do not reorder: reading the indices should complete before handling */
3682 barrier();
a2fbb9ea
ET
3683 return rc;
3684}
3685
3686/*
3687 * slow path service functions
3688 */
3689
3690static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
3691{
34f80b04 3692 int port = BP_PORT(bp);
a2fbb9ea
ET
3693 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3694 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
3695 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
3696 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 3697 u32 aeu_mask;
87942b46 3698 u32 nig_mask = 0;
f2e0899f 3699 u32 reg_addr;
a2fbb9ea 3700
a2fbb9ea
ET
3701 if (bp->attn_state & asserted)
3702 BNX2X_ERR("IGU ERROR\n");
3703
3fcaf2e5
EG
3704 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3705 aeu_mask = REG_RD(bp, aeu_addr);
3706
a2fbb9ea 3707 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5 3708 aeu_mask, asserted);
72fd0718 3709 aeu_mask &= ~(asserted & 0x3ff);
3fcaf2e5 3710 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 3711
3fcaf2e5
EG
3712 REG_WR(bp, aeu_addr, aeu_mask);
3713 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 3714
3fcaf2e5 3715 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 3716 bp->attn_state |= asserted;
3fcaf2e5 3717 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
3718
3719 if (asserted & ATTN_HARD_WIRED_MASK) {
3720 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 3721
a5e9a7cf
EG
3722 bnx2x_acquire_phy_lock(bp);
3723
877e9aa4 3724 /* save nig interrupt mask */
87942b46 3725 nig_mask = REG_RD(bp, nig_int_mask_addr);
a2fbb9ea 3726
361c391e
YR
3727 /* If nig_mask is not set, no need to call the update
3728 * function.
3729 */
3730 if (nig_mask) {
3731 REG_WR(bp, nig_int_mask_addr, 0);
3732
3733 bnx2x_link_attn(bp);
3734 }
a2fbb9ea
ET
3735
3736 /* handle unicore attn? */
3737 }
3738 if (asserted & ATTN_SW_TIMER_4_FUNC)
3739 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
3740
3741 if (asserted & GPIO_2_FUNC)
3742 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
3743
3744 if (asserted & GPIO_3_FUNC)
3745 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
3746
3747 if (asserted & GPIO_4_FUNC)
3748 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
3749
3750 if (port == 0) {
3751 if (asserted & ATTN_GENERAL_ATTN_1) {
3752 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
3753 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
3754 }
3755 if (asserted & ATTN_GENERAL_ATTN_2) {
3756 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
3757 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
3758 }
3759 if (asserted & ATTN_GENERAL_ATTN_3) {
3760 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
3761 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
3762 }
3763 } else {
3764 if (asserted & ATTN_GENERAL_ATTN_4) {
3765 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
3766 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
3767 }
3768 if (asserted & ATTN_GENERAL_ATTN_5) {
3769 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
3770 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
3771 }
3772 if (asserted & ATTN_GENERAL_ATTN_6) {
3773 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
3774 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
3775 }
3776 }
3777
3778 } /* if hardwired */
3779
f2e0899f
DK
3780 if (bp->common.int_block == INT_BLOCK_HC)
3781 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3782 COMMAND_REG_ATTN_BITS_SET);
3783 else
3784 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
3785
3786 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
3787 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
3788 REG_WR(bp, reg_addr, asserted);
a2fbb9ea
ET
3789
3790 /* now set back the mask */
a5e9a7cf 3791 if (asserted & ATTN_NIG_FOR_FUNC) {
27c1151c
YR
3792 /* Verify that IGU ack through BAR was written before restoring
3793 * NIG mask. This loop should exit after 2-3 iterations max.
3794 */
3795 if (bp->common.int_block != INT_BLOCK_HC) {
3796 u32 cnt = 0, igu_acked;
3797 do {
3798 igu_acked = REG_RD(bp,
3799 IGU_REG_ATTENTION_ACK_BITS);
3800 } while (((igu_acked & ATTN_NIG_FOR_FUNC) == 0) &&
3801 (++cnt < MAX_IGU_ATTN_ACK_TO));
3802 if (!igu_acked)
3803 DP(NETIF_MSG_HW,
3804 "Failed to verify IGU ack on time\n");
3805 barrier();
3806 }
87942b46 3807 REG_WR(bp, nig_int_mask_addr, nig_mask);
a5e9a7cf
EG
3808 bnx2x_release_phy_lock(bp);
3809 }
a2fbb9ea
ET
3810}
3811
1191cb83 3812static void bnx2x_fan_failure(struct bnx2x *bp)
fd4ef40d
EG
3813{
3814 int port = BP_PORT(bp);
b7737c9b 3815 u32 ext_phy_config;
fd4ef40d 3816 /* mark the failure */
b7737c9b
YR
3817 ext_phy_config =
3818 SHMEM_RD(bp,
3819 dev_info.port_hw_config[port].external_phy_config);
3820
3821 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
3822 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
fd4ef40d 3823 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
b7737c9b 3824 ext_phy_config);
fd4ef40d
EG
3825
3826 /* log the failure */
51c1a580
MS
3827 netdev_err(bp->dev, "Fan Failure on Network Controller has caused the driver to shutdown the card to prevent permanent damage.\n"
3828 "Please contact OEM Support for assistance\n");
8304859a
AE
3829
3830 /*
2de67439 3831 * Schedule device reset (unload)
8304859a
AE
3832 * Some boards consume enough power when the driver is up to
3833 * overheat if the fan fails.
3834 */
3835 smp_mb__before_clear_bit();
3836 set_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state);
3837 smp_mb__after_clear_bit();
3838 schedule_delayed_work(&bp->sp_rtnl_task, 0);
3839
fd4ef40d 3840}
ab6ad5a4 3841
1191cb83 3842static void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 3843{
34f80b04 3844 int port = BP_PORT(bp);
877e9aa4 3845 int reg_offset;
d90d96ba 3846 u32 val;
877e9aa4 3847
34f80b04
EG
3848 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
3849 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 3850
34f80b04 3851 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
3852
3853 val = REG_RD(bp, reg_offset);
3854 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
3855 REG_WR(bp, reg_offset, val);
3856
3857 BNX2X_ERR("SPIO5 hw attention\n");
3858
fd4ef40d 3859 /* Fan failure attention */
d90d96ba 3860 bnx2x_hw_reset_phy(&bp->link_params);
fd4ef40d 3861 bnx2x_fan_failure(bp);
877e9aa4 3862 }
34f80b04 3863
3deb8167 3864 if ((attn & bp->link_vars.aeu_int_mask) && bp->port.pmf) {
589abe3a
EG
3865 bnx2x_acquire_phy_lock(bp);
3866 bnx2x_handle_module_detect_int(&bp->link_params);
3867 bnx2x_release_phy_lock(bp);
3868 }
3869
34f80b04
EG
3870 if (attn & HW_INTERRUT_ASSERT_SET_0) {
3871
3872 val = REG_RD(bp, reg_offset);
3873 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
3874 REG_WR(bp, reg_offset, val);
3875
3876 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
0fc5d009 3877 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
34f80b04
EG
3878 bnx2x_panic();
3879 }
877e9aa4
ET
3880}
3881
1191cb83 3882static void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
877e9aa4
ET
3883{
3884 u32 val;
3885
0626b899 3886 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
877e9aa4
ET
3887
3888 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
3889 BNX2X_ERR("DB hw attention 0x%x\n", val);
3890 /* DORQ discard attention */
3891 if (val & 0x2)
3892 BNX2X_ERR("FATAL error from DORQ\n");
3893 }
34f80b04
EG
3894
3895 if (attn & HW_INTERRUT_ASSERT_SET_1) {
3896
3897 int port = BP_PORT(bp);
3898 int reg_offset;
3899
3900 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
3901 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
3902
3903 val = REG_RD(bp, reg_offset);
3904 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
3905 REG_WR(bp, reg_offset, val);
3906
3907 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
0fc5d009 3908 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
34f80b04
EG
3909 bnx2x_panic();
3910 }
877e9aa4
ET
3911}
3912
1191cb83 3913static void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
877e9aa4
ET
3914{
3915 u32 val;
3916
3917 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
3918
3919 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
3920 BNX2X_ERR("CFC hw attention 0x%x\n", val);
3921 /* CFC error attention */
3922 if (val & 0x2)
3923 BNX2X_ERR("FATAL error from CFC\n");
3924 }
3925
3926 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
877e9aa4 3927 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
619c5cb6 3928 BNX2X_ERR("PXP hw attention-0 0x%x\n", val);
877e9aa4
ET
3929 /* RQ_USDMDP_FIFO_OVERFLOW */
3930 if (val & 0x18000)
3931 BNX2X_ERR("FATAL error from PXP\n");
619c5cb6
VZ
3932
3933 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
3934 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
3935 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
3936 }
877e9aa4 3937 }
34f80b04
EG
3938
3939 if (attn & HW_INTERRUT_ASSERT_SET_2) {
3940
3941 int port = BP_PORT(bp);
3942 int reg_offset;
3943
3944 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
3945 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
3946
3947 val = REG_RD(bp, reg_offset);
3948 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3949 REG_WR(bp, reg_offset, val);
3950
3951 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
0fc5d009 3952 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
34f80b04
EG
3953 bnx2x_panic();
3954 }
877e9aa4
ET
3955}
3956
1191cb83 3957static void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
877e9aa4 3958{
34f80b04
EG
3959 u32 val;
3960
877e9aa4
ET
3961 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3962
34f80b04
EG
3963 if (attn & BNX2X_PMF_LINK_ASSERT) {
3964 int func = BP_FUNC(bp);
3965
3966 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
a3348722 3967 bnx2x_read_mf_cfg(bp);
f2e0899f
DK
3968 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3969 func_mf_config[BP_ABS_FUNC(bp)].config);
3970 val = SHMEM_RD(bp,
3971 func_mb[BP_FW_MB_IDX(bp)].drv_status);
2691d51d
EG
3972 if (val & DRV_STATUS_DCC_EVENT_MASK)
3973 bnx2x_dcc_event(bp,
3974 (val & DRV_STATUS_DCC_EVENT_MASK));
0793f83f
DK
3975
3976 if (val & DRV_STATUS_SET_MF_BW)
3977 bnx2x_set_mf_bw(bp);
3978
1d187b34
BW
3979 if (val & DRV_STATUS_DRV_INFO_REQ)
3980 bnx2x_handle_drv_info_req(bp);
d16132ce
AE
3981
3982 if (val & DRV_STATUS_VF_DISABLED)
3983 bnx2x_vf_handle_flr_event(bp);
3984
2691d51d 3985 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
34f80b04
EG
3986 bnx2x_pmf_update(bp);
3987
e4901dde 3988 if (bp->port.pmf &&
785b9b1a
SR
3989 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3990 bp->dcbx_enabled > 0)
e4901dde
VZ
3991 /* start dcbx state machine */
3992 bnx2x_dcbx_set_params(bp,
3993 BNX2X_DCBX_STATE_NEG_RECEIVED);
a3348722
BW
3994 if (val & DRV_STATUS_AFEX_EVENT_MASK)
3995 bnx2x_handle_afex_cmd(bp,
3996 val & DRV_STATUS_AFEX_EVENT_MASK);
c8c60d88
YM
3997 if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS)
3998 bnx2x_handle_eee_event(bp);
3deb8167
YR
3999 if (bp->link_vars.periodic_flags &
4000 PERIODIC_FLAGS_LINK_EVENT) {
4001 /* sync with link */
4002 bnx2x_acquire_phy_lock(bp);
4003 bp->link_vars.periodic_flags &=
4004 ~PERIODIC_FLAGS_LINK_EVENT;
4005 bnx2x_release_phy_lock(bp);
4006 if (IS_MF(bp))
4007 bnx2x_link_sync_notify(bp);
4008 bnx2x_link_report(bp);
4009 }
4010 /* Always call it here: bnx2x_link_report() will
4011 * prevent duplicate link indications.
4012 */
4013 bnx2x__link_status_update(bp);
34f80b04 4014 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
4015
4016 BNX2X_ERR("MC assert!\n");
d6cae238 4017 bnx2x_mc_assert(bp);
877e9aa4
ET
4018 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
4019 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
4020 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
4021 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
4022 bnx2x_panic();
4023
4024 } else if (attn & BNX2X_MCP_ASSERT) {
4025
4026 BNX2X_ERR("MCP assert!\n");
4027 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 4028 bnx2x_fw_dump(bp);
877e9aa4
ET
4029
4030 } else
4031 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
4032 }
4033
4034 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
4035 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
4036 if (attn & BNX2X_GRC_TIMEOUT) {
f2e0899f
DK
4037 val = CHIP_IS_E1(bp) ? 0 :
4038 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
34f80b04
EG
4039 BNX2X_ERR("GRC time-out 0x%08x\n", val);
4040 }
4041 if (attn & BNX2X_GRC_RSV) {
f2e0899f
DK
4042 val = CHIP_IS_E1(bp) ? 0 :
4043 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
34f80b04
EG
4044 BNX2X_ERR("GRC reserved 0x%08x\n", val);
4045 }
877e9aa4 4046 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
4047 }
4048}
4049
c9ee9206
VZ
4050/*
4051 * Bits map:
4052 * 0-7 - Engine0 load counter.
4053 * 8-15 - Engine1 load counter.
4054 * 16 - Engine0 RESET_IN_PROGRESS bit.
4055 * 17 - Engine1 RESET_IN_PROGRESS bit.
4056 * 18 - Engine0 ONE_IS_LOADED. Set when there is at least one active function
4057 * on the engine
4058 * 19 - Engine1 ONE_IS_LOADED.
4059 * 20 - Chip reset flow bit. When set, a non-leader must wait for both engine
4060 * leaders to complete (check both RESET_IN_PROGRESS bits and not just
4061 * the one belonging to its engine).
4062 *
4063 */
4064#define BNX2X_RECOVERY_GLOB_REG MISC_REG_GENERIC_POR_1
4065
4066#define BNX2X_PATH0_LOAD_CNT_MASK 0x000000ff
4067#define BNX2X_PATH0_LOAD_CNT_SHIFT 0
4068#define BNX2X_PATH1_LOAD_CNT_MASK 0x0000ff00
4069#define BNX2X_PATH1_LOAD_CNT_SHIFT 8
4070#define BNX2X_PATH0_RST_IN_PROG_BIT 0x00010000
4071#define BNX2X_PATH1_RST_IN_PROG_BIT 0x00020000
4072#define BNX2X_GLOBAL_RESET_BIT 0x00040000
4073
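/* Standalone sketch (not part of the driver) of the per-path load-counter
 * update described in the bit map above: each path owns an 8-bit field of the
 * recovery register, and a PF marks or clears itself with a read-modify-write
 * of its bit inside that field.  The constants mirror the
 * BNX2X_PATH*_LOAD_CNT_* definitions.
 */
#include <stdint.h>
#include <stdbool.h>

static uint32_t sketch_update_pf_load(uint32_t reg, int path, int pf_num,
				      bool loaded)
{
	uint32_t mask  = path ? 0x0000ff00u : 0x000000ffu;
	uint32_t shift = path ? 8 : 0;
	uint32_t field = (reg & mask) >> shift;

	if (loaded)
		field |= 1u << pf_num;		/* set this PF's bit */
	else
		field &= ~(1u << pf_num);	/* clear this PF's bit */

	return (reg & ~mask) | ((field << shift) & mask);
}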
4074/*
4075 * Set the GLOBAL_RESET bit.
4076 *
4077 * Should be run under rtnl lock
4078 */
4079void bnx2x_set_reset_global(struct bnx2x *bp)
4080{
f16da43b
AE
4081 u32 val;
4082 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4083 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206 4084 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val | BNX2X_GLOBAL_RESET_BIT);
f16da43b 4085 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
c9ee9206
VZ
4086}
4087
4088/*
4089 * Clear the GLOBAL_RESET bit.
4090 *
4091 * Should be run under rtnl lock
4092 */
1191cb83 4093static void bnx2x_clear_reset_global(struct bnx2x *bp)
c9ee9206 4094{
f16da43b
AE
4095 u32 val;
4096 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4097 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206 4098 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val & (~BNX2X_GLOBAL_RESET_BIT));
f16da43b 4099 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
c9ee9206 4100}
f85582f8 4101
72fd0718 4102/*
c9ee9206
VZ
4103 * Checks the GLOBAL_RESET bit.
4104 *
72fd0718
VZ
4105 * should be run under rtnl lock
4106 */
1191cb83 4107static bool bnx2x_reset_is_global(struct bnx2x *bp)
c9ee9206
VZ
4108{
4109 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4110
4111 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
4112 return (val & BNX2X_GLOBAL_RESET_BIT) ? true : false;
4113}
4114
4115/*
4116 * Clear RESET_IN_PROGRESS bit for the current engine.
4117 *
4118 * Should be run under rtnl lock
4119 */
1191cb83 4120static void bnx2x_set_reset_done(struct bnx2x *bp)
72fd0718 4121{
f16da43b 4122 u32 val;
c9ee9206
VZ
4123 u32 bit = BP_PATH(bp) ?
4124 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
f16da43b
AE
4125 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4126 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4127
4128 /* Clear the bit */
4129 val &= ~bit;
4130 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b
AE
4131
4132 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4133}
4134
4135/*
c9ee9206
VZ
4136 * Set RESET_IN_PROGRESS for the current engine.
4137 *
72fd0718
VZ
4138 * should be run under rtnl lock
4139 */
c9ee9206 4140void bnx2x_set_reset_in_progress(struct bnx2x *bp)
72fd0718 4141{
f16da43b 4142 u32 val;
c9ee9206
VZ
4143 u32 bit = BP_PATH(bp) ?
4144 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
f16da43b
AE
4145 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4146 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
c9ee9206
VZ
4147
4148 /* Set the bit */
4149 val |= bit;
4150 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b 4151 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4152}
4153
4154/*
c9ee9206 4155 * Checks the RESET_IN_PROGRESS bit for the given engine.
72fd0718
VZ
4156 * should be run under rtnl lock
4157 */
c9ee9206 4158bool bnx2x_reset_is_done(struct bnx2x *bp, int engine)
72fd0718 4159{
c9ee9206
VZ
4160 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4161 u32 bit = engine ?
4162 BNX2X_PATH1_RST_IN_PROG_BIT : BNX2X_PATH0_RST_IN_PROG_BIT;
4163
4164 /* return false if bit is set */
4165 return (val & bit) ? false : true;
72fd0718
VZ
4166}
4167
4168/*
889b9af3 4169 * set pf load for the current pf.
c9ee9206 4170 *
72fd0718
VZ
4171 * should be run under rtnl lock
4172 */
889b9af3 4173void bnx2x_set_pf_load(struct bnx2x *bp)
72fd0718 4174{
f16da43b 4175 u32 val1, val;
c9ee9206
VZ
4176 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4177 BNX2X_PATH0_LOAD_CNT_MASK;
4178 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4179 BNX2X_PATH0_LOAD_CNT_SHIFT;
72fd0718 4180
f16da43b
AE
4181 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4182 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4183
51c1a580 4184 DP(NETIF_MSG_IFUP, "Old GEN_REG_VAL=0x%08x\n", val);
72fd0718 4185
c9ee9206
VZ
4186 /* get the current counter value */
4187 val1 = (val & mask) >> shift;
4188
889b9af3
AE
4189 /* set bit of that PF */
4190 val1 |= (1 << bp->pf_num);
c9ee9206
VZ
4191
4192 /* clear the old value */
4193 val &= ~mask;
4194
4195 /* set the new one */
4196 val |= ((val1 << shift) & mask);
4197
4198 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b 4199 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
72fd0718
VZ
4200}
4201
c9ee9206 4202/**
889b9af3 4203 * bnx2x_clear_pf_load - clear pf load mark
c9ee9206
VZ
4204 *
4205 * @bp: driver handle
4206 *
4207 * Should be run under rtnl lock.
4208 * Decrements the load counter for the current engine. Returns
889b9af3 4209 * whether other functions are still loaded
72fd0718 4210 */
889b9af3 4211bool bnx2x_clear_pf_load(struct bnx2x *bp)
72fd0718 4212{
f16da43b 4213 u32 val1, val;
c9ee9206
VZ
4214 u32 mask = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_MASK :
4215 BNX2X_PATH0_LOAD_CNT_MASK;
4216 u32 shift = BP_PATH(bp) ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4217 BNX2X_PATH0_LOAD_CNT_SHIFT;
72fd0718 4218
f16da43b
AE
4219 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4220 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
51c1a580 4221 DP(NETIF_MSG_IFDOWN, "Old GEN_REG_VAL=0x%08x\n", val);
72fd0718 4222
c9ee9206
VZ
4223 /* get the current counter value */
4224 val1 = (val & mask) >> shift;
4225
889b9af3
AE
4226 /* clear bit of that PF */
4227 val1 &= ~(1 << bp->pf_num);
c9ee9206
VZ
4228
4229 /* clear the old value */
4230 val &= ~mask;
4231
4232 /* set the new one */
4233 val |= ((val1 << shift) & mask);
4234
4235 REG_WR(bp, BNX2X_RECOVERY_GLOB_REG, val);
f16da43b
AE
4236 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RECOVERY_REG);
4237 return val1 != 0;
72fd0718
VZ
4238}
4239
4240/*
889b9af3 4241 * Read the load status for the current engine.
c9ee9206 4242 *
72fd0718
VZ
4243 * should be run under rtnl lock
4244 */
1191cb83 4245static bool bnx2x_get_load_status(struct bnx2x *bp, int engine)
72fd0718 4246{
c9ee9206
VZ
4247 u32 mask = (engine ? BNX2X_PATH1_LOAD_CNT_MASK :
4248 BNX2X_PATH0_LOAD_CNT_MASK);
4249 u32 shift = (engine ? BNX2X_PATH1_LOAD_CNT_SHIFT :
4250 BNX2X_PATH0_LOAD_CNT_SHIFT);
4251 u32 val = REG_RD(bp, BNX2X_RECOVERY_GLOB_REG);
4252
51c1a580 4253 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "GLOB_REG=0x%08x\n", val);
c9ee9206
VZ
4254
4255 val = (val & mask) >> shift;
4256
51c1a580
MS
4257 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "load mask for engine %d = 0x%x\n",
4258 engine, val);
c9ee9206 4259
889b9af3 4260 return val != 0;
72fd0718
VZ
4261}
4262
1191cb83 4263static void _print_next_block(int idx, const char *blk)
72fd0718 4264{
f1deab50 4265 pr_cont("%s%s", idx ? ", " : "", blk);
72fd0718
VZ
4266}
4267
1191cb83
ED
4268static int bnx2x_check_blocks_with_parity0(u32 sig, int par_num,
4269 bool print)
72fd0718
VZ
4270{
4271 int i = 0;
4272 u32 cur_bit = 0;
4273 for (i = 0; sig; i++) {
4274 cur_bit = ((u32)0x1 << i);
4275 if (sig & cur_bit) {
4276 switch (cur_bit) {
4277 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
c9ee9206
VZ
4278 if (print)
4279 _print_next_block(par_num++, "BRB");
72fd0718
VZ
4280 break;
4281 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
c9ee9206
VZ
4282 if (print)
4283 _print_next_block(par_num++, "PARSER");
72fd0718
VZ
4284 break;
4285 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
c9ee9206
VZ
4286 if (print)
4287 _print_next_block(par_num++, "TSDM");
72fd0718
VZ
4288 break;
4289 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
c9ee9206
VZ
4290 if (print)
4291 _print_next_block(par_num++,
4292 "SEARCHER");
4293 break;
4294 case AEU_INPUTS_ATTN_BITS_TCM_PARITY_ERROR:
4295 if (print)
4296 _print_next_block(par_num++, "TCM");
72fd0718
VZ
4297 break;
4298 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
c9ee9206
VZ
4299 if (print)
4300 _print_next_block(par_num++, "TSEMI");
4301 break;
4302 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
4303 if (print)
4304 _print_next_block(par_num++, "XPB");
72fd0718
VZ
4305 break;
4306 }
4307
4308 /* Clear the bit */
4309 sig &= ~cur_bit;
4310 }
4311 }
4312
4313 return par_num;
4314}
4315
1191cb83
ED
4316static int bnx2x_check_blocks_with_parity1(u32 sig, int par_num,
4317 bool *global, bool print)
72fd0718
VZ
4318{
4319 int i = 0;
4320 u32 cur_bit = 0;
4321 for (i = 0; sig; i++) {
4322 cur_bit = ((u32)0x1 << i);
4323 if (sig & cur_bit) {
4324 switch (cur_bit) {
c9ee9206
VZ
4325 case AEU_INPUTS_ATTN_BITS_PBF_PARITY_ERROR:
4326 if (print)
4327 _print_next_block(par_num++, "PBF");
72fd0718
VZ
4328 break;
4329 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
c9ee9206
VZ
4330 if (print)
4331 _print_next_block(par_num++, "QM");
4332 break;
4333 case AEU_INPUTS_ATTN_BITS_TIMERS_PARITY_ERROR:
4334 if (print)
4335 _print_next_block(par_num++, "TM");
72fd0718
VZ
4336 break;
4337 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
c9ee9206
VZ
4338 if (print)
4339 _print_next_block(par_num++, "XSDM");
4340 break;
4341 case AEU_INPUTS_ATTN_BITS_XCM_PARITY_ERROR:
4342 if (print)
4343 _print_next_block(par_num++, "XCM");
72fd0718
VZ
4344 break;
4345 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
c9ee9206
VZ
4346 if (print)
4347 _print_next_block(par_num++, "XSEMI");
72fd0718
VZ
4348 break;
4349 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
c9ee9206
VZ
4350 if (print)
4351 _print_next_block(par_num++,
4352 "DOORBELLQ");
4353 break;
4354 case AEU_INPUTS_ATTN_BITS_NIG_PARITY_ERROR:
4355 if (print)
4356 _print_next_block(par_num++, "NIG");
72fd0718
VZ
4357 break;
4358 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
c9ee9206
VZ
4359 if (print)
4360 _print_next_block(par_num++,
4361 "VAUX PCI CORE");
4362 *global = true;
72fd0718
VZ
4363 break;
4364 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
c9ee9206
VZ
4365 if (print)
4366 _print_next_block(par_num++, "DEBUG");
72fd0718
VZ
4367 break;
4368 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
c9ee9206
VZ
4369 if (print)
4370 _print_next_block(par_num++, "USDM");
72fd0718 4371 break;
8736c826
VZ
4372 case AEU_INPUTS_ATTN_BITS_UCM_PARITY_ERROR:
4373 if (print)
4374 _print_next_block(par_num++, "UCM");
4375 break;
72fd0718 4376 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
c9ee9206
VZ
4377 if (print)
4378 _print_next_block(par_num++, "USEMI");
72fd0718
VZ
4379 break;
4380 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
c9ee9206
VZ
4381 if (print)
4382 _print_next_block(par_num++, "UPB");
72fd0718
VZ
4383 break;
4384 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
c9ee9206
VZ
4385 if (print)
4386 _print_next_block(par_num++, "CSDM");
72fd0718 4387 break;
8736c826
VZ
4388 case AEU_INPUTS_ATTN_BITS_CCM_PARITY_ERROR:
4389 if (print)
4390 _print_next_block(par_num++, "CCM");
4391 break;
72fd0718
VZ
4392 }
4393
4394 /* Clear the bit */
4395 sig &= ~cur_bit;
4396 }
4397 }
4398
4399 return par_num;
4400}
4401
1191cb83
ED
4402static int bnx2x_check_blocks_with_parity2(u32 sig, int par_num,
4403 bool print)
72fd0718
VZ
4404{
4405 int i = 0;
4406 u32 cur_bit = 0;
4407 for (i = 0; sig; i++) {
4408 cur_bit = ((u32)0x1 << i);
4409 if (sig & cur_bit) {
4410 switch (cur_bit) {
4411 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
c9ee9206
VZ
4412 if (print)
4413 _print_next_block(par_num++, "CSEMI");
72fd0718
VZ
4414 break;
4415 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
c9ee9206
VZ
4416 if (print)
4417 _print_next_block(par_num++, "PXP");
72fd0718
VZ
4418 break;
4419 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
c9ee9206
VZ
4420 if (print)
4421 _print_next_block(par_num++,
72fd0718
VZ
4422 "PXPPCICLOCKCLIENT");
4423 break;
4424 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
c9ee9206
VZ
4425 if (print)
4426 _print_next_block(par_num++, "CFC");
72fd0718
VZ
4427 break;
4428 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
c9ee9206
VZ
4429 if (print)
4430 _print_next_block(par_num++, "CDU");
4431 break;
4432 case AEU_INPUTS_ATTN_BITS_DMAE_PARITY_ERROR:
4433 if (print)
4434 _print_next_block(par_num++, "DMAE");
72fd0718
VZ
4435 break;
4436 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
c9ee9206
VZ
4437 if (print)
4438 _print_next_block(par_num++, "IGU");
72fd0718
VZ
4439 break;
4440 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
c9ee9206
VZ
4441 if (print)
4442 _print_next_block(par_num++, "MISC");
72fd0718
VZ
4443 break;
4444 }
4445
4446 /* Clear the bit */
4447 sig &= ~cur_bit;
4448 }
4449 }
4450
4451 return par_num;
4452}
4453
1191cb83
ED
4454static int bnx2x_check_blocks_with_parity3(u32 sig, int par_num,
4455 bool *global, bool print)
72fd0718
VZ
4456{
4457 int i = 0;
4458 u32 cur_bit = 0;
4459 for (i = 0; sig; i++) {
4460 cur_bit = ((u32)0x1 << i);
4461 if (sig & cur_bit) {
4462 switch (cur_bit) {
4463 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
c9ee9206
VZ
4464 if (print)
4465 _print_next_block(par_num++, "MCP ROM");
4466 *global = true;
72fd0718
VZ
4467 break;
4468 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
c9ee9206
VZ
4469 if (print)
4470 _print_next_block(par_num++,
4471 "MCP UMP RX");
4472 *global = true;
72fd0718
VZ
4473 break;
4474 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
c9ee9206
VZ
4475 if (print)
4476 _print_next_block(par_num++,
4477 "MCP UMP TX");
4478 *global = true;
72fd0718
VZ
4479 break;
4480 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
c9ee9206
VZ
4481 if (print)
4482 _print_next_block(par_num++,
4483 "MCP SCPAD");
4484 *global = true;
72fd0718
VZ
4485 break;
4486 }
4487
4488 /* Clear the bit */
4489 sig &= ~cur_bit;
4490 }
4491 }
4492
4493 return par_num;
4494}
4495
1191cb83
ED
4496static int bnx2x_check_blocks_with_parity4(u32 sig, int par_num,
4497 bool print)
8736c826
VZ
4498{
4499 int i = 0;
4500 u32 cur_bit = 0;
4501 for (i = 0; sig; i++) {
4502 cur_bit = ((u32)0x1 << i);
4503 if (sig & cur_bit) {
4504 switch (cur_bit) {
4505 case AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR:
4506 if (print)
4507 _print_next_block(par_num++, "PGLUE_B");
4508 break;
4509 case AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR:
4510 if (print)
4511 _print_next_block(par_num++, "ATC");
4512 break;
4513 }
4514
4515 /* Clear the bit */
4516 sig &= ~cur_bit;
4517 }
4518 }
4519
4520 return par_num;
4521}
4522
1191cb83
ED
4523static bool bnx2x_parity_attn(struct bnx2x *bp, bool *global, bool print,
4524 u32 *sig)
72fd0718 4525{
8736c826
VZ
4526 if ((sig[0] & HW_PRTY_ASSERT_SET_0) ||
4527 (sig[1] & HW_PRTY_ASSERT_SET_1) ||
4528 (sig[2] & HW_PRTY_ASSERT_SET_2) ||
4529 (sig[3] & HW_PRTY_ASSERT_SET_3) ||
4530 (sig[4] & HW_PRTY_ASSERT_SET_4)) {
72fd0718 4531 int par_num = 0;
51c1a580
MS
4532 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention:\n"
4533 "[0]:0x%08x [1]:0x%08x [2]:0x%08x [3]:0x%08x [4]:0x%08x\n",
8736c826
VZ
4534 sig[0] & HW_PRTY_ASSERT_SET_0,
4535 sig[1] & HW_PRTY_ASSERT_SET_1,
4536 sig[2] & HW_PRTY_ASSERT_SET_2,
4537 sig[3] & HW_PRTY_ASSERT_SET_3,
4538 sig[4] & HW_PRTY_ASSERT_SET_4);
c9ee9206
VZ
4539 if (print)
4540 netdev_err(bp->dev,
4541 "Parity errors detected in blocks: ");
4542 par_num = bnx2x_check_blocks_with_parity0(
8736c826 4543 sig[0] & HW_PRTY_ASSERT_SET_0, par_num, print);
c9ee9206 4544 par_num = bnx2x_check_blocks_with_parity1(
8736c826 4545 sig[1] & HW_PRTY_ASSERT_SET_1, par_num, global, print);
c9ee9206 4546 par_num = bnx2x_check_blocks_with_parity2(
8736c826 4547 sig[2] & HW_PRTY_ASSERT_SET_2, par_num, print);
c9ee9206 4548 par_num = bnx2x_check_blocks_with_parity3(
8736c826
VZ
4549 sig[3] & HW_PRTY_ASSERT_SET_3, par_num, global, print);
4550 par_num = bnx2x_check_blocks_with_parity4(
4551 sig[4] & HW_PRTY_ASSERT_SET_4, par_num, print);
4552
c9ee9206
VZ
4553 if (print)
4554 pr_cont("\n");
8736c826 4555
72fd0718
VZ
4556 return true;
4557 } else
4558 return false;
4559}
4560
c9ee9206
VZ
4561/**
4562 * bnx2x_chk_parity_attn - checks for parity attentions.
4563 *
4564 * @bp: driver handle
4565 * @global: true if there was a global attention
4566 * @print: show parity attention in syslog
4567 */
4568bool bnx2x_chk_parity_attn(struct bnx2x *bp, bool *global, bool print)
877e9aa4 4569{
8736c826 4570 struct attn_route attn = { {0} };
72fd0718
VZ
4571 int port = BP_PORT(bp);
4572
4573 attn.sig[0] = REG_RD(bp,
4574 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
4575 port*4);
4576 attn.sig[1] = REG_RD(bp,
4577 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
4578 port*4);
4579 attn.sig[2] = REG_RD(bp,
4580 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
4581 port*4);
4582 attn.sig[3] = REG_RD(bp,
4583 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
4584 port*4);
4585
8736c826
VZ
4586 if (!CHIP_IS_E1x(bp))
4587 attn.sig[4] = REG_RD(bp,
4588 MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 +
4589 port*4);
4590
4591 return bnx2x_parity_attn(bp, global, print, attn.sig);
72fd0718
VZ
4592}
4593
f2e0899f 4594
1191cb83 4595static void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
f2e0899f
DK
4596{
4597 u32 val;
4598 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
4599
4600 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
4601 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
4602 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
51c1a580 4603 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR\n");
f2e0899f 4604 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
51c1a580 4605 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR\n");
f2e0899f 4606 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
51c1a580 4607 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN\n");
f2e0899f 4608 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
51c1a580 4609 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN\n");
f2e0899f
DK
4610 if (val &
4611 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
51c1a580 4612 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN\n");
f2e0899f
DK
4613 if (val &
4614 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
51c1a580 4615 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN\n");
f2e0899f 4616 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
51c1a580 4617 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN\n");
f2e0899f 4618 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
51c1a580 4619 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN\n");
f2e0899f 4620 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
51c1a580 4621 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW\n");
f2e0899f
DK
4622 }
4623 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
4624 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
4625 BNX2X_ERR("ATC hw attention 0x%x\n", val);
4626 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
4627 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
4628 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
51c1a580 4629 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND\n");
f2e0899f 4630 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
51c1a580 4631 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS\n");
f2e0899f 4632 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
51c1a580 4633 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT\n");
f2e0899f
DK
4634 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
4635 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
4636 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
51c1a580 4637 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU\n");
f2e0899f
DK
4638 }
4639
4640 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4641 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
4642 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
4643 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
4644 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
4645 }
4646
4647}
4648
72fd0718
VZ
4649static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
4650{
4651 struct attn_route attn, *group_mask;
34f80b04 4652 int port = BP_PORT(bp);
877e9aa4 4653 int index;
a2fbb9ea
ET
4654 u32 reg_addr;
4655 u32 val;
3fcaf2e5 4656 u32 aeu_mask;
c9ee9206 4657 bool global = false;
a2fbb9ea
ET
4658
4659 /* need to take HW lock because MCP or other port might also
4660 try to handle this event */
4a37fb66 4661 bnx2x_acquire_alr(bp);
a2fbb9ea 4662
c9ee9206
VZ
4663 if (bnx2x_chk_parity_attn(bp, &global, true)) {
4664#ifndef BNX2X_STOP_ON_ERROR
72fd0718 4665 bp->recovery_state = BNX2X_RECOVERY_INIT;
7be08a72 4666 schedule_delayed_work(&bp->sp_rtnl_task, 0);
72fd0718
VZ
4667 /* Disable HW interrupts */
4668 bnx2x_int_disable(bp);
72fd0718
VZ
4669 /* In case of parity errors don't handle attentions so that
4670 * other functions can also "see" the parity errors.
4671 */
c9ee9206
VZ
4672#else
4673 bnx2x_panic();
4674#endif
4675 bnx2x_release_alr(bp);
72fd0718
VZ
4676 return;
4677 }
4678
a2fbb9ea
ET
4679 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
4680 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
4681 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
4682 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
619c5cb6 4683 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
4684 attn.sig[4] =
4685 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
4686 else
4687 attn.sig[4] = 0;
4688
4689 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
4690 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
a2fbb9ea
ET
4691
4692 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4693 if (deasserted & (1 << index)) {
72fd0718 4694 group_mask = &bp->attn_group[index];
a2fbb9ea 4695
51c1a580 4696 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x %08x\n",
f2e0899f
DK
4697 index,
4698 group_mask->sig[0], group_mask->sig[1],
4699 group_mask->sig[2], group_mask->sig[3],
4700 group_mask->sig[4]);
a2fbb9ea 4701
f2e0899f
DK
4702 bnx2x_attn_int_deasserted4(bp,
4703 attn.sig[4] & group_mask->sig[4]);
877e9aa4 4704 bnx2x_attn_int_deasserted3(bp,
72fd0718 4705 attn.sig[3] & group_mask->sig[3]);
877e9aa4 4706 bnx2x_attn_int_deasserted1(bp,
72fd0718 4707 attn.sig[1] & group_mask->sig[1]);
877e9aa4 4708 bnx2x_attn_int_deasserted2(bp,
72fd0718 4709 attn.sig[2] & group_mask->sig[2]);
877e9aa4 4710 bnx2x_attn_int_deasserted0(bp,
72fd0718 4711 attn.sig[0] & group_mask->sig[0]);
a2fbb9ea
ET
4712 }
4713 }
4714
4a37fb66 4715 bnx2x_release_alr(bp);
a2fbb9ea 4716
f2e0899f
DK
4717 if (bp->common.int_block == INT_BLOCK_HC)
4718 reg_addr = (HC_REG_COMMAND_REG + port*32 +
4719 COMMAND_REG_ATTN_BITS_CLR);
4720 else
4721 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
a2fbb9ea
ET
4722
4723 val = ~deasserted;
f2e0899f
DK
4724 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
4725 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
5c862848 4726 REG_WR(bp, reg_addr, val);
a2fbb9ea 4727
a2fbb9ea 4728 if (~bp->attn_state & deasserted)
3fcaf2e5 4729 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
4730
4731 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
4732 MISC_REG_AEU_MASK_ATTN_FUNC_0;
4733
3fcaf2e5
EG
4734 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
4735 aeu_mask = REG_RD(bp, reg_addr);
4736
4737 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
4738 aeu_mask, deasserted);
72fd0718 4739 aeu_mask |= (deasserted & 0x3ff);
3fcaf2e5 4740 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 4741
3fcaf2e5
EG
4742 REG_WR(bp, reg_addr, aeu_mask);
4743 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
4744
4745 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
4746 bp->attn_state &= ~deasserted;
4747 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
4748}
4749
4750static void bnx2x_attn_int(struct bnx2x *bp)
4751{
4752 /* read local copy of bits */
68d59484
EG
4753 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
4754 attn_bits);
4755 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
4756 attn_bits_ack);
a2fbb9ea
ET
4757 u32 attn_state = bp->attn_state;
4758
4759 /* look for changed bits */
4760 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
4761 u32 deasserted = ~attn_bits & attn_ack & attn_state;
4762
4763 DP(NETIF_MSG_HW,
4764 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
4765 attn_bits, attn_ack, asserted, deasserted);
4766
4767 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 4768 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
4769
4770 /* handle bits that were raised */
4771 if (asserted)
4772 bnx2x_attn_int_asserted(bp, asserted);
4773
4774 if (deasserted)
4775 bnx2x_attn_int_deasserted(bp, deasserted);
4776}
4777
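/* Tiny sketch of the edge detection performed above: a bit is newly asserted
 * when set in attn_bits but not yet acknowledged or tracked in attn_state,
 * and newly deasserted when clear in attn_bits but still acknowledged and
 * tracked.
 */
#include <stdint.h>

struct sketch_attn_edges {
	uint32_t asserted;
	uint32_t deasserted;
};

static struct sketch_attn_edges sketch_attn_edge_detect(uint32_t attn_bits,
							uint32_t attn_ack,
							uint32_t attn_state)
{
	struct sketch_attn_edges e = {
		.asserted   = attn_bits & ~attn_ack & ~attn_state,
		.deasserted = ~attn_bits & attn_ack & attn_state,
	};
	return e;
}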
619c5cb6
VZ
4778void bnx2x_igu_ack_sb(struct bnx2x *bp, u8 igu_sb_id, u8 segment,
4779 u16 index, u8 op, u8 update)
4780{
dc1ba591
AE
4781 u32 igu_addr = bp->igu_base_addr;
4782 igu_addr += (IGU_CMD_INT_ACK_BASE + igu_sb_id)*8;
619c5cb6
VZ
4783 bnx2x_igu_ack_sb_gen(bp, igu_sb_id, segment, index, op, update,
4784 igu_addr);
4785}
4786
1191cb83 4787static void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
523224a3
DK
4788{
4789 /* No memory barriers */
4790 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
4791 mmiowb(); /* keep prod updates ordered */
4792}
4793
523224a3
DK
4794static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
4795 union event_ring_elem *elem)
4796{
619c5cb6
VZ
4797 u8 err = elem->message.error;
4798
523224a3 4799 if (!bp->cnic_eth_dev.starting_cid ||
c3a8ce61
VZ
4800 (cid < bp->cnic_eth_dev.starting_cid &&
4801 cid != bp->cnic_eth_dev.iscsi_l2_cid))
523224a3
DK
4802 return 1;
4803
4804 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
4805
619c5cb6
VZ
4806 if (unlikely(err)) {
4807
523224a3
DK
4808 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
4809 cid);
823e1d90 4810 bnx2x_panic_dump(bp, false);
523224a3 4811 }
619c5cb6 4812 bnx2x_cnic_cfc_comp(bp, cid, err);
523224a3
DK
4813 return 0;
4814}
523224a3 4815
1191cb83 4816static void bnx2x_handle_mcast_eqe(struct bnx2x *bp)
619c5cb6
VZ
4817{
4818 struct bnx2x_mcast_ramrod_params rparam;
4819 int rc;
4820
4821 memset(&rparam, 0, sizeof(rparam));
4822
4823 rparam.mcast_obj = &bp->mcast_obj;
4824
4825 netif_addr_lock_bh(bp->dev);
4826
4827 /* Clear pending state for the last command */
4828 bp->mcast_obj.raw.clear_pending(&bp->mcast_obj.raw);
4829
4830 /* If there are pending mcast commands - send them */
4831 if (bp->mcast_obj.check_pending(&bp->mcast_obj)) {
4832 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_CONT);
4833 if (rc < 0)
4834 BNX2X_ERR("Failed to send pending mcast commands: %d\n",
4835 rc);
4836 }
4837
4838 netif_addr_unlock_bh(bp->dev);
4839}
4840
1191cb83
ED
4841static void bnx2x_handle_classification_eqe(struct bnx2x *bp,
4842 union event_ring_elem *elem)
619c5cb6
VZ
4843{
4844 unsigned long ramrod_flags = 0;
4845 int rc = 0;
4846 u32 cid = elem->message.data.eth_event.echo & BNX2X_SWCID_MASK;
4847 struct bnx2x_vlan_mac_obj *vlan_mac_obj;
4848
4849 /* Always push next commands out, don't wait here */
4850 __set_bit(RAMROD_CONT, &ramrod_flags);
4851
86564c3f
YM
4852 switch (le32_to_cpu((__force __le32)elem->message.data.eth_event.echo)
4853 >> BNX2X_SWCID_SHIFT) {
619c5cb6 4854 case BNX2X_FILTER_MAC_PENDING:
51c1a580 4855 DP(BNX2X_MSG_SP, "Got SETUP_MAC completions\n");
55c11941 4856 if (CNIC_LOADED(bp) && (cid == BNX2X_ISCSI_ETH_CID(bp)))
619c5cb6
VZ
4857 vlan_mac_obj = &bp->iscsi_l2_mac_obj;
4858 else
15192a8c 4859 vlan_mac_obj = &bp->sp_objs[cid].mac_obj;
619c5cb6
VZ
4860
4861 break;
619c5cb6 4862 case BNX2X_FILTER_MCAST_PENDING:
51c1a580 4863 DP(BNX2X_MSG_SP, "Got SETUP_MCAST completions\n");
619c5cb6
VZ
4864 /* This is only relevant for 57710 where multicast MACs are
4865 * configured as unicast MACs using the same ramrod.
4866 */
4867 bnx2x_handle_mcast_eqe(bp);
4868 return;
4869 default:
4870 BNX2X_ERR("Unsupported classification command: %d\n",
4871 elem->message.data.eth_event.echo);
4872 return;
4873 }
4874
4875 rc = vlan_mac_obj->complete(bp, vlan_mac_obj, elem, &ramrod_flags);
4876
4877 if (rc < 0)
4878 BNX2X_ERR("Failed to schedule new commands: %d\n", rc);
4879 else if (rc > 0)
4880 DP(BNX2X_MSG_SP, "Scheduled next pending commands...\n");
4881
4882}
4883
619c5cb6 4884static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start);
619c5cb6 4885
1191cb83 4886static void bnx2x_handle_rx_mode_eqe(struct bnx2x *bp)
619c5cb6
VZ
4887{
4888 netif_addr_lock_bh(bp->dev);
4889
4890 clear_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
4891
4892 /* Send rx_mode command again if was requested */
4893 if (test_and_clear_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state))
4894 bnx2x_set_storm_rx_mode(bp);
619c5cb6
VZ
4895 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED,
4896 &bp->sp_state))
4897 bnx2x_set_iscsi_eth_rx_mode(bp, true);
4898 else if (test_and_clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED,
4899 &bp->sp_state))
4900 bnx2x_set_iscsi_eth_rx_mode(bp, false);
619c5cb6
VZ
4901
4902 netif_addr_unlock_bh(bp->dev);
4903}
4904
1191cb83 4905static void bnx2x_after_afex_vif_lists(struct bnx2x *bp,
a3348722
BW
4906 union event_ring_elem *elem)
4907{
4908 if (elem->message.data.vif_list_event.echo == VIF_LIST_RULE_GET) {
4909 DP(BNX2X_MSG_SP,
4910 "afex: ramrod completed VIF LIST_GET, addrs 0x%x\n",
4911 elem->message.data.vif_list_event.func_bit_map);
4912 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTGET_ACK,
4913 elem->message.data.vif_list_event.func_bit_map);
4914 } else if (elem->message.data.vif_list_event.echo ==
4915 VIF_LIST_RULE_SET) {
4916 DP(BNX2X_MSG_SP, "afex: ramrod completed VIF LIST_SET\n");
4917 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_LISTSET_ACK, 0);
4918 }
4919}
4920
4921/* called with rtnl_lock */
1191cb83 4922static void bnx2x_after_function_update(struct bnx2x *bp)
a3348722
BW
4923{
4924 int q, rc;
4925 struct bnx2x_fastpath *fp;
4926 struct bnx2x_queue_state_params queue_params = {NULL};
4927 struct bnx2x_queue_update_params *q_update_params =
4928 &queue_params.params.update;
4929
2de67439 4930 /* Send Q update command with afex vlan removal values for all Qs */
a3348722
BW
4931 queue_params.cmd = BNX2X_Q_CMD_UPDATE;
4932
4933 /* set silent vlan removal values according to vlan mode */
4934 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
4935 &q_update_params->update_flags);
4936 __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
4937 &q_update_params->update_flags);
4938 __set_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4939
4940 /* in access mode mark mask and value are 0 to strip all vlans */
4941 if (bp->afex_vlan_mode == FUNC_MF_CFG_AFEX_VLAN_ACCESS_MODE) {
4942 q_update_params->silent_removal_value = 0;
4943 q_update_params->silent_removal_mask = 0;
4944 } else {
4945 q_update_params->silent_removal_value =
4946 (bp->afex_def_vlan_tag & VLAN_VID_MASK);
4947 q_update_params->silent_removal_mask = VLAN_VID_MASK;
4948 }
4949
4950 for_each_eth_queue(bp, q) {
4951 /* Set the appropriate Queue object */
4952 fp = &bp->fp[q];
15192a8c 4953 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
a3348722
BW
4954
4955 /* send the ramrod */
4956 rc = bnx2x_queue_state_change(bp, &queue_params);
4957 if (rc < 0)
4958 BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4959 q);
4960 }
4961
fea75645 4962 if (!NO_FCOE(bp) && CNIC_ENABLED(bp)) {
65565884 4963 fp = &bp->fp[FCOE_IDX(bp)];
15192a8c 4964 queue_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
a3348722
BW
4965
4966 /* clear pending completion bit */
4967 __clear_bit(RAMROD_COMP_WAIT, &queue_params.ramrod_flags);
4968
4969 /* mark latest Q bit */
4970 smp_mb__before_clear_bit();
4971 set_bit(BNX2X_AFEX_FCOE_Q_UPDATE_PENDING, &bp->sp_state);
4972 smp_mb__after_clear_bit();
4973
4974 /* send Q update ramrod for FCoE Q */
4975 rc = bnx2x_queue_state_change(bp, &queue_params);
4976 if (rc < 0)
4977			BNX2X_ERR("Failed to config silent vlan rem for Q %d\n",
4978				  FCOE_IDX(bp));
4979 } else {
4980 /* If no FCoE ring - ACK MCP now */
4981 bnx2x_link_report(bp);
4982 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
4983 }
a3348722
BW
4984}
4985
1191cb83 4986static struct bnx2x_queue_sp_obj *bnx2x_cid_to_q_obj(
619c5cb6
VZ
4987 struct bnx2x *bp, u32 cid)
4988{
94f05b0f 4989 DP(BNX2X_MSG_SP, "retrieving fp from cid %d\n", cid);
55c11941
MS
4990
4991 if (CNIC_LOADED(bp) && (cid == BNX2X_FCOE_ETH_CID(bp)))
15192a8c 4992 return &bnx2x_fcoe_sp_obj(bp, q_obj);
619c5cb6 4993 else
15192a8c 4994 return &bp->sp_objs[CID_TO_FP(cid, bp)].q_obj;
619c5cb6
VZ
4995}
4996
523224a3
DK
4997static void bnx2x_eq_int(struct bnx2x *bp)
4998{
4999 u16 hw_cons, sw_cons, sw_prod;
5000 union event_ring_elem *elem;
55c11941 5001 u8 echo;
523224a3
DK
5002 u32 cid;
5003 u8 opcode;
fd1fc79d 5004 int rc, spqe_cnt = 0;
619c5cb6
VZ
5005 struct bnx2x_queue_sp_obj *q_obj;
5006 struct bnx2x_func_sp_obj *f_obj = &bp->func_obj;
5007 struct bnx2x_raw_obj *rss_raw = &bp->rss_conf_obj.raw;
523224a3
DK
5008
5009 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
5010
5011	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
5012	 * When we get the next-page we need to adjust so the loop
5013	 * condition below will be met. The next element is the size of a
5014	 * regular element and hence incrementing by 1
5015	 */
5016 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
5017 hw_cons++;
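	/* Illustrative walk-through (a sketch, not restating the exact ring
	 * constants): the last slot of each EQ page holds a next-page pointer
	 * rather than a real event, so a hardware consumer value that lands
	 * on that slot is bumped by one extra position above; the software
	 * side is expected to perform the matching skip via NEXT_EQ_IDX(),
	 * which keeps the "sw_cons != hw_cons" loop below able to terminate.
	 */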
5018
25985edc 5019 /* This function may never run in parallel with itself for a
523224a3
DK
5020	 * specific bp, thus there is no need for a "paired" read memory
5021 * barrier here.
5022 */
5023 sw_cons = bp->eq_cons;
5024 sw_prod = bp->eq_prod;
5025
d6cae238 5026 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->eq_spq_left %x\n",
6e30dd4e 5027 hw_cons, sw_cons, atomic_read(&bp->eq_spq_left));
523224a3
DK
5028
5029 for (; sw_cons != hw_cons;
5030 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
5031
523224a3
DK
5032 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
5033
fd1fc79d
AE
5034 rc = bnx2x_iov_eq_sp_event(bp, elem);
5035 if (!rc) {
5036 DP(BNX2X_MSG_IOV, "bnx2x_iov_eq_sp_event returned %d\n",
5037 rc);
5038 goto next_spqe;
5039 }
523224a3 5040
86564c3f
YM
5041 /* elem CID originates from FW; actually LE */
5042 cid = SW_CID((__force __le32)
5043 elem->message.data.cfc_del_event.cid);
5044 opcode = elem->message.opcode;
523224a3
DK
5045
5046 /* handle eq element */
5047 switch (opcode) {
fd1fc79d
AE
5048 case EVENT_RING_OPCODE_VF_PF_CHANNEL:
5049 DP(BNX2X_MSG_IOV, "vf pf channel element on eq\n");
5050 bnx2x_vf_mbx(bp, &elem->message.data.vf_pf_event);
5051 continue;
5052
523224a3 5053 case EVENT_RING_OPCODE_STAT_QUERY:
51c1a580
MS
5054 DP(BNX2X_MSG_SP | BNX2X_MSG_STATS,
5055 "got statistics comp event %d\n",
619c5cb6 5056 bp->stats_comp++);
523224a3 5057 /* nothing to do with stats comp */
d6cae238 5058 goto next_spqe;
523224a3
DK
5059
5060 case EVENT_RING_OPCODE_CFC_DEL:
5061 /* handle according to cid range */
5062 /*
5063 * we may want to verify here that the bp state is
5064 * HALTING
5065 */
d6cae238 5066 DP(BNX2X_MSG_SP,
523224a3 5067 "got delete ramrod for MULTI[%d]\n", cid);
55c11941
MS
5068
5069 if (CNIC_LOADED(bp) &&
5070 !bnx2x_cnic_handle_cfc_del(bp, cid, elem))
523224a3 5071 goto next_spqe;
55c11941 5072
619c5cb6
VZ
5073 q_obj = bnx2x_cid_to_q_obj(bp, cid);
5074
5075 if (q_obj->complete_cmd(bp, q_obj, BNX2X_Q_CMD_CFC_DEL))
5076 break;
5077
5078
523224a3
DK
5079
5080 goto next_spqe;
e4901dde
VZ
5081
5082 case EVENT_RING_OPCODE_STOP_TRAFFIC:
51c1a580 5083 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got STOP TRAFFIC\n");
6debea87
DK
5084 if (f_obj->complete_cmd(bp, f_obj,
5085 BNX2X_F_CMD_TX_STOP))
5086 break;
e4901dde
VZ
5087 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
5088 goto next_spqe;
619c5cb6 5089
e4901dde 5090 case EVENT_RING_OPCODE_START_TRAFFIC:
51c1a580 5091 DP(BNX2X_MSG_SP | BNX2X_MSG_DCB, "got START TRAFFIC\n");
6debea87
DK
5092 if (f_obj->complete_cmd(bp, f_obj,
5093 BNX2X_F_CMD_TX_START))
5094 break;
e4901dde
VZ
5095 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
5096 goto next_spqe;
55c11941 5097
a3348722 5098 case EVENT_RING_OPCODE_FUNCTION_UPDATE:
55c11941
MS
5099 echo = elem->message.data.function_update_event.echo;
5100 if (echo == SWITCH_UPDATE) {
5101 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5102 "got FUNC_SWITCH_UPDATE ramrod\n");
5103 if (f_obj->complete_cmd(
5104 bp, f_obj, BNX2X_F_CMD_SWITCH_UPDATE))
5105 break;
a3348722 5106
55c11941
MS
5107 } else {
5108 DP(BNX2X_MSG_SP | BNX2X_MSG_MCP,
5109 "AFEX: ramrod completed FUNCTION_UPDATE\n");
5110 f_obj->complete_cmd(bp, f_obj,
5111 BNX2X_F_CMD_AFEX_UPDATE);
5112
5113 /* We will perform the Queues update from
5114 * sp_rtnl task as all Queue SP operations
5115 * should run under rtnl_lock.
5116 */
5117 smp_mb__before_clear_bit();
5118 set_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE,
5119 &bp->sp_rtnl_state);
5120 smp_mb__after_clear_bit();
5121
5122 schedule_delayed_work(&bp->sp_rtnl_task, 0);
5123 }
a3348722 5124
a3348722
BW
5125 goto next_spqe;
5126
5127 case EVENT_RING_OPCODE_AFEX_VIF_LISTS:
5128 f_obj->complete_cmd(bp, f_obj,
5129 BNX2X_F_CMD_AFEX_VIFLISTS);
5130 bnx2x_after_afex_vif_lists(bp, elem);
5131 goto next_spqe;
619c5cb6 5132 case EVENT_RING_OPCODE_FUNCTION_START:
51c1a580
MS
5133 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5134 "got FUNC_START ramrod\n");
619c5cb6
VZ
5135 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_START))
5136 break;
5137
5138 goto next_spqe;
5139
5140 case EVENT_RING_OPCODE_FUNCTION_STOP:
51c1a580
MS
5141 DP(BNX2X_MSG_SP | NETIF_MSG_IFUP,
5142 "got FUNC_STOP ramrod\n");
619c5cb6
VZ
5143 if (f_obj->complete_cmd(bp, f_obj, BNX2X_F_CMD_STOP))
5144 break;
5145
5146 goto next_spqe;
523224a3
DK
5147 }
5148
5149 switch (opcode | bp->state) {
619c5cb6
VZ
5150 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
5151 BNX2X_STATE_OPEN):
5152 case (EVENT_RING_OPCODE_RSS_UPDATE_RULES |
523224a3 5153 BNX2X_STATE_OPENING_WAIT4_PORT):
619c5cb6
VZ
5154 cid = elem->message.data.eth_event.echo &
5155 BNX2X_SWCID_MASK;
d6cae238 5156 DP(BNX2X_MSG_SP, "got RSS_UPDATE ramrod. CID %d\n",
619c5cb6
VZ
5157 cid);
5158 rss_raw->clear_pending(rss_raw);
523224a3
DK
5159 break;
5160
619c5cb6
VZ
5161 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
5162 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
5163 case (EVENT_RING_OPCODE_SET_MAC |
523224a3 5164 BNX2X_STATE_CLOSING_WAIT4_HALT):
619c5cb6
VZ
5165 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5166 BNX2X_STATE_OPEN):
5167 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5168 BNX2X_STATE_DIAG):
5169 case (EVENT_RING_OPCODE_CLASSIFICATION_RULES |
5170 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5171 DP(BNX2X_MSG_SP, "got (un)set mac ramrod\n");
619c5cb6 5172 bnx2x_handle_classification_eqe(bp, elem);
523224a3
DK
5173 break;
5174
619c5cb6
VZ
5175 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5176 BNX2X_STATE_OPEN):
5177 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5178 BNX2X_STATE_DIAG):
5179 case (EVENT_RING_OPCODE_MULTICAST_RULES |
5180 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5181 DP(BNX2X_MSG_SP, "got mcast ramrod\n");
619c5cb6 5182 bnx2x_handle_mcast_eqe(bp);
523224a3
DK
5183 break;
5184
619c5cb6
VZ
5185 case (EVENT_RING_OPCODE_FILTERS_RULES |
5186 BNX2X_STATE_OPEN):
5187 case (EVENT_RING_OPCODE_FILTERS_RULES |
5188 BNX2X_STATE_DIAG):
5189 case (EVENT_RING_OPCODE_FILTERS_RULES |
523224a3 5190 BNX2X_STATE_CLOSING_WAIT4_HALT):
d6cae238 5191 DP(BNX2X_MSG_SP, "got rx_mode ramrod\n");
619c5cb6 5192 bnx2x_handle_rx_mode_eqe(bp);
523224a3
DK
5193 break;
5194 default:
5195 /* unknown event log error and continue */
619c5cb6
VZ
5196 BNX2X_ERR("Unknown EQ event %d, bp->state 0x%x\n",
5197 elem->message.opcode, bp->state);
523224a3
DK
5198 }
5199next_spqe:
5200 spqe_cnt++;
5201 } /* for */
5202
8fe23fbd 5203 smp_mb__before_atomic_inc();
6e30dd4e 5204 atomic_add(spqe_cnt, &bp->eq_spq_left);
523224a3
DK
5205
5206 bp->eq_cons = sw_cons;
5207 bp->eq_prod = sw_prod;
5208 /* Make sure that above mem writes were issued towards the memory */
5209 smp_wmb();
5210
5211 /* update producer */
5212 bnx2x_update_eq_prod(bp, bp->eq_prod);
5213}
5214
a2fbb9ea
ET
5215static void bnx2x_sp_task(struct work_struct *work)
5216{
1cf167f2 5217 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea 5218
fd1fc79d 5219 DP(BNX2X_MSG_SP, "sp task invoked\n");
a2fbb9ea 5220
fd1fc79d
AE
5221	/* make sure the atomic interrupt_occurred has been written */
5222 smp_rmb();
5223 if (atomic_read(&bp->interrupt_occurred)) {
a2fbb9ea 5224
fd1fc79d
AE
5225 /* what work needs to be performed? */
5226 u16 status = bnx2x_update_dsb_idx(bp);
cdaa7cb8 5227
fd1fc79d
AE
5228 DP(BNX2X_MSG_SP, "status %x\n", status);
5229 DP(BNX2X_MSG_SP, "setting interrupt_occurred to 0\n");
5230 atomic_set(&bp->interrupt_occurred, 0);
5231
5232 /* HW attentions */
5233 if (status & BNX2X_DEF_SB_ATT_IDX) {
5234 bnx2x_attn_int(bp);
5235 status &= ~BNX2X_DEF_SB_ATT_IDX;
5236 }
5237
5238 /* SP events: STAT_QUERY and others */
5239 if (status & BNX2X_DEF_SB_IDX) {
5240 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
523224a3 5241
55c11941 5242 if (FCOE_INIT(bp) &&
fd1fc79d
AE
5243 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
5244 /* Prevent local bottom-halves from running as
5245 * we are going to change the local NAPI list.
5246 */
5247 local_bh_disable();
5248 napi_schedule(&bnx2x_fcoe(bp, napi));
5249 local_bh_enable();
5250 }
5251
5252 /* Handle EQ completions */
5253 bnx2x_eq_int(bp);
5254 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
5255 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
5256
5257 status &= ~BNX2X_DEF_SB_IDX;
019dbb4c 5258 }
55c11941 5259
fd1fc79d
AE
5260 /* if status is non zero then perhaps something went wrong */
5261 if (unlikely(status))
5262 DP(BNX2X_MSG_SP,
5263 "got an unknown interrupt! (status 0x%x)\n", status);
523224a3 5264
fd1fc79d
AE
5265 /* ack status block only if something was actually handled */
5266 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
5267 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
523224a3 5268
cdaa7cb8
VZ
5269 }
5270
fd1fc79d
AE
5271 /* must be called after the EQ processing (since eq leads to sriov
5272 * ramrod completion flows).
5273 * This flow may have been scheduled by the arrival of a ramrod
5274 * completion, or by the sriov code rescheduling itself.
5275 */
5276 bnx2x_iov_sp_task(bp);
a3348722
BW
5277
5278 /* afex - poll to check if VIFSET_ACK should be sent to MFW */
5279 if (test_and_clear_bit(BNX2X_AFEX_PENDING_VIFSET_MCP_ACK,
5280 &bp->sp_state)) {
5281 bnx2x_link_report(bp);
5282 bnx2x_fw_command(bp, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0);
5283 }
a2fbb9ea
ET
5284}
5285
9f6c9258 5286irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
a2fbb9ea
ET
5287{
5288 struct net_device *dev = dev_instance;
5289 struct bnx2x *bp = netdev_priv(dev);
5290
523224a3
DK
5291 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
5292 IGU_INT_DISABLE, 0);
a2fbb9ea
ET
5293
5294#ifdef BNX2X_STOP_ON_ERROR
5295 if (unlikely(bp->panic))
5296 return IRQ_HANDLED;
5297#endif
5298
55c11941 5299 if (CNIC_LOADED(bp)) {
993ac7b5
MC
5300 struct cnic_ops *c_ops;
5301
5302 rcu_read_lock();
5303 c_ops = rcu_dereference(bp->cnic_ops);
5304 if (c_ops)
5305 c_ops->cnic_handler(bp->cnic_data, NULL);
5306 rcu_read_unlock();
5307 }
55c11941 5308
fd1fc79d
AE
5309 /* schedule sp task to perform default status block work, ack
5310 * attentions and enable interrupts.
5311 */
5312 bnx2x_schedule_sp_task(bp);
a2fbb9ea
ET
5313
5314 return IRQ_HANDLED;
5315}
5316
5317/* end of slow path */
5318
619c5cb6
VZ
5319
5320void bnx2x_drv_pulse(struct bnx2x *bp)
5321{
5322 SHMEM_WR(bp, func_mb[BP_FW_MB_IDX(bp)].drv_pulse_mb,
5323 bp->fw_drv_pulse_wr_seq);
5324}
5325
a2fbb9ea
ET
5326static void bnx2x_timer(unsigned long data)
5327{
5328 struct bnx2x *bp = (struct bnx2x *) data;
5329
5330 if (!netif_running(bp->dev))
5331 return;
5332
67c431a5
AE
5333 if (IS_PF(bp) &&
5334 !BP_NOMCP(bp)) {
f2e0899f 5335 int mb_idx = BP_FW_MB_IDX(bp);
a2fbb9ea
ET
5336 u32 drv_pulse;
5337 u32 mcp_pulse;
5338
5339 ++bp->fw_drv_pulse_wr_seq;
5340 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
5341 /* TBD - add SYSTEM_TIME */
5342 drv_pulse = bp->fw_drv_pulse_wr_seq;
619c5cb6 5343 bnx2x_drv_pulse(bp);
a2fbb9ea 5344
f2e0899f 5345 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
a2fbb9ea
ET
5346 MCP_PULSE_SEQ_MASK);
5347 /* The delta between driver pulse and mcp response
5348 * should be 1 (before mcp response) or 0 (after mcp response)
5349 */
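		/* Worked example (illustrative values only): after the driver
		 * writes drv_pulse = 0x0005, both mcp_pulse = 0x0005 (MCP has
		 * already echoed) and mcp_pulse = 0x0004 (echo still pending)
		 * pass the check below; any other value is treated as a lost
		 * heartbeat.
		 */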
5350 if ((drv_pulse != mcp_pulse) &&
5351 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
5352 /* someone lost a heartbeat... */
5353 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
5354 drv_pulse, mcp_pulse);
5355 }
5356 }
5357
f34d28ea 5358 if (bp->state == BNX2X_STATE_OPEN)
bb2a0f7a 5359 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 5360
abc5a021
AE
5361 /* sample pf vf bulletin board for new posts from pf */
5362 if (IS_VF(bp))
5363 bnx2x_sample_bulletin(bp);
5364
a2fbb9ea
ET
5365 mod_timer(&bp->timer, jiffies + bp->current_interval);
5366}
5367
5368/* end of Statistics */
5369
5370/* nic init */
5371
5372/*
5373 * nic init service functions
5374 */
5375
1191cb83 5376static void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
a2fbb9ea 5377{
523224a3
DK
5378 u32 i;
5379 if (!(len%4) && !(addr%4))
5380 for (i = 0; i < len; i += 4)
5381 REG_WR(bp, addr + i, fill);
5382 else
5383 for (i = 0; i < len; i++)
5384 REG_WR8(bp, addr + i, fill);
34f80b04 5385
34f80b04
EG
5386}
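/* Usage sketch (call sites appear below, e.g. bnx2x_zero_fp_sb()): a call
 * such as bnx2x_fill(bp, BAR_CSTRORM_INTMEM + <block offset>, 0, <size>)
 * zeroes a storm memory region; the dword path is taken only when both the
 * address and the length are 4-byte aligned, otherwise byte writes are used.
 */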
5387
523224a3 5388/* helper: writes FP SP data to FW - data_size in dwords */
1191cb83
ED
5389static void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
5390 int fw_sb_id,
5391 u32 *sb_data_p,
5392 u32 data_size)
34f80b04 5393{
a2fbb9ea 5394 int index;
523224a3
DK
5395 for (index = 0; index < data_size; index++)
5396 REG_WR(bp, BAR_CSTRORM_INTMEM +
5397 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
5398 sizeof(u32)*index,
5399 *(sb_data_p + index));
5400}
a2fbb9ea 5401
1191cb83 5402static void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
523224a3
DK
5403{
5404 u32 *sb_data_p;
5405 u32 data_size = 0;
f2e0899f 5406 struct hc_status_block_data_e2 sb_data_e2;
523224a3 5407 struct hc_status_block_data_e1x sb_data_e1x;
a2fbb9ea 5408
523224a3 5409 /* disable the function first */
619c5cb6 5410 if (!CHIP_IS_E1x(bp)) {
f2e0899f 5411 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
619c5cb6 5412 sb_data_e2.common.state = SB_DISABLED;
f2e0899f
DK
5413 sb_data_e2.common.p_func.vf_valid = false;
5414 sb_data_p = (u32 *)&sb_data_e2;
5415 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
5416 } else {
5417 memset(&sb_data_e1x, 0,
5418 sizeof(struct hc_status_block_data_e1x));
619c5cb6 5419 sb_data_e1x.common.state = SB_DISABLED;
f2e0899f
DK
5420 sb_data_e1x.common.p_func.vf_valid = false;
5421 sb_data_p = (u32 *)&sb_data_e1x;
5422 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
5423 }
523224a3 5424 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
a2fbb9ea 5425
523224a3
DK
5426 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5427 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
5428 CSTORM_STATUS_BLOCK_SIZE);
5429 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5430 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
5431 CSTORM_SYNC_BLOCK_SIZE);
5432}
34f80b04 5433
523224a3 5434/* helper: writes SP SB data to FW */
1191cb83 5435static void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
523224a3
DK
5436 struct hc_sp_status_block_data *sp_sb_data)
5437{
5438 int func = BP_FUNC(bp);
5439 int i;
5440 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
5441 REG_WR(bp, BAR_CSTRORM_INTMEM +
5442 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5443 i*sizeof(u32),
5444 *((u32 *)sp_sb_data + i));
34f80b04
EG
5445}
5446
1191cb83 5447static void bnx2x_zero_sp_sb(struct bnx2x *bp)
34f80b04
EG
5448{
5449 int func = BP_FUNC(bp);
523224a3
DK
5450 struct hc_sp_status_block_data sp_sb_data;
5451 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
a2fbb9ea 5452
619c5cb6 5453 sp_sb_data.state = SB_DISABLED;
523224a3
DK
5454 sp_sb_data.p_func.vf_valid = false;
5455
5456 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
5457
5458 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5459 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
5460 CSTORM_SP_STATUS_BLOCK_SIZE);
5461 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
5462 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
5463 CSTORM_SP_SYNC_BLOCK_SIZE);
5464
5465}
5466
5467
1191cb83 5468static void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
523224a3
DK
5469 int igu_sb_id, int igu_seg_id)
5470{
5471 hc_sm->igu_sb_id = igu_sb_id;
5472 hc_sm->igu_seg_id = igu_seg_id;
5473 hc_sm->timer_value = 0xFF;
5474 hc_sm->time_to_expire = 0xFFFFFFFF;
a2fbb9ea
ET
5475}
5476
150966ad
AE
5477
5478/* allocates state machine ids. */
1191cb83 5479static void bnx2x_map_sb_state_machines(struct hc_index_data *index_data)
150966ad
AE
5480{
5481 /* zero out state machine indices */
5482 /* rx indices */
5483 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5484
5485 /* tx indices */
5486 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags &= ~HC_INDEX_DATA_SM_ID;
5487 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags &= ~HC_INDEX_DATA_SM_ID;
5488 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags &= ~HC_INDEX_DATA_SM_ID;
5489 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags &= ~HC_INDEX_DATA_SM_ID;
5490
5491 /* map indices */
5492 /* rx indices */
5493 index_data[HC_INDEX_ETH_RX_CQ_CONS].flags |=
5494 SM_RX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5495
5496 /* tx indices */
5497 index_data[HC_INDEX_OOO_TX_CQ_CONS].flags |=
5498 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5499 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS0].flags |=
5500 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5501 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS1].flags |=
5502 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5503 index_data[HC_INDEX_ETH_TX_CQ_CONS_COS2].flags |=
5504 SM_TX_ID << HC_INDEX_DATA_SM_ID_SHIFT;
5505}
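/* Resulting layout (derived from the assignments above): the SM_ID bits of
 * each flags word are first cleared, then the RX CQ index is bound to the
 * SM_RX_ID state machine and the OOO/COS0..COS2 TX CQ indices to SM_TX_ID,
 * i.e. one RX and one TX state machine drive all of a fastpath SB's indices.
 */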
5506
b93288d5 5507void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
523224a3 5508 u8 vf_valid, int fw_sb_id, int igu_sb_id)
a2fbb9ea 5509{
523224a3
DK
5510 int igu_seg_id;
5511
f2e0899f 5512 struct hc_status_block_data_e2 sb_data_e2;
523224a3
DK
5513 struct hc_status_block_data_e1x sb_data_e1x;
5514 struct hc_status_block_sm *hc_sm_p;
523224a3
DK
5515 int data_size;
5516 u32 *sb_data_p;
5517
f2e0899f
DK
5518 if (CHIP_INT_MODE_IS_BC(bp))
5519 igu_seg_id = HC_SEG_ACCESS_NORM;
5520 else
5521 igu_seg_id = IGU_SEG_ACCESS_NORM;
523224a3
DK
5522
5523 bnx2x_zero_fp_sb(bp, fw_sb_id);
5524
619c5cb6 5525 if (!CHIP_IS_E1x(bp)) {
f2e0899f 5526 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
619c5cb6 5527 sb_data_e2.common.state = SB_ENABLED;
f2e0899f
DK
5528 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
5529 sb_data_e2.common.p_func.vf_id = vfid;
5530 sb_data_e2.common.p_func.vf_valid = vf_valid;
5531 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
5532 sb_data_e2.common.same_igu_sb_1b = true;
5533 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
5534 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
5535 hc_sm_p = sb_data_e2.common.state_machine;
f2e0899f
DK
5536 sb_data_p = (u32 *)&sb_data_e2;
5537 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
150966ad 5538 bnx2x_map_sb_state_machines(sb_data_e2.index_data);
f2e0899f
DK
5539 } else {
5540 memset(&sb_data_e1x, 0,
5541 sizeof(struct hc_status_block_data_e1x));
619c5cb6 5542 sb_data_e1x.common.state = SB_ENABLED;
f2e0899f
DK
5543 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
5544 sb_data_e1x.common.p_func.vf_id = 0xff;
5545 sb_data_e1x.common.p_func.vf_valid = false;
5546 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
5547 sb_data_e1x.common.same_igu_sb_1b = true;
5548 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
5549 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
5550 hc_sm_p = sb_data_e1x.common.state_machine;
f2e0899f
DK
5551 sb_data_p = (u32 *)&sb_data_e1x;
5552 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
150966ad 5553 bnx2x_map_sb_state_machines(sb_data_e1x.index_data);
f2e0899f 5554 }
523224a3
DK
5555
5556 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
5557 igu_sb_id, igu_seg_id);
5558 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
5559 igu_sb_id, igu_seg_id);
5560
51c1a580 5561 DP(NETIF_MSG_IFUP, "Init FW SB %d\n", fw_sb_id);
523224a3 5562
86564c3f 5563	/* write indices to HW - PCI guarantees endianness of regpairs */
523224a3
DK
5564 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
5565}
5566
619c5cb6 5567static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u8 fw_sb_id,
523224a3
DK
5568 u16 tx_usec, u16 rx_usec)
5569{
6383c0b3 5570 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, HC_INDEX_ETH_RX_CQ_CONS,
523224a3 5571 false, rx_usec);
6383c0b3
AE
5572 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5573 HC_INDEX_ETH_TX_CQ_CONS_COS0, false,
5574 tx_usec);
5575 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5576 HC_INDEX_ETH_TX_CQ_CONS_COS1, false,
5577 tx_usec);
5578 bnx2x_update_coalesce_sb_index(bp, fw_sb_id,
5579 HC_INDEX_ETH_TX_CQ_CONS_COS2, false,
5580 tx_usec);
523224a3 5581}
f2e0899f 5582
523224a3
DK
5583static void bnx2x_init_def_sb(struct bnx2x *bp)
5584{
5585 struct host_sp_status_block *def_sb = bp->def_status_blk;
5586 dma_addr_t mapping = bp->def_status_blk_mapping;
5587 int igu_sp_sb_index;
5588 int igu_seg_id;
34f80b04
EG
5589 int port = BP_PORT(bp);
5590 int func = BP_FUNC(bp);
f2eaeb58 5591 int reg_offset, reg_offset_en5;
a2fbb9ea 5592 u64 section;
523224a3
DK
5593 int index;
5594 struct hc_sp_status_block_data sp_sb_data;
5595 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
5596
f2e0899f
DK
5597 if (CHIP_INT_MODE_IS_BC(bp)) {
5598 igu_sp_sb_index = DEF_SB_IGU_ID;
5599 igu_seg_id = HC_SEG_ACCESS_DEF;
5600 } else {
5601 igu_sp_sb_index = bp->igu_dsb_id;
5602 igu_seg_id = IGU_SEG_ACCESS_DEF;
5603 }
a2fbb9ea
ET
5604
5605 /* ATTN */
523224a3 5606 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
a2fbb9ea 5607 atten_status_block);
523224a3 5608 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
a2fbb9ea 5609
49d66772
ET
5610 bp->attn_state = 0;
5611
a2fbb9ea
ET
5612 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5613 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
f2eaeb58
DK
5614 reg_offset_en5 = (port ? MISC_REG_AEU_ENABLE5_FUNC_1_OUT_0 :
5615 MISC_REG_AEU_ENABLE5_FUNC_0_OUT_0);
34f80b04 5616 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
523224a3
DK
5617 int sindex;
5618 /* take care of sig[0]..sig[4] */
5619 for (sindex = 0; sindex < 4; sindex++)
5620 bp->attn_group[index].sig[sindex] =
5621 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
f2e0899f 5622
619c5cb6 5623 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
5624 /*
5625 * enable5 is separate from the rest of the registers,
5626 * and therefore the address skip is 4
5627 * and not 16 between the different groups
5628 */
5629 bp->attn_group[index].sig[4] = REG_RD(bp,
f2eaeb58 5630 reg_offset_en5 + 0x4*index);
f2e0899f
DK
5631 else
5632 bp->attn_group[index].sig[4] = 0;
a2fbb9ea
ET
5633 }
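	/* Note on the addressing used in the loop above (register names as in
	 * that loop): the attention groups are 16 bytes apart in the
	 * AEU_ENABLE1..4 banks, so sig[0..3] of group "index" sit at
	 * reg_offset + sindex*0x4 + 0x10*index, while the ENABLE5 bank packs
	 * one register per group, hence the 0x4*index stride for sig[4] on
	 * non-E1x chips.
	 */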
5634
f2e0899f
DK
5635 if (bp->common.int_block == INT_BLOCK_HC) {
5636 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
5637 HC_REG_ATTN_MSG0_ADDR_L);
5638
5639 REG_WR(bp, reg_offset, U64_LO(section));
5640 REG_WR(bp, reg_offset + 4, U64_HI(section));
619c5cb6 5641 } else if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
5642 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
5643 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
5644 }
a2fbb9ea 5645
523224a3
DK
5646 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
5647 sp_sb);
a2fbb9ea 5648
523224a3 5649 bnx2x_zero_sp_sb(bp);
a2fbb9ea 5650
86564c3f 5651	/* PCI guarantees endianness of regpairs */
619c5cb6 5652 sp_sb_data.state = SB_ENABLED;
523224a3
DK
5653 sp_sb_data.host_sb_addr.lo = U64_LO(section);
5654 sp_sb_data.host_sb_addr.hi = U64_HI(section);
5655 sp_sb_data.igu_sb_id = igu_sp_sb_index;
5656 sp_sb_data.igu_seg_id = igu_seg_id;
5657 sp_sb_data.p_func.pf_id = func;
f2e0899f 5658 sp_sb_data.p_func.vnic_id = BP_VN(bp);
523224a3 5659 sp_sb_data.p_func.vf_id = 0xff;
a2fbb9ea 5660
523224a3 5661 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
49d66772 5662
523224a3 5663 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
5664}
5665
9f6c9258 5666void bnx2x_update_coalesce(struct bnx2x *bp)
a2fbb9ea 5667{
a2fbb9ea
ET
5668 int i;
5669
ec6ba945 5670 for_each_eth_queue(bp, i)
523224a3 5671 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
423cfa7e 5672 bp->tx_ticks, bp->rx_ticks);
a2fbb9ea
ET
5673}
5674
a2fbb9ea
ET
5675static void bnx2x_init_sp_ring(struct bnx2x *bp)
5676{
a2fbb9ea 5677 spin_lock_init(&bp->spq_lock);
6e30dd4e 5678 atomic_set(&bp->cq_spq_left, MAX_SPQ_PENDING);
a2fbb9ea 5679
a2fbb9ea 5680 bp->spq_prod_idx = 0;
a2fbb9ea
ET
5681 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
5682 bp->spq_prod_bd = bp->spq;
5683 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
a2fbb9ea
ET
5684}
5685
523224a3 5686static void bnx2x_init_eq_ring(struct bnx2x *bp)
a2fbb9ea
ET
5687{
5688 int i;
523224a3
DK
5689 for (i = 1; i <= NUM_EQ_PAGES; i++) {
5690 union event_ring_elem *elem =
5691 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
a2fbb9ea 5692
523224a3
DK
5693 elem->next_page.addr.hi =
5694 cpu_to_le32(U64_HI(bp->eq_mapping +
5695 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
5696 elem->next_page.addr.lo =
5697 cpu_to_le32(U64_LO(bp->eq_mapping +
5698 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
a2fbb9ea 5699 }
523224a3
DK
5700 bp->eq_cons = 0;
5701 bp->eq_prod = NUM_EQ_DESC;
5702 bp->eq_cons_sb = BNX2X_EQ_INDEX;
6e30dd4e
VZ
5703	/* we want a warning message before it gets rough... */
5704 atomic_set(&bp->eq_spq_left,
5705 min_t(int, MAX_SP_DESC_CNT - MAX_SPQ_PENDING, NUM_EQ_DESC) - 1);
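	/* Illustrative accounting (the exact constants live in the driver
	 * headers and are not restated here): the EQ credit is capped one
	 * below both the SPQ room left after reserving MAX_SPQ_PENDING
	 * slow-path slots and the EQ size itself, so the driver runs out of
	 * credit, and can warn, before either ring can actually overflow.
	 */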
a2fbb9ea
ET
5706}
5707
619c5cb6 5708/* called with netif_addr_lock_bh() */
924d75ab
YM
5709int bnx2x_set_q_rx_mode(struct bnx2x *bp, u8 cl_id,
5710 unsigned long rx_mode_flags,
5711 unsigned long rx_accept_flags,
5712 unsigned long tx_accept_flags,
5713 unsigned long ramrod_flags)
ab532cf3 5714{
619c5cb6
VZ
5715 struct bnx2x_rx_mode_ramrod_params ramrod_param;
5716 int rc;
5717
5718 memset(&ramrod_param, 0, sizeof(ramrod_param));
5719
5720 /* Prepare ramrod parameters */
5721 ramrod_param.cid = 0;
5722 ramrod_param.cl_id = cl_id;
5723 ramrod_param.rx_mode_obj = &bp->rx_mode_obj;
5724 ramrod_param.func_id = BP_FUNC(bp);
ab532cf3 5725
619c5cb6
VZ
5726 ramrod_param.pstate = &bp->sp_state;
5727 ramrod_param.state = BNX2X_FILTER_RX_MODE_PENDING;
ab532cf3 5728
619c5cb6
VZ
5729 ramrod_param.rdata = bnx2x_sp(bp, rx_mode_rdata);
5730 ramrod_param.rdata_mapping = bnx2x_sp_mapping(bp, rx_mode_rdata);
5731
5732 set_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state);
5733
5734 ramrod_param.ramrod_flags = ramrod_flags;
5735 ramrod_param.rx_mode_flags = rx_mode_flags;
5736
5737 ramrod_param.rx_accept_flags = rx_accept_flags;
5738 ramrod_param.tx_accept_flags = tx_accept_flags;
5739
5740 rc = bnx2x_config_rx_mode(bp, &ramrod_param);
5741 if (rc < 0) {
5742 BNX2X_ERR("Set rx_mode %d failed\n", bp->rx_mode);
924d75ab 5743 return rc;
619c5cb6 5744 }
924d75ab
YM
5745
5746 return 0;
a2fbb9ea
ET
5747}
5748
86564c3f
YM
5749static int bnx2x_fill_accept_flags(struct bnx2x *bp, u32 rx_mode,
5750 unsigned long *rx_accept_flags,
5751 unsigned long *tx_accept_flags)
471de716 5752{
924d75ab
YM
5753 /* Clear the flags first */
5754 *rx_accept_flags = 0;
5755 *tx_accept_flags = 0;
619c5cb6 5756
924d75ab 5757 switch (rx_mode) {
619c5cb6
VZ
5758 case BNX2X_RX_MODE_NONE:
5759 /*
5760 * 'drop all' supersedes any accept flags that may have been
5761 * passed to the function.
5762 */
5763 break;
5764 case BNX2X_RX_MODE_NORMAL:
924d75ab
YM
5765 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5766 __set_bit(BNX2X_ACCEPT_MULTICAST, rx_accept_flags);
5767 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5768
5769 /* internal switching mode */
924d75ab
YM
5770 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5771 __set_bit(BNX2X_ACCEPT_MULTICAST, tx_accept_flags);
5772 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5773
5774 break;
5775 case BNX2X_RX_MODE_ALLMULTI:
924d75ab
YM
5776 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5777 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5778 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5779
5780 /* internal switching mode */
924d75ab
YM
5781 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
5782 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5783 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5784
5785 break;
5786 case BNX2X_RX_MODE_PROMISC:
5787		/* According to definition of SI mode, iface in promisc mode
5788 * should receive matched and unmatched (in resolution of port)
5789 * unicast packets.
5790 */
924d75ab
YM
5791 __set_bit(BNX2X_ACCEPT_UNMATCHED, rx_accept_flags);
5792 __set_bit(BNX2X_ACCEPT_UNICAST, rx_accept_flags);
5793 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, rx_accept_flags);
5794 __set_bit(BNX2X_ACCEPT_BROADCAST, rx_accept_flags);
619c5cb6
VZ
5795
5796 /* internal switching mode */
924d75ab
YM
5797 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, tx_accept_flags);
5798 __set_bit(BNX2X_ACCEPT_BROADCAST, tx_accept_flags);
619c5cb6
VZ
5799
5800 if (IS_MF_SI(bp))
924d75ab 5801 __set_bit(BNX2X_ACCEPT_ALL_UNICAST, tx_accept_flags);
619c5cb6 5802 else
924d75ab 5803 __set_bit(BNX2X_ACCEPT_UNICAST, tx_accept_flags);
619c5cb6
VZ
5804
5805 break;
5806 default:
924d75ab
YM
5807 BNX2X_ERR("Unknown rx_mode: %d\n", rx_mode);
5808 return -EINVAL;
619c5cb6 5809 }
de832a55 5810
924d75ab 5811 /* Set ACCEPT_ANY_VLAN as we do not enable filtering by VLAN */
619c5cb6 5812 if (bp->rx_mode != BNX2X_RX_MODE_NONE) {
924d75ab
YM
5813 __set_bit(BNX2X_ACCEPT_ANY_VLAN, rx_accept_flags);
5814 __set_bit(BNX2X_ACCEPT_ANY_VLAN, tx_accept_flags);
34f80b04
EG
5815 }
5816
924d75ab
YM
5817 return 0;
5818}
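/* Summary of the mapping implemented above (rx accept set / tx accept set,
 * the tx side being the internal-switching direction):
 *   NONE     - nothing                    / nothing
 *   NORMAL   - UC, MC, BC                 / UC, MC, BC
 *   ALLMULTI - UC, all-MC, BC             / UC, all-MC, BC
 *   PROMISC  - unmatched+UC, all-MC, BC   / all-MC, BC plus all-UC (SI) or UC
 * with ACCEPT_ANY_VLAN added on both sides whenever rx_mode is not NONE.
 */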
5819
5820/* called with netif_addr_lock_bh() */
5821int bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5822{
5823 unsigned long rx_mode_flags = 0, ramrod_flags = 0;
5824 unsigned long rx_accept_flags = 0, tx_accept_flags = 0;
5825 int rc;
5826
5827 if (!NO_FCOE(bp))
5828 /* Configure rx_mode of FCoE Queue */
5829 __set_bit(BNX2X_RX_MODE_FCOE_ETH, &rx_mode_flags);
5830
5831 rc = bnx2x_fill_accept_flags(bp, bp->rx_mode, &rx_accept_flags,
5832 &tx_accept_flags);
5833 if (rc)
5834 return rc;
5835
619c5cb6
VZ
5836 __set_bit(RAMROD_RX, &ramrod_flags);
5837 __set_bit(RAMROD_TX, &ramrod_flags);
5838
924d75ab
YM
5839 return bnx2x_set_q_rx_mode(bp, bp->fp->cl_id, rx_mode_flags,
5840 rx_accept_flags, tx_accept_flags,
5841 ramrod_flags);
619c5cb6
VZ
5842}
5843
5844static void bnx2x_init_internal_common(struct bnx2x *bp)
5845{
5846 int i;
5847
0793f83f
DK
5848 if (IS_MF_SI(bp))
5849 /*
5850 * In switch independent mode, the TSTORM needs to accept
5851 * packets that failed classification, since approximate match
5852 * mac addresses aren't written to NIG LLH
5853 */
5854 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5855 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
619c5cb6
VZ
5856 else if (!CHIP_IS_E1(bp)) /* 57710 doesn't support MF */
5857 REG_WR8(bp, BAR_TSTRORM_INTMEM +
5858 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 0);
0793f83f 5859
523224a3
DK
5860 /* Zero this manually as its initialization is
5861 currently missing in the initTool */
5862 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
ca00392c 5863 REG_WR(bp, BAR_USTRORM_INTMEM +
523224a3 5864 USTORM_AGG_DATA_OFFSET + i * 4, 0);
619c5cb6 5865 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
5866 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
5867 CHIP_INT_MODE_IS_BC(bp) ?
5868 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
5869 }
523224a3 5870}
8a1c38d1 5871
471de716
EG
5872static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5873{
5874 switch (load_code) {
5875 case FW_MSG_CODE_DRV_LOAD_COMMON:
f2e0899f 5876 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
471de716
EG
5877 bnx2x_init_internal_common(bp);
5878 /* no break */
5879
5880 case FW_MSG_CODE_DRV_LOAD_PORT:
619c5cb6 5881 /* nothing to do */
471de716
EG
5882 /* no break */
5883
5884 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
523224a3
DK
5885 /* internal memory per function is
5886 initialized inside bnx2x_pf_init */
471de716
EG
5887 break;
5888
5889 default:
5890 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5891 break;
5892 }
5893}
5894
619c5cb6 5895static inline u8 bnx2x_fp_igu_sb_id(struct bnx2x_fastpath *fp)
523224a3 5896{
55c11941 5897 return fp->bp->igu_base_sb + fp->index + CNIC_SUPPORT(fp->bp);
619c5cb6 5898}
523224a3 5899
619c5cb6
VZ
5900static inline u8 bnx2x_fp_fw_sb_id(struct bnx2x_fastpath *fp)
5901{
55c11941 5902 return fp->bp->base_fw_ndsb + fp->index + CNIC_SUPPORT(fp->bp);
619c5cb6
VZ
5903}
5904
1191cb83 5905static u8 bnx2x_fp_cl_id(struct bnx2x_fastpath *fp)
619c5cb6
VZ
5906{
5907 if (CHIP_IS_E1x(fp->bp))
5908 return BP_L_ID(fp->bp) + fp->index;
5909 else /* We want Client ID to be the same as IGU SB ID for 57712 */
5910 return bnx2x_fp_igu_sb_id(fp);
5911}
5912
6383c0b3 5913static void bnx2x_init_eth_fp(struct bnx2x *bp, int fp_idx)
619c5cb6
VZ
5914{
5915 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
6383c0b3 5916 u8 cos;
619c5cb6 5917 unsigned long q_type = 0;
6383c0b3 5918 u32 cids[BNX2X_MULTI_TX_COS] = { 0 };
f233cafe 5919 fp->rx_queue = fp_idx;
b3b83c3f 5920 fp->cid = fp_idx;
619c5cb6
VZ
5921 fp->cl_id = bnx2x_fp_cl_id(fp);
5922 fp->fw_sb_id = bnx2x_fp_fw_sb_id(fp);
5923 fp->igu_sb_id = bnx2x_fp_igu_sb_id(fp);
523224a3 5924 /* qZone id equals to FW (per path) client id */
619c5cb6
VZ
5925 fp->cl_qzone_id = bnx2x_fp_qzone_id(fp);
5926
523224a3 5927 /* init shortcut */
619c5cb6 5928 fp->ustorm_rx_prods_offset = bnx2x_rx_ustorm_prods_offset(fp);
7a752993 5929
523224a3
DK
5930	/* Setup SB indices */
5931 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
523224a3 5932
619c5cb6
VZ
5933 /* Configure Queue State object */
5934 __set_bit(BNX2X_Q_TYPE_HAS_RX, &q_type);
5935 __set_bit(BNX2X_Q_TYPE_HAS_TX, &q_type);
6383c0b3
AE
5936
5937 BUG_ON(fp->max_cos > BNX2X_MULTI_TX_COS);
5938
5939 /* init tx data */
5940 for_each_cos_in_tx_queue(fp, cos) {
65565884
MS
5941 bnx2x_init_txdata(bp, fp->txdata_ptr[cos],
5942 CID_COS_TO_TX_ONLY_CID(fp->cid, cos, bp),
5943 FP_COS_TO_TXQ(fp, cos, bp),
5944 BNX2X_TX_SB_INDEX_BASE + cos, fp);
5945 cids[cos] = fp->txdata_ptr[cos]->cid;
6383c0b3
AE
5946 }
5947
ad5afc89
AE
5948 /* nothing more for vf to do here */
5949 if (IS_VF(bp))
5950 return;
5951
5952 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
5953 fp->fw_sb_id, fp->igu_sb_id);
5954 bnx2x_update_fpsb_idx(fp);
15192a8c
BW
5955 bnx2x_init_queue_obj(bp, &bnx2x_sp_obj(bp, fp).q_obj, fp->cl_id, cids,
5956 fp->max_cos, BP_FUNC(bp), bnx2x_sp(bp, q_rdata),
6383c0b3 5957 bnx2x_sp_mapping(bp, q_rdata), q_type);
619c5cb6
VZ
5958
5959 /**
5960 * Configure classification DBs: Always enable Tx switching
5961 */
5962 bnx2x_init_vlan_mac_fp_objs(fp, BNX2X_OBJ_TYPE_RX_TX);
5963
ad5afc89
AE
5964 DP(NETIF_MSG_IFUP,
5965 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d fw_sb %d igu_sb %d\n",
5966 fp_idx, bp, fp->status_blk.e2_sb, fp->cl_id, fp->fw_sb_id,
5967 fp->igu_sb_id);
523224a3
DK
5968}
5969
1191cb83
ED
5970static void bnx2x_init_tx_ring_one(struct bnx2x_fp_txdata *txdata)
5971{
5972 int i;
5973
5974 for (i = 1; i <= NUM_TX_RINGS; i++) {
5975 struct eth_tx_next_bd *tx_next_bd =
5976 &txdata->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
5977
5978 tx_next_bd->addr_hi =
5979 cpu_to_le32(U64_HI(txdata->tx_desc_mapping +
5980 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5981 tx_next_bd->addr_lo =
5982 cpu_to_le32(U64_LO(txdata->tx_desc_mapping +
5983 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
5984 }
5985
5986 SET_FLAG(txdata->tx_db.data.header.header, DOORBELL_HDR_DB_TYPE, 1);
5987 txdata->tx_db.data.zero_fill1 = 0;
5988 txdata->tx_db.data.prod = 0;
5989
5990 txdata->tx_pkt_prod = 0;
5991 txdata->tx_pkt_cons = 0;
5992 txdata->tx_bd_prod = 0;
5993 txdata->tx_bd_cons = 0;
5994 txdata->tx_pkt = 0;
5995}
5996
55c11941
MS
5997static void bnx2x_init_tx_rings_cnic(struct bnx2x *bp)
5998{
5999 int i;
6000
6001 for_each_tx_queue_cnic(bp, i)
6002 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[0]);
6003}
1191cb83
ED
6004static void bnx2x_init_tx_rings(struct bnx2x *bp)
6005{
6006 int i;
6007 u8 cos;
6008
55c11941 6009 for_each_eth_queue(bp, i)
1191cb83 6010 for_each_cos_in_tx_queue(&bp->fp[i], cos)
65565884 6011 bnx2x_init_tx_ring_one(bp->fp[i].txdata_ptr[cos]);
1191cb83
ED
6012}
6013
55c11941 6014void bnx2x_nic_init_cnic(struct bnx2x *bp)
a2fbb9ea 6015{
ec6ba945
VZ
6016 if (!NO_FCOE(bp))
6017 bnx2x_init_fcoe_fp(bp);
523224a3
DK
6018
6019 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
6020 BNX2X_VF_ID_INVALID, false,
619c5cb6 6021 bnx2x_cnic_fw_sb_id(bp), bnx2x_cnic_igu_sb_id(bp));
523224a3 6022
55c11941
MS
6023 /* ensure status block indices were read */
6024 rmb();
6025 bnx2x_init_rx_rings_cnic(bp);
6026 bnx2x_init_tx_rings_cnic(bp);
6027
6028 /* flush all */
6029 mb();
6030 mmiowb();
6031}
a2fbb9ea 6032
ecf01c22 6033void bnx2x_pre_irq_nic_init(struct bnx2x *bp)
55c11941
MS
6034{
6035 int i;
6036
ecf01c22 6037 /* Setup NIC internals and enable interrupts */
55c11941
MS
6038 for_each_eth_queue(bp, i)
6039 bnx2x_init_eth_fp(bp, i);
ad5afc89
AE
6040
6041 /* ensure status block indices were read */
6042 rmb();
6043 bnx2x_init_rx_rings(bp);
6044 bnx2x_init_tx_rings(bp);
6045
5b0752c8
AE
6046 if (IS_VF(bp)) {
6047 bnx2x_memset_stats(bp);
ad5afc89 6048 return;
5b0752c8 6049 }
ad5afc89 6050
ecf01c22
YM
6051 if (IS_PF(bp)) {
6052 /* Initialize MOD_ABS interrupts */
6053 bnx2x_init_mod_abs_int(bp, &bp->link_vars, bp->common.chip_id,
6054 bp->common.shmem_base,
6055 bp->common.shmem2_base, BP_PORT(bp));
ad5afc89 6056
ecf01c22
YM
6057 /* initialize the default status block and sp ring */
6058 bnx2x_init_def_sb(bp);
6059 bnx2x_update_dsb_idx(bp);
6060 bnx2x_init_sp_ring(bp);
6061 }
6062}
16119785 6063
ecf01c22
YM
6064void bnx2x_post_irq_nic_init(struct bnx2x *bp, u32 load_code)
6065{
523224a3 6066 bnx2x_init_eq_ring(bp);
471de716 6067 bnx2x_init_internal(bp, load_code);
523224a3 6068 bnx2x_pf_init(bp);
0ef00459
EG
6069 bnx2x_stats_init(bp);
6070
0ef00459
EG
6071 /* flush all before enabling interrupts */
6072 mb();
6073 mmiowb();
6074
615f8fd9 6075 bnx2x_int_enable(bp);
eb8da205
EG
6076
6077 /* Check for SPIO5 */
6078 bnx2x_attn_int_deasserted0(bp,
6079 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
6080 AEU_INPUTS_ATTN_BITS_SPIO5);
a2fbb9ea
ET
6081}
6082
ecf01c22 6083/* gzip service functions */
a2fbb9ea
ET
6084static int bnx2x_gunzip_init(struct bnx2x *bp)
6085{
1a983142
FT
6086 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
6087 &bp->gunzip_mapping, GFP_KERNEL);
a2fbb9ea
ET
6088 if (bp->gunzip_buf == NULL)
6089 goto gunzip_nomem1;
6090
6091 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
6092 if (bp->strm == NULL)
6093 goto gunzip_nomem2;
6094
7ab24bfd 6095 bp->strm->workspace = vmalloc(zlib_inflate_workspacesize());
a2fbb9ea
ET
6096 if (bp->strm->workspace == NULL)
6097 goto gunzip_nomem3;
6098
6099 return 0;
6100
6101gunzip_nomem3:
6102 kfree(bp->strm);
6103 bp->strm = NULL;
6104
6105gunzip_nomem2:
1a983142
FT
6106 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6107 bp->gunzip_mapping);
a2fbb9ea
ET
6108 bp->gunzip_buf = NULL;
6109
6110gunzip_nomem1:
51c1a580 6111 BNX2X_ERR("Cannot allocate firmware buffer for un-compression\n");
a2fbb9ea
ET
6112 return -ENOMEM;
6113}
6114
6115static void bnx2x_gunzip_end(struct bnx2x *bp)
6116{
b3b83c3f 6117 if (bp->strm) {
7ab24bfd 6118 vfree(bp->strm->workspace);
b3b83c3f
DK
6119 kfree(bp->strm);
6120 bp->strm = NULL;
6121 }
a2fbb9ea
ET
6122
6123 if (bp->gunzip_buf) {
1a983142
FT
6124 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
6125 bp->gunzip_mapping);
a2fbb9ea
ET
6126 bp->gunzip_buf = NULL;
6127 }
6128}
6129
94a78b79 6130static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
a2fbb9ea
ET
6131{
6132 int n, rc;
6133
6134 /* check gzip header */
94a78b79
VZ
6135 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
6136 BNX2X_ERR("Bad gzip header\n");
a2fbb9ea 6137 return -EINVAL;
94a78b79 6138 }
a2fbb9ea
ET
6139
6140 n = 10;
6141
34f80b04 6142#define FNAME 0x8
a2fbb9ea
ET
6143
6144 if (zbuf[3] & FNAME)
6145 while ((zbuf[n++] != 0) && (n < len));
6146
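	/* Per the gzip format (RFC 1952): bytes 0-1 are the 0x1f 0x8b magic,
	 * byte 2 the compression method (Z_DEFLATED), byte 3 the flags; the
	 * fixed header is 10 bytes, and when FNAME is set a NUL-terminated
	 * original file name follows, which the loop above skips before
	 * handing the raw deflate stream to zlib_inflate() below.
	 */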
94a78b79 6147 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
a2fbb9ea
ET
6148 bp->strm->avail_in = len - n;
6149 bp->strm->next_out = bp->gunzip_buf;
6150 bp->strm->avail_out = FW_BUF_SIZE;
6151
6152 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
6153 if (rc != Z_OK)
6154 return rc;
6155
6156 rc = zlib_inflate(bp->strm, Z_FINISH);
6157 if ((rc != Z_OK) && (rc != Z_STREAM_END))
7995c64e
JP
6158 netdev_err(bp->dev, "Firmware decompression error: %s\n",
6159 bp->strm->msg);
a2fbb9ea
ET
6160
6161 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
6162 if (bp->gunzip_outlen & 0x3)
51c1a580
MS
6163 netdev_err(bp->dev,
6164 "Firmware decompression error: gunzip_outlen (%d) not aligned\n",
cdaa7cb8 6165 bp->gunzip_outlen);
a2fbb9ea
ET
6166 bp->gunzip_outlen >>= 2;
6167
6168 zlib_inflateEnd(bp->strm);
6169
6170 if (rc == Z_STREAM_END)
6171 return 0;
6172
6173 return rc;
6174}
6175
6176/* nic load/unload */
6177
6178/*
34f80b04 6179 * General service functions
a2fbb9ea
ET
6180 */
6181
6182/* send a NIG loopback debug packet */
6183static void bnx2x_lb_pckt(struct bnx2x *bp)
6184{
a2fbb9ea 6185 u32 wb_write[3];
a2fbb9ea
ET
6186
6187 /* Ethernet source and destination addresses */
a2fbb9ea
ET
6188 wb_write[0] = 0x55555555;
6189 wb_write[1] = 0x55555555;
34f80b04 6190 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 6191 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6192
6193 /* NON-IP protocol */
a2fbb9ea
ET
6194 wb_write[0] = 0x09000000;
6195 wb_write[1] = 0x55555555;
34f80b04 6196 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 6197 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea
ET
6198}
6199
6200/* some of the internal memories
6201 * are not directly readable from the driver
6202 * to test them we send debug packets
6203 */
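/* Rough sequence, as implemented below: disable the parser's neighbour
 * blocks, zero the CFC-search credit, push one debug packet through the NIG
 * loopback and poll the NIG/PRS counters; then reset BRB/PRS, repeat with
 * ten packets, restore the credit and drain the NIG EOP FIFO before
 * re-enabling the inputs.
 */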
6204static int bnx2x_int_mem_test(struct bnx2x *bp)
6205{
6206 int factor;
6207 int count, i;
6208 u32 val = 0;
6209
ad8d3948 6210 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 6211 factor = 120;
ad8d3948
EG
6212 else if (CHIP_REV_IS_EMUL(bp))
6213 factor = 200;
6214 else
a2fbb9ea 6215 factor = 1;
a2fbb9ea 6216
a2fbb9ea
ET
6217 /* Disable inputs of parser neighbor blocks */
6218 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6219 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6220 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6221 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6222
6223 /* Write 0 to parser credits for CFC search request */
6224 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6225
6226 /* send Ethernet packet */
6227 bnx2x_lb_pckt(bp);
6228
6229 /* TODO do i reset NIG statistic? */
6230 /* Wait until NIG register shows 1 packet of size 0x10 */
6231 count = 1000 * factor;
6232 while (count) {
34f80b04 6233
a2fbb9ea
ET
6234 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6235 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6236 if (val == 0x10)
6237 break;
6238
6239 msleep(10);
6240 count--;
6241 }
6242 if (val != 0x10) {
6243 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6244 return -1;
6245 }
6246
6247 /* Wait until PRS register shows 1 packet */
6248 count = 1000 * factor;
6249 while (count) {
6250 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea
ET
6251 if (val == 1)
6252 break;
6253
6254 msleep(10);
6255 count--;
6256 }
6257 if (val != 0x1) {
6258 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6259 return -2;
6260 }
6261
6262 /* Reset and init BRB, PRS */
34f80b04 6263 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 6264 msleep(50);
34f80b04 6265 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 6266 msleep(50);
619c5cb6
VZ
6267 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6268 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
a2fbb9ea
ET
6269
6270 DP(NETIF_MSG_HW, "part2\n");
6271
6272 /* Disable inputs of parser neighbor blocks */
6273 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
6274 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
6275 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 6276 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea
ET
6277
6278 /* Write 0 to parser credits for CFC search request */
6279 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
6280
6281 /* send 10 Ethernet packets */
6282 for (i = 0; i < 10; i++)
6283 bnx2x_lb_pckt(bp);
6284
6285 /* Wait until NIG register shows 10 + 1
6286 packets of size 11*0x10 = 0xb0 */
6287 count = 1000 * factor;
6288 while (count) {
34f80b04 6289
a2fbb9ea
ET
6290 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6291 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea
ET
6292 if (val == 0xb0)
6293 break;
6294
6295 msleep(10);
6296 count--;
6297 }
6298 if (val != 0xb0) {
6299 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
6300 return -3;
6301 }
6302
6303 /* Wait until PRS register shows 2 packets */
6304 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6305 if (val != 2)
6306 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6307
6308 /* Write 1 to parser credits for CFC search request */
6309 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
6310
6311 /* Wait until PRS register shows 3 packets */
6312 msleep(10 * factor);
6313 /* Wait until NIG register shows 1 packet of size 0x10 */
6314 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
6315 if (val != 3)
6316 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
6317
6318 /* clear NIG EOP FIFO */
6319 for (i = 0; i < 11; i++)
6320 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
6321 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
6322 if (val != 1) {
6323 BNX2X_ERR("clear of NIG failed\n");
6324 return -4;
6325 }
6326
6327 /* Reset and init BRB, PRS, NIG */
6328 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
6329 msleep(50);
6330 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
6331 msleep(50);
619c5cb6
VZ
6332 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
6333 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
55c11941
MS
6334 if (!CNIC_SUPPORT(bp))
6335 /* set NIC mode */
6336 REG_WR(bp, PRS_REG_NIC_MODE, 1);
a2fbb9ea
ET
6337
6338 /* Enable inputs of parser neighbor blocks */
6339 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
6340 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
6341 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 6342 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea
ET
6343
6344 DP(NETIF_MSG_HW, "done\n");
6345
6346 return 0; /* OK */
6347}
6348
4a33bc03 6349static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
a2fbb9ea 6350{
b343d002
YM
6351 u32 val;
6352
a2fbb9ea 6353 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
619c5cb6 6354 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
6355 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
6356 else
6357 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
a2fbb9ea
ET
6358 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
6359 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
f2e0899f
DK
6360 /*
6361 * mask read length error interrupts in brb for parser
6362 * (parsing unit and 'checksum and crc' unit)
6363 * these errors are legal (PU reads fixed length and CAC can cause
6364 * read length error on truncated packets)
6365 */
6366 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
a2fbb9ea
ET
6367 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
6368 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
6369 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
6370 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
6371 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04
EG
6372/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
6373/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6374 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
6375 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
6376 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04
EG
6377/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
6378/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea
ET
6379 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
6380 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
6381 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
6382 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04
EG
6383/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
6384/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
f85582f8 6385
b343d002
YM
6386 val = PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT |
6387 PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF |
6388 PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN;
6389 if (!CHIP_IS_E1x(bp))
6390 val |= PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED |
6391 PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED;
6392 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, val);
6393
a2fbb9ea
ET
6394 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
6395 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
6396 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04 6397/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
619c5cb6
VZ
6398
6399 if (!CHIP_IS_E1x(bp))
6400 /* enable VFC attentions: bits 11 and 12, bits 31:13 reserved */
6401 REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0x07ff);
6402
a2fbb9ea
ET
6403 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
6404 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 6405/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4a33bc03 6406 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
a2fbb9ea
ET
6407}
6408
81f75bbf
EG
6409static void bnx2x_reset_common(struct bnx2x *bp)
6410{
619c5cb6
VZ
6411 u32 val = 0x1400;
6412
81f75bbf
EG
6413 /* reset_common */
6414 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6415 0xd3ffff7f);
619c5cb6
VZ
6416
6417 if (CHIP_IS_E3(bp)) {
6418 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6419 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6420 }
6421
6422 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, val);
6423}
6424
6425static void bnx2x_setup_dmae(struct bnx2x *bp)
6426{
6427 bp->dmae_ready = 0;
6428 spin_lock_init(&bp->dmae_lock);
81f75bbf
EG
6429}
6430
573f2035
EG
6431static void bnx2x_init_pxp(struct bnx2x *bp)
6432{
6433 u16 devctl;
6434 int r_order, w_order;
6435
2a80eebc 6436 pcie_capability_read_word(bp->pdev, PCI_EXP_DEVCTL, &devctl);
573f2035
EG
6437 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
6438 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
6439 if (bp->mrrs == -1)
6440 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
6441 else {
6442 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
6443 r_order = bp->mrrs;
6444 }
6445
6446 bnx2x_init_pxp_arb(bp, r_order, w_order);
6447}
fd4ef40d
EG
6448
6449static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6450{
2145a920 6451 int is_required;
fd4ef40d 6452 u32 val;
2145a920 6453 int port;
fd4ef40d 6454
2145a920
VZ
6455 if (BP_NOMCP(bp))
6456 return;
6457
6458 is_required = 0;
fd4ef40d
EG
6459 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
6460 SHARED_HW_CFG_FAN_FAILURE_MASK;
6461
6462 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
6463 is_required = 1;
6464
6465 /*
6466 * The fan failure mechanism is usually related to the PHY type since
6467 * the power consumption of the board is affected by the PHY. Currently,
6468 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
6469 */
6470 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
6471 for (port = PORT_0; port < PORT_MAX; port++) {
fd4ef40d 6472 is_required |=
d90d96ba
YR
6473 bnx2x_fan_failure_det_req(
6474 bp,
6475 bp->common.shmem_base,
a22f0788 6476 bp->common.shmem2_base,
d90d96ba 6477 port);
fd4ef40d
EG
6478 }
6479
6480 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
6481
6482 if (is_required == 0)
6483 return;
6484
6485 /* Fan failure is indicated by SPIO 5 */
d6d99a3f 6486 bnx2x_set_spio(bp, MISC_SPIO_SPIO5, MISC_SPIO_INPUT_HI_Z);
fd4ef40d
EG
6487
6488 /* set to active low mode */
6489 val = REG_RD(bp, MISC_REG_SPIO_INT);
d6d99a3f 6490 val |= (MISC_SPIO_SPIO5 << MISC_SPIO_INT_OLD_SET_POS);
fd4ef40d
EG
6491 REG_WR(bp, MISC_REG_SPIO_INT, val);
6492
6493 /* enable interrupt to signal the IGU */
6494 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
d6d99a3f 6495 val |= MISC_SPIO_SPIO5;
fd4ef40d
EG
6496 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
6497}
6498
c9ee9206 6499void bnx2x_pf_disable(struct bnx2x *bp)
f2e0899f
DK
6500{
6501 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
6502 val &= ~IGU_PF_CONF_FUNC_EN;
6503
6504 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
6505 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6506 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
6507}
6508
1191cb83 6509static void bnx2x__common_init_phy(struct bnx2x *bp)
619c5cb6
VZ
6510{
6511 u32 shmem_base[2], shmem2_base[2];
b884d95b
YR
6512 /* Avoid common init in case MFW supports LFA */
6513 if (SHMEM2_RD(bp, size) >
6514 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
6515 return;
619c5cb6
VZ
6516 shmem_base[0] = bp->common.shmem_base;
6517 shmem2_base[0] = bp->common.shmem2_base;
6518 if (!CHIP_IS_E1x(bp)) {
6519 shmem_base[1] =
6520 SHMEM2_RD(bp, other_shmem_base_addr);
6521 shmem2_base[1] =
6522 SHMEM2_RD(bp, other_shmem2_base_addr);
6523 }
6524 bnx2x_acquire_phy_lock(bp);
6525 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
6526 bp->common.chip_id);
6527 bnx2x_release_phy_lock(bp);
6528}
6529
6530/**
6531 * bnx2x_init_hw_common - initialize the HW at the COMMON phase.
6532 *
6533 * @bp: driver handle
6534 */
6535static int bnx2x_init_hw_common(struct bnx2x *bp)
a2fbb9ea 6536{
619c5cb6 6537 u32 val;
a2fbb9ea 6538
51c1a580 6539 DP(NETIF_MSG_HW, "starting common init func %d\n", BP_ABS_FUNC(bp));
a2fbb9ea 6540
2031bd3a 6541 /*
2de67439 6542 * take the RESET lock to protect undi_unload flow from accessing
2031bd3a
DK
6543 * registers while we're resetting the chip
6544 */
7a06a122 6545 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
2031bd3a 6546
81f75bbf 6547 bnx2x_reset_common(bp);
34f80b04 6548 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
a2fbb9ea 6549
619c5cb6
VZ
6550 val = 0xfffc;
6551 if (CHIP_IS_E3(bp)) {
6552 val |= MISC_REGISTERS_RESET_REG_2_MSTAT0;
6553 val |= MISC_REGISTERS_RESET_REG_2_MSTAT1;
6554 }
6555 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, val);
6556
7a06a122 6557 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
2031bd3a 6558
619c5cb6 6559 bnx2x_init_block(bp, BLOCK_MISC, PHASE_COMMON);
a2fbb9ea 6560
619c5cb6
VZ
6561 if (!CHIP_IS_E1x(bp)) {
6562 u8 abs_func_id;
f2e0899f
DK
6563
6564 /**
6565		 * 4-port mode or 2-port mode we need to turn off master-enable
6566 * for everyone, after that, turn it back on for self.
6567 * so, we disregard multi-function or not, and always disable
6568 * for all functions on the given path, this means 0,2,4,6 for
6569 * path 0 and 1,3,5,7 for path 1
6570 */
619c5cb6
VZ
6571 for (abs_func_id = BP_PATH(bp);
6572 abs_func_id < E2_FUNC_MAX*2; abs_func_id += 2) {
6573 if (abs_func_id == BP_ABS_FUNC(bp)) {
f2e0899f
DK
6574 REG_WR(bp,
6575 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
6576 1);
6577 continue;
6578 }
6579
619c5cb6 6580 bnx2x_pretend_func(bp, abs_func_id);
f2e0899f
DK
6581 /* clear pf enable */
6582 bnx2x_pf_disable(bp);
6583 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6584 }
6585 }
a2fbb9ea 6586
619c5cb6 6587 bnx2x_init_block(bp, BLOCK_PXP, PHASE_COMMON);
34f80b04
EG
6588 if (CHIP_IS_E1(bp)) {
6589 /* enable HW interrupt from PXP on USDM overflow
6590 bit 16 on INT_MASK_0 */
6591 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
6592 }
a2fbb9ea 6593
619c5cb6 6594 bnx2x_init_block(bp, BLOCK_PXP2, PHASE_COMMON);
34f80b04 6595 bnx2x_init_pxp(bp);
a2fbb9ea
ET
6596
6597#ifdef __BIG_ENDIAN
34f80b04
EG
6598 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
6599 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
6600 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
6601 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
6602 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a
EG
6603 /* make sure this value is 0 */
6604 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04
EG
6605
6606/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
6607 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
6608 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
6609 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
6610 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea
ET
6611#endif
6612
523224a3
DK
6613 bnx2x_ilt_init_page_size(bp, INITOP_SET);
6614
34f80b04
EG
6615 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
6616 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 6617
34f80b04
EG
6618	/* let the HW do its magic ... */
6619 msleep(100);
6620 /* finish PXP init */
6621 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
6622 if (val != 1) {
6623 BNX2X_ERR("PXP2 CFG failed\n");
6624 return -EBUSY;
6625 }
6626 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
6627 if (val != 1) {
6628 BNX2X_ERR("PXP2 RD_INIT failed\n");
6629 return -EBUSY;
6630 }
a2fbb9ea 6631
f2e0899f
DK
6632 /* Timers bug workaround E2 only. We need to set the entire ILT to
6633 * have entries with value "0" and valid bit on.
6634 * This needs to be done by the first PF that is loaded in a path
6635 * (i.e. common phase)
6636 */
619c5cb6
VZ
6637 if (!CHIP_IS_E1x(bp)) {
6638/* In E2 there is a bug in the timers block that can cause function 6 / 7
6639 * (i.e. vnic3) to start even if it is marked as "scan-off".
6640 * This occurs when a different function (func2,3) is being marked
6641 * as "scan-off". Real-life scenario for example: if a driver is being
6642 * load-unloaded while func6,7 are down. This will cause the timer to access
6643 * the ilt, translate to a logical address and send a request to read/write.
6644 * Since the ilt for the function that is down is not valid, this will cause
6645 * a translation error which is unrecoverable.
6646 * The Workaround is intended to make sure that when this happens nothing fatal
6647 * will occur. The workaround:
6648 * 1. First PF driver which loads on a path will:
6649 * a. After taking the chip out of reset, by using pretend,
6650 * it will write "0" to the following registers of
6651 * the other vnics.
6652 * REG_WR(pdev, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
6653 * REG_WR(pdev, CFC_REG_WEAK_ENABLE_PF,0);
6654 * REG_WR(pdev, CFC_REG_STRONG_ENABLE_PF,0);
6655 * And for itself it will write '1' to
6656 * PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER to enable
6657 * dmae-operations (writing to pram for example.)
6658 * note: can be done for only function 6,7 but cleaner this
6659 * way.
6660 * b. Write zero+valid to the entire ILT.
6661 * c. Init the first_timers_ilt_entry, last_timers_ilt_entry of
6662 * VNIC3 (of that port). The range allocated will be the
6663 * entire ILT. This is needed to prevent ILT range error.
6664 * 2. Any PF driver load flow:
6665 * a. ILT update with the physical addresses of the allocated
6666 * logical pages.
6667 * b. Wait 20msec. - note that this timeout is needed to make
6668 * sure there are no requests in one of the PXP internal
6669 * queues with "old" ILT addresses.
6670 * c. PF enable in the PGLC.
6671 * d. Clear the was_error of the PF in the PGLC. (could have
2de67439 6672 * occurred while driver was down)
619c5cb6
VZ
6673 * e. PF enable in the CFC (WEAK + STRONG)
6674 * f. Timers scan enable
6675 * 3. PF driver unload flow:
6676 * a. Clear the Timers scan_en.
6677 * b. Polling for scan_on=0 for that PF.
6678 * c. Clear the PF enable bit in the PXP.
6679 * d. Clear the PF enable in the CFC (WEAK + STRONG)
6680 * e. Write zero+valid to all ILT entries (The valid bit must
6681 * stay set)
6682 * f. If this is VNIC 3 of a port then also init
6683 * first_timers_ilt_entry to zero and last_timers_ilt_entry
6684 * to the last entry in the ILT.
6685 *
6686 * Notes:
6687 * Currently the PF error in the PGLC is non recoverable.
6688 * In the future there will be a recovery routine for this error.
6689 * Currently attention is masked.
6690 * Having an MCP lock on the load/unload process does not guarantee that
6691 * there is no Timer disable during Func6/7 enable. This is because the
6692 * Timers scan is currently being cleared by the MCP on FLR.
6693 * Step 2.d can be done only for PF6/7 and the driver can also check if
6694 * there is error before clearing it. But the flow above is simpler and
6695 * more general.
6696 * All ILT entries are written with zero+valid and not just PF6/7
6697 * ILT entries since in the future the ILT entries allocation for
6698 * PF-s might be dynamic.
6699 */
f2e0899f
DK
6700 struct ilt_client_info ilt_cli;
6701 struct bnx2x_ilt ilt;
6702 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6703 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
6704
b595076a 6705 /* initialize dummy TM client */
f2e0899f
DK
6706 ilt_cli.start = 0;
6707 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6708 ilt_cli.client_num = ILT_CLIENT_TM;
6709
6710 /* Step 1: set zeroes to all ilt page entries with valid bit on
6711 * Step 2: set the timers first/last ilt entry to point
6712 * to the entire range to prevent ILT range error for 3rd/4th
2de67439 6713 * vnic (this code assumes existence of the vnic)
f2e0899f
DK
6714 *
6715 * both steps performed by call to bnx2x_ilt_client_init_op()
6716 * with dummy TM client
6717 *
6718 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
6719 * and its counterpart are split registers
6720 */
6721 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
6722 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
6723 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
6724
6725 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
6726 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
6727 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
6728 }
6729
34f80b04
EG
6730 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
6731 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 6732
619c5cb6 6733 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
6734 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
6735 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
619c5cb6 6736 bnx2x_init_block(bp, BLOCK_PGLUE_B, PHASE_COMMON);
f2e0899f 6737
619c5cb6 6738 bnx2x_init_block(bp, BLOCK_ATC, PHASE_COMMON);
f2e0899f
DK
6739
6740 /* let the HW do its magic ... */
6741 do {
6742 msleep(200);
6743 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
6744 } while (factor-- && (val != 1));
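			/* Timing of the poll above (a rough bound, assuming the
			 * msleep(200) dominates each pass): ATC_REG_ATC_INIT_DONE
			 * is sampled once per 200 ms, so the wait is limited to
			 * roughly 200 s on emulation (factor 1000) and 80 s on
			 * FPGA (factor 400); on real silicon (factor 0) a single
			 * 200 ms sleep and one read are performed before the
			 * check below.
			 */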
6745
6746 if (val != 1) {
6747 BNX2X_ERR("ATC_INIT failed\n");
6748 return -EBUSY;
6749 }
6750 }
6751
619c5cb6 6752 bnx2x_init_block(bp, BLOCK_DMAE, PHASE_COMMON);
a2fbb9ea 6753
b56e9670
AE
6754 bnx2x_iov_init_dmae(bp);
6755
34f80b04
EG
6756 /* clean the DMAE memory */
6757 bp->dmae_ready = 1;
619c5cb6
VZ
6758 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8, 1);
6759
6760 bnx2x_init_block(bp, BLOCK_TCM, PHASE_COMMON);
6761
6762 bnx2x_init_block(bp, BLOCK_UCM, PHASE_COMMON);
6763
6764 bnx2x_init_block(bp, BLOCK_CCM, PHASE_COMMON);
a2fbb9ea 6765
619c5cb6 6766 bnx2x_init_block(bp, BLOCK_XCM, PHASE_COMMON);
a2fbb9ea 6767
34f80b04
EG
6768 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
6769 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
6770 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
6771 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6772
619c5cb6 6773 bnx2x_init_block(bp, BLOCK_QM, PHASE_COMMON);
37b091ba 6774
f85582f8 6775
523224a3
DK
6776 /* QM queues pointers table */
6777 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
6778
34f80b04
EG
6779 /* soft reset pulse */
6780 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6781 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 6782
55c11941
MS
6783 if (CNIC_SUPPORT(bp))
6784 bnx2x_init_block(bp, BLOCK_TM, PHASE_COMMON);
a2fbb9ea 6785
619c5cb6 6786 bnx2x_init_block(bp, BLOCK_DORQ, PHASE_COMMON);
523224a3 6787 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
619c5cb6 6788 if (!CHIP_REV_IS_SLOW(bp))
34f80b04
EG
6789 /* enable hw interrupt from doorbell Q */
6790 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
a2fbb9ea 6791
619c5cb6 6792 bnx2x_init_block(bp, BLOCK_BRB1, PHASE_COMMON);
f2e0899f 6793
619c5cb6 6794 bnx2x_init_block(bp, BLOCK_PRS, PHASE_COMMON);
26c8fa4d 6795 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
619c5cb6 6796
f2e0899f 6797 if (!CHIP_IS_E1(bp))
619c5cb6 6798 REG_WR(bp, PRS_REG_E1HOV_MODE, bp->path_has_ovlan);
f85582f8 6799
a3348722
BW
6800 if (!CHIP_IS_E1x(bp) && !CHIP_IS_E3B0(bp)) {
6801 if (IS_MF_AFEX(bp)) {
6802 /* configure that VNTag and VLAN headers must be
6803 * received in afex mode
6804 */
6805 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, 0xE);
6806 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, 0xA);
6807 REG_WR(bp, PRS_REG_HDRS_AFTER_TAG_0, 0x6);
6808 REG_WR(bp, PRS_REG_TAG_ETHERTYPE_0, 0x8926);
6809 REG_WR(bp, PRS_REG_TAG_LEN_0, 0x4);
6810 } else {
6811 /* Bit-map indicating which L2 hdrs may appear
6812 * after the basic Ethernet header
6813 */
6814 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC,
6815 bp->path_has_ovlan ? 7 : 6);
6816 }
6817 }
a2fbb9ea 6818
619c5cb6
VZ
6819 bnx2x_init_block(bp, BLOCK_TSDM, PHASE_COMMON);
6820 bnx2x_init_block(bp, BLOCK_CSDM, PHASE_COMMON);
6821 bnx2x_init_block(bp, BLOCK_USDM, PHASE_COMMON);
6822 bnx2x_init_block(bp, BLOCK_XSDM, PHASE_COMMON);
a2fbb9ea 6823
619c5cb6
VZ
6824 if (!CHIP_IS_E1x(bp)) {
6825 /* reset VFC memories */
6826 REG_WR(bp, TSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6827 VFC_MEMORIES_RST_REG_CAM_RST |
6828 VFC_MEMORIES_RST_REG_RAM_RST);
6829 REG_WR(bp, XSEM_REG_FAST_MEMORY + VFC_REG_MEMORIES_RST,
6830 VFC_MEMORIES_RST_REG_CAM_RST |
6831 VFC_MEMORIES_RST_REG_RAM_RST);
a2fbb9ea 6832
619c5cb6
VZ
6833 msleep(20);
6834 }
a2fbb9ea 6835
619c5cb6
VZ
6836 bnx2x_init_block(bp, BLOCK_TSEM, PHASE_COMMON);
6837 bnx2x_init_block(bp, BLOCK_USEM, PHASE_COMMON);
6838 bnx2x_init_block(bp, BLOCK_CSEM, PHASE_COMMON);
6839 bnx2x_init_block(bp, BLOCK_XSEM, PHASE_COMMON);
f2e0899f 6840
34f80b04
EG
6841 /* sync semi rtc */
6842 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6843 0x80000000);
6844 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6845 0x80000000);
a2fbb9ea 6846
619c5cb6
VZ
6847 bnx2x_init_block(bp, BLOCK_UPB, PHASE_COMMON);
6848 bnx2x_init_block(bp, BLOCK_XPB, PHASE_COMMON);
6849 bnx2x_init_block(bp, BLOCK_PBF, PHASE_COMMON);
a2fbb9ea 6850
a3348722
BW
6851 if (!CHIP_IS_E1x(bp)) {
6852 if (IS_MF_AFEX(bp)) {
6853 /* configure that VNTag and VLAN headers must be
6854 * sent in afex mode
6855 */
6856 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, 0xE);
6857 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, 0xA);
6858 REG_WR(bp, PBF_REG_HDRS_AFTER_TAG_0, 0x6);
6859 REG_WR(bp, PBF_REG_TAG_ETHERTYPE_0, 0x8926);
6860 REG_WR(bp, PBF_REG_TAG_LEN_0, 0x4);
6861 } else {
6862 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC,
6863 bp->path_has_ovlan ? 7 : 6);
6864 }
6865 }
f2e0899f 6866
34f80b04 6867 REG_WR(bp, SRC_REG_SOFT_RST, 1);
f85582f8 6868
619c5cb6
VZ
6869 bnx2x_init_block(bp, BLOCK_SRC, PHASE_COMMON);
6870
55c11941
MS
6871 if (CNIC_SUPPORT(bp)) {
6872 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6873 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6874 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6875 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6876 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6877 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6878 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6879 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6880 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6881 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6882 }
34f80b04 6883 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 6884
34f80b04
EG
6885 if (sizeof(union cdu_context) != 1024)
6886 /* we currently assume that a context is 1024 bytes */
51c1a580
MS
6887 dev_alert(&bp->pdev->dev,
6888 "please adjust the size of cdu_context(%ld)\n",
6889 (long)sizeof(union cdu_context));
a2fbb9ea 6890
619c5cb6 6891 bnx2x_init_block(bp, BLOCK_CDU, PHASE_COMMON);
34f80b04
EG
6892 val = (4 << 24) + (0 << 12) + 1024;
6893 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
a2fbb9ea 6894
619c5cb6 6895 bnx2x_init_block(bp, BLOCK_CFC, PHASE_COMMON);
34f80b04 6896 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34
EG
6897 /* enable context validation interrupt from CFC */
6898 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
6899
6900 /* set the thresholds to prevent CFC/CDU race */
6901 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 6902
619c5cb6 6903 bnx2x_init_block(bp, BLOCK_HC, PHASE_COMMON);
f2e0899f 6904
619c5cb6 6905 if (!CHIP_IS_E1x(bp) && BP_NOMCP(bp))
f2e0899f
DK
6906 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
6907
619c5cb6
VZ
6908 bnx2x_init_block(bp, BLOCK_IGU, PHASE_COMMON);
6909 bnx2x_init_block(bp, BLOCK_MISC_AEU, PHASE_COMMON);
a2fbb9ea 6910
34f80b04
EG
6911 /* Reset PCIE errors for debug */
6912 REG_WR(bp, 0x2814, 0xffffffff);
6913 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 6914
619c5cb6 6915 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
6916 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
6917 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
6918 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
6919 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
6920 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
6921 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
6922 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
6923 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
6924 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
6925 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
6926 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
6927 }
6928
619c5cb6 6929 bnx2x_init_block(bp, BLOCK_NIG, PHASE_COMMON);
f2e0899f 6930 if (!CHIP_IS_E1(bp)) {
619c5cb6
VZ
6931 /* in E3 this is done in the per-port section */
6932 if (!CHIP_IS_E3(bp))
6933 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
f2e0899f 6934 }
619c5cb6
VZ
6935 if (CHIP_IS_E1H(bp))
6936 /* not applicable for E2 (and above ...) */
6937 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
34f80b04
EG
6938
6939 if (CHIP_REV_IS_SLOW(bp))
6940 msleep(200);
6941
6942 /* finish CFC init */
6943 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
6944 if (val != 1) {
6945 BNX2X_ERR("CFC LL_INIT failed\n");
6946 return -EBUSY;
6947 }
6948 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
6949 if (val != 1) {
6950 BNX2X_ERR("CFC AC_INIT failed\n");
6951 return -EBUSY;
6952 }
6953 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
6954 if (val != 1) {
6955 BNX2X_ERR("CFC CAM_INIT failed\n");
6956 return -EBUSY;
6957 }
6958 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 6959
f2e0899f
DK
6960 if (CHIP_IS_E1(bp)) {
6961 /* read NIG statistic
6962 to see if this is our first up since powerup */
6963 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
6964 val = *bnx2x_sp(bp, wb_data[0]);
34f80b04 6965
f2e0899f
DK
6966 /* do internal memory self test */
6967 if ((val == 0) && bnx2x_int_mem_test(bp)) {
6968 BNX2X_ERR("internal mem self test failed\n");
6969 return -EBUSY;
6970 }
34f80b04
EG
6971 }
6972
fd4ef40d
EG
6973 bnx2x_setup_fan_failure_detection(bp);
6974
34f80b04
EG
6975 /* clear PXP2 attentions */
6976 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 6977
4a33bc03 6978 bnx2x_enable_blocks_attention(bp);
c9ee9206 6979 bnx2x_enable_blocks_parity(bp);
a2fbb9ea 6980
6bbca910 6981 if (!BP_NOMCP(bp)) {
619c5cb6
VZ
6982 if (CHIP_IS_E1x(bp))
6983 bnx2x__common_init_phy(bp);
6bbca910
YR
6984 } else
6985 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
6986
34f80b04
EG
6987 return 0;
6988}
a2fbb9ea 6989
619c5cb6
VZ
6990/**
6991 * bnx2x_init_hw_common_chip - init HW at the COMMON_CHIP phase.
6992 *
6993 * @bp: driver handle
6994 */
6995static int bnx2x_init_hw_common_chip(struct bnx2x *bp)
6996{
6997 int rc = bnx2x_init_hw_common(bp);
6998
6999 if (rc)
7000 return rc;
7001
7002 /* In E2 2-PORT mode, same ext phy is used for the two paths */
7003 if (!BP_NOMCP(bp))
7004 bnx2x__common_init_phy(bp);
7005
7006 return 0;
7007}
7008
523224a3 7009static int bnx2x_init_hw_port(struct bnx2x *bp)
34f80b04
EG
7010{
7011 int port = BP_PORT(bp);
619c5cb6 7012 int init_phase = port ? PHASE_PORT1 : PHASE_PORT0;
1c06328c 7013 u32 low, high;
34f80b04 7014 u32 val;
a2fbb9ea 7015
619c5cb6 7016
51c1a580 7017 DP(NETIF_MSG_HW, "starting port init port %d\n", port);
34f80b04
EG
7018
7019 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 7020
619c5cb6
VZ
7021 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7022 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7023 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
ca00392c 7024
f2e0899f
DK
7025 /* Timers bug workaround: disables the pf_master bit in pglue at
7026 * common phase, we need to enable it here before any dmae accesses are
7027 * attempted. Therefore we manually added the enable-master to the
7028 * port phase (it also happens in the function phase)
7029 */
619c5cb6 7030 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7031 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7032
619c5cb6
VZ
7033 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7034 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7035 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
7036 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7037
7038 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7039 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7040 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7041 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
a2fbb9ea 7042
523224a3
DK
7043 /* QM cid (connection) count */
7044 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
a2fbb9ea 7045
55c11941
MS
7046 if (CNIC_SUPPORT(bp)) {
7047 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7048 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
7049 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
7050 }
cdaa7cb8 7051
619c5cb6 7052 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
f2e0899f 7053
2b674047
DK
7054 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7055
f2e0899f 7056 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
619c5cb6
VZ
7057
7058 if (IS_MF(bp))
7059 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
7060 else if (bp->dev->mtu > 4096) {
7061 if (bp->flags & ONE_PORT_FLAG)
7062 low = 160;
7063 else {
7064 val = bp->dev->mtu;
7065 /* (24*1024 + val*4)/256 */
7066 low = 96 + (val/64) +
7067 ((val % 64) ? 1 : 0);
7068 }
7069 } else
7070 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
7071 high = low + 56; /* 14*1024/256 */
f2e0899f
DK
7072 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
7073 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
1c06328c 7074 }
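		/* Worked example for the thresholds above (the values appear to
		 * be in 256-byte BRB blocks): on a two-port, non-MF device with
		 * an MTU of 9000, low = 96 + 9000/64 + 1 = 237 and
		 * high = 237 + 56 = 293; for a default MTU (<= 4096) the fixed
		 * pair low = 160, high = 216 is used instead.
		 */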
1c06328c 7075
619c5cb6
VZ
7076 if (CHIP_MODE_IS_4_PORT(bp))
7077 REG_WR(bp, (BP_PORT(bp) ?
7078 BRB1_REG_MAC_GUARANTIED_1 :
7079 BRB1_REG_MAC_GUARANTIED_0), 40);
1c06328c 7080
ca00392c 7081
619c5cb6 7082 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
a3348722
BW
7083 if (CHIP_IS_E3B0(bp)) {
7084 if (IS_MF_AFEX(bp)) {
7085 /* configure headers for AFEX mode */
7086 REG_WR(bp, BP_PORT(bp) ?
7087 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7088 PRS_REG_HDRS_AFTER_BASIC_PORT_0, 0xE);
7089 REG_WR(bp, BP_PORT(bp) ?
7090 PRS_REG_HDRS_AFTER_TAG_0_PORT_1 :
7091 PRS_REG_HDRS_AFTER_TAG_0_PORT_0, 0x6);
7092 REG_WR(bp, BP_PORT(bp) ?
7093 PRS_REG_MUST_HAVE_HDRS_PORT_1 :
7094 PRS_REG_MUST_HAVE_HDRS_PORT_0, 0xA);
7095 } else {
7096 /* Ovlan exists only if we are in multi-function +
7097 * switch-dependent mode; in switch-independent mode there
7098 * are no ovlan headers
7099 */
7100 REG_WR(bp, BP_PORT(bp) ?
7101 PRS_REG_HDRS_AFTER_BASIC_PORT_1 :
7102 PRS_REG_HDRS_AFTER_BASIC_PORT_0,
7103 (bp->path_has_ovlan ? 7 : 6));
7104 }
7105 }
356e2385 7106
619c5cb6
VZ
7107 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7108 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7109 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7110 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
356e2385 7111
619c5cb6
VZ
7112 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7113 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7114 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7115 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
34f80b04 7116
619c5cb6
VZ
7117 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7118 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
a2fbb9ea 7119
619c5cb6
VZ
7120 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7121
7122 if (CHIP_IS_E1x(bp)) {
f2e0899f
DK
7123 /* configure PBF to work without PAUSE mtu 9000 */
7124 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 7125
f2e0899f
DK
7126 /* update threshold */
7127 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
7128 /* update init credit */
7129 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
a2fbb9ea 7130
f2e0899f
DK
7131 /* probe changes */
7132 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
7133 udelay(50);
7134 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
7135 }
a2fbb9ea 7136
55c11941
MS
7137 if (CNIC_SUPPORT(bp))
7138 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7139
619c5cb6
VZ
7140 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
7141 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
34f80b04
EG
7142
7143 if (CHIP_IS_E1(bp)) {
7144 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7145 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7146 }
619c5cb6 7147 bnx2x_init_block(bp, BLOCK_HC, init_phase);
34f80b04 7148
619c5cb6 7149 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
f2e0899f 7150
619c5cb6 7151 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
34f80b04
EG
7152 /* init aeu_mask_attn_func_0/1:
7153 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
7154 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
7155 * bits 4-7 are used for "per vn group attention" */
e4901dde
VZ
7156 val = IS_MF(bp) ? 0xF7 : 0x7;
7157 /* Enable DCBX attention for all but E1 */
7158 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
7159 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
34f80b04 7160
619c5cb6
VZ
7161 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7162
7163 if (!CHIP_IS_E1x(bp)) {
7164 /* Bit-map indicating which L2 hdrs may appear after the
7165 * basic Ethernet header
7166 */
a3348722
BW
7167 if (IS_MF_AFEX(bp))
7168 REG_WR(bp, BP_PORT(bp) ?
7169 NIG_REG_P1_HDRS_AFTER_BASIC :
7170 NIG_REG_P0_HDRS_AFTER_BASIC, 0xE);
7171 else
7172 REG_WR(bp, BP_PORT(bp) ?
7173 NIG_REG_P1_HDRS_AFTER_BASIC :
7174 NIG_REG_P0_HDRS_AFTER_BASIC,
7175 IS_MF_SD(bp) ? 7 : 6);
619c5cb6
VZ
7176
7177 if (CHIP_IS_E3(bp))
7178 REG_WR(bp, BP_PORT(bp) ?
7179 NIG_REG_LLH1_MF_MODE :
7180 NIG_REG_LLH_MF_MODE, IS_MF(bp));
7181 }
7182 if (!CHIP_IS_E3(bp))
7183 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
34f80b04 7184
f2e0899f 7185 if (!CHIP_IS_E1(bp)) {
fb3bff17 7186 /* 0x2 disable mf_ov, 0x1 enable */
34f80b04 7187 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
0793f83f 7188 (IS_MF_SD(bp) ? 0x1 : 0x2));
34f80b04 7189
619c5cb6 7190 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7191 val = 0;
7192 switch (bp->mf_mode) {
7193 case MULTI_FUNCTION_SD:
7194 val = 1;
7195 break;
7196 case MULTI_FUNCTION_SI:
a3348722 7197 case MULTI_FUNCTION_AFEX:
f2e0899f
DK
7198 val = 2;
7199 break;
7200 }
7201
7202 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
7203 NIG_REG_LLH0_CLS_TYPE), val);
7204 }
1c06328c
EG
7205 {
7206 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
7207 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
7208 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
7209 }
34f80b04
EG
7210 }
7211
619c5cb6
VZ
7212 /* If SPIO5 is set to generate interrupts, enable it for this port */
7213 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
d6d99a3f 7214 if (val & MISC_SPIO_SPIO5) {
4d295db0
EG
7215 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
7216 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
7217 val = REG_RD(bp, reg_addr);
f1410647 7218 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4d295db0 7219 REG_WR(bp, reg_addr, val);
f1410647 7220 }
a2fbb9ea 7221
34f80b04
EG
7222 return 0;
7223}
7224
34f80b04
EG
7225static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
7226{
7227 int reg;
32d68de1 7228 u32 wb_write[2];
34f80b04 7229
f2e0899f 7230 if (CHIP_IS_E1(bp))
34f80b04 7231 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
f2e0899f
DK
7232 else
7233 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
34f80b04 7234
32d68de1
YM
7235 wb_write[0] = ONCHIP_ADDR1(addr);
7236 wb_write[1] = ONCHIP_ADDR2(addr);
7237 REG_WR_DMAE(bp, reg, wb_write, 2);
34f80b04
EG
7238}
7239
b56e9670 7240void bnx2x_igu_clear_sb_gen(struct bnx2x *bp, u8 func, u8 idu_sb_id, bool is_pf)
1191cb83
ED
7241{
7242 u32 data, ctl, cnt = 100;
7243 u32 igu_addr_data = IGU_REG_COMMAND_REG_32LSB_DATA;
7244 u32 igu_addr_ctl = IGU_REG_COMMAND_REG_CTRL;
7245 u32 igu_addr_ack = IGU_REG_CSTORM_TYPE_0_SB_CLEANUP + (idu_sb_id/32)*4;
7246 u32 sb_bit = 1 << (idu_sb_id%32);
b56e9670 7247 u32 func_encode = func | (is_pf ? 1 : 0) << IGU_FID_ENCODE_IS_PF_SHIFT;
1191cb83
ED
7248 u32 addr_encode = IGU_CMD_E2_PROD_UPD_BASE + idu_sb_id;
7249
7250 /* Not supported in BC mode */
7251 if (CHIP_INT_MODE_IS_BC(bp))
7252 return;
7253
7254 data = (IGU_USE_REGISTER_cstorm_type_0_sb_cleanup
7255 << IGU_REGULAR_CLEANUP_TYPE_SHIFT) |
7256 IGU_REGULAR_CLEANUP_SET |
7257 IGU_REGULAR_BCLEANUP;
7258
7259 ctl = addr_encode << IGU_CTRL_REG_ADDRESS_SHIFT |
7260 func_encode << IGU_CTRL_REG_FID_SHIFT |
7261 IGU_CTRL_CMD_TYPE_WR << IGU_CTRL_REG_TYPE_SHIFT;
7262
7263 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7264 data, igu_addr_data);
7265 REG_WR(bp, igu_addr_data, data);
7266 mmiowb();
7267 barrier();
7268 DP(NETIF_MSG_HW, "write 0x%08x to IGU(via GRC) addr 0x%x\n",
7269 ctl, igu_addr_ctl);
7270 REG_WR(bp, igu_addr_ctl, ctl);
7271 mmiowb();
7272 barrier();
7273
7274 /* wait for clean up to finish */
7275 while (!(REG_RD(bp, igu_addr_ack) & sb_bit) && --cnt)
7276 msleep(20);
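	/* With cnt == 100 and 20 ms per iteration, the loop above gives the
	 * IGU roughly two seconds to acknowledge the cleanup before the
	 * debug message below is emitted.
	 */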
7277
7278
7279 if (!(REG_RD(bp, igu_addr_ack) & sb_bit)) {
7280 DP(NETIF_MSG_HW,
7281 "Unable to finish IGU cleanup: idu_sb_id %d offset %d bit %d (cnt %d)\n",
7282 idu_sb_id, idu_sb_id/32, idu_sb_id%32, cnt);
7283 }
7284}
7285
7286static void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
f2e0899f 7287{
619c5cb6 7288 bnx2x_igu_clear_sb_gen(bp, BP_FUNC(bp), idu_sb_id, true /*PF*/);
f2e0899f
DK
7289}
7290
1191cb83 7291static void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
f2e0899f
DK
7292{
7293 u32 i, base = FUNC_ILT_BASE(func);
7294 for (i = base; i < base + ILT_PER_FUNC; i++)
7295 bnx2x_ilt_wr(bp, i, 0);
7296}
7297
55c11941 7298
910cc727 7299static void bnx2x_init_searcher(struct bnx2x *bp)
55c11941
MS
7300{
7301 int port = BP_PORT(bp);
7302 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
7303 /* T1 hash bits value determines the T1 number of entries */
7304 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
7305}
7306
7307static inline int bnx2x_func_switch_update(struct bnx2x *bp, int suspend)
7308{
7309 int rc;
7310 struct bnx2x_func_state_params func_params = {NULL};
7311 struct bnx2x_func_switch_update_params *switch_update_params =
7312 &func_params.params.switch_update;
7313
7314 /* Prepare parameters for function state transitions */
7315 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
7316 __set_bit(RAMROD_RETRY, &func_params.ramrod_flags);
7317
7318 func_params.f_obj = &bp->func_obj;
7319 func_params.cmd = BNX2X_F_CMD_SWITCH_UPDATE;
7320
7321 /* Function parameters */
7322 switch_update_params->suspend = suspend;
7323
7324 rc = bnx2x_func_state_change(bp, &func_params);
7325
7326 return rc;
7327}
7328
910cc727 7329static int bnx2x_reset_nic_mode(struct bnx2x *bp)
55c11941
MS
7330{
7331 int rc, i, port = BP_PORT(bp);
7332 int vlan_en = 0, mac_en[NUM_MACS];
7333
7334
7335 /* Close input from network */
7336 if (bp->mf_mode == SINGLE_FUNCTION) {
7337 bnx2x_set_rx_filter(&bp->link_params, 0);
7338 } else {
7339 vlan_en = REG_RD(bp, port ? NIG_REG_LLH1_FUNC_EN :
7340 NIG_REG_LLH0_FUNC_EN);
7341 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7342 NIG_REG_LLH0_FUNC_EN, 0);
7343 for (i = 0; i < NUM_MACS; i++) {
7344 mac_en[i] = REG_RD(bp, port ?
7345 (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7346 4 * i) :
7347 (NIG_REG_LLH0_FUNC_MEM_ENABLE +
7348 4 * i));
7349 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7350 4 * i) :
7351 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i), 0);
7352 }
7353 }
7354
7355 /* Close BMC to host */
7356 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7357 NIG_REG_P1_TX_MNG_HOST_ENABLE, 0);
7358
7359 /* Suspend Tx switching to the PF. Completion of this ramrod
7360 * further guarantees that all the packets of that PF / child
7361 * VFs in BRB were processed by the Parser, so it is safe to
7362 * change the NIC_MODE register.
7363 */
7364 rc = bnx2x_func_switch_update(bp, 1);
7365 if (rc) {
7366 BNX2X_ERR("Can't suspend tx-switching!\n");
7367 return rc;
7368 }
7369
7370 /* Change NIC_MODE register */
7371 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7372
7373 /* Open input from network */
7374 if (bp->mf_mode == SINGLE_FUNCTION) {
7375 bnx2x_set_rx_filter(&bp->link_params, 1);
7376 } else {
7377 REG_WR(bp, port ? NIG_REG_LLH1_FUNC_EN :
7378 NIG_REG_LLH0_FUNC_EN, vlan_en);
7379 for (i = 0; i < NUM_MACS; i++) {
7380 REG_WR(bp, port ? (NIG_REG_LLH1_FUNC_MEM_ENABLE +
7381 4 * i) :
7382 (NIG_REG_LLH0_FUNC_MEM_ENABLE + 4 * i),
7383 mac_en[i]);
7384 }
7385 }
7386
7387 /* Enable BMC to host */
7388 REG_WR(bp, port ? NIG_REG_P0_TX_MNG_HOST_ENABLE :
7389 NIG_REG_P1_TX_MNG_HOST_ENABLE, 1);
7390
7391 /* Resume Tx switching to the PF */
7392 rc = bnx2x_func_switch_update(bp, 0);
7393 if (rc) {
7394 BNX2X_ERR("Can't resume tx-switching!\n");
7395 return rc;
7396 }
7397
7398 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7399 return 0;
7400}
7401
7402int bnx2x_init_hw_func_cnic(struct bnx2x *bp)
7403{
7404 int rc;
7405
7406 bnx2x_ilt_init_op_cnic(bp, INITOP_SET);
7407
7408 if (CONFIGURE_NIC_MODE(bp)) {
7409 /* Configure searcher as part of function hw init */
7410 bnx2x_init_searcher(bp);
7411
7412 /* Reset NIC mode */
7413 rc = bnx2x_reset_nic_mode(bp);
7414 if (rc)
7415 BNX2X_ERR("Can't change NIC mode!\n");
7416 return rc;
7417 }
7418
7419 return 0;
7420}
7421
523224a3 7422static int bnx2x_init_hw_func(struct bnx2x *bp)
34f80b04
EG
7423{
7424 int port = BP_PORT(bp);
7425 int func = BP_FUNC(bp);
619c5cb6 7426 int init_phase = PHASE_PF0 + func;
523224a3
DK
7427 struct bnx2x_ilt *ilt = BP_ILT(bp);
7428 u16 cdu_ilt_start;
8badd27a 7429 u32 addr, val;
f4a66897 7430 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
89db4ad8 7431 int i, main_mem_width, rc;
34f80b04 7432
51c1a580 7433 DP(NETIF_MSG_HW, "starting func init func %d\n", func);
34f80b04 7434
619c5cb6 7435 /* FLR cleanup - hmmm */
89db4ad8
AE
7436 if (!CHIP_IS_E1x(bp)) {
7437 rc = bnx2x_pf_flr_clnup(bp);
04c46736
YM
7438 if (rc) {
7439 bnx2x_fw_dump(bp);
89db4ad8 7440 return rc;
04c46736 7441 }
89db4ad8 7442 }
619c5cb6 7443
8badd27a 7444 /* set MSI reconfigure capability */
f2e0899f
DK
7445 if (bp->common.int_block == INT_BLOCK_HC) {
7446 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
7447 val = REG_RD(bp, addr);
7448 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
7449 REG_WR(bp, addr, val);
7450 }
8badd27a 7451
619c5cb6
VZ
7452 bnx2x_init_block(bp, BLOCK_PXP, init_phase);
7453 bnx2x_init_block(bp, BLOCK_PXP2, init_phase);
7454
523224a3
DK
7455 ilt = BP_ILT(bp);
7456 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
37b091ba 7457
290ca2bb
AE
7458 if (IS_SRIOV(bp))
7459 cdu_ilt_start += BNX2X_FIRST_VF_CID/ILT_PAGE_CIDS;
7460 cdu_ilt_start = bnx2x_iov_init_ilt(bp, cdu_ilt_start);
7461
7462 /* since BNX2X_FIRST_VF_CID > 0 the PF L2 cids precedes
7463 * those of the VFs, so start line should be reset
7464 */
7465 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
523224a3 7466 for (i = 0; i < L2_ILT_LINES(bp); i++) {
a052997e 7467 ilt->lines[cdu_ilt_start + i].page = bp->context[i].vcxt;
523224a3 7468 ilt->lines[cdu_ilt_start + i].page_mapping =
a052997e
MS
7469 bp->context[i].cxt_mapping;
7470 ilt->lines[cdu_ilt_start + i].size = bp->context[i].size;
37b091ba 7471 }
290ca2bb 7472
523224a3 7473 bnx2x_ilt_init_op(bp, INITOP_SET);
f85582f8 7474
55c11941
MS
7475 if (!CONFIGURE_NIC_MODE(bp)) {
7476 bnx2x_init_searcher(bp);
7477 REG_WR(bp, PRS_REG_NIC_MODE, 0);
7478 DP(NETIF_MSG_IFUP, "NIC MODE disabled\n");
7479 } else {
7480 /* Set NIC mode */
7481 REG_WR(bp, PRS_REG_NIC_MODE, 1);
7482 DP(NETIF_MSG_IFUP, "NIC MODE configrued\n");
37b091ba 7483
55c11941 7484 }
37b091ba 7485
619c5cb6 7486 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7487 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
7488
7489 /* Turn on a single ISR mode in IGU if driver is going to use
7490 * INT#x or MSI
7491 */
7492 if (!(bp->flags & USING_MSIX_FLAG))
7493 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
7494 /*
7495 * Timers bug workaround: function init part.
7496 * Need to wait 20msec after initializing ILT,
7497 * needed to make sure there are no requests in
7498 * one of the PXP internal queues with "old" ILT addresses
7499 */
7500 msleep(20);
7501 /*
7502 * Master enable - Due to WB DMAE writes performed before this
7503 * register is re-initialized as part of the regular function
7504 * init
7505 */
7506 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
7507 /* Enable the function in IGU */
7508 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
7509 }
7510
523224a3 7511 bp->dmae_ready = 1;
34f80b04 7512
619c5cb6 7513 bnx2x_init_block(bp, BLOCK_PGLUE_B, init_phase);
523224a3 7514
619c5cb6 7515 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7516 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
7517
619c5cb6
VZ
7518 bnx2x_init_block(bp, BLOCK_ATC, init_phase);
7519 bnx2x_init_block(bp, BLOCK_DMAE, init_phase);
7520 bnx2x_init_block(bp, BLOCK_NIG, init_phase);
7521 bnx2x_init_block(bp, BLOCK_SRC, init_phase);
7522 bnx2x_init_block(bp, BLOCK_MISC, init_phase);
7523 bnx2x_init_block(bp, BLOCK_TCM, init_phase);
7524 bnx2x_init_block(bp, BLOCK_UCM, init_phase);
7525 bnx2x_init_block(bp, BLOCK_CCM, init_phase);
7526 bnx2x_init_block(bp, BLOCK_XCM, init_phase);
7527 bnx2x_init_block(bp, BLOCK_TSEM, init_phase);
7528 bnx2x_init_block(bp, BLOCK_USEM, init_phase);
7529 bnx2x_init_block(bp, BLOCK_CSEM, init_phase);
7530 bnx2x_init_block(bp, BLOCK_XSEM, init_phase);
7531
7532 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7533 REG_WR(bp, QM_REG_PF_EN, 1);
7534
619c5cb6
VZ
7535 if (!CHIP_IS_E1x(bp)) {
7536 REG_WR(bp, TSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7537 REG_WR(bp, USEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7538 REG_WR(bp, CSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7539 REG_WR(bp, XSEM_REG_VFPF_ERR_NUM, BNX2X_MAX_NUM_OF_VFS + func);
7540 }
7541 bnx2x_init_block(bp, BLOCK_QM, init_phase);
7542
7543 bnx2x_init_block(bp, BLOCK_TM, init_phase);
7544 bnx2x_init_block(bp, BLOCK_DORQ, init_phase);
b56e9670
AE
7545
7546 bnx2x_iov_init_dq(bp);
7547
619c5cb6
VZ
7548 bnx2x_init_block(bp, BLOCK_BRB1, init_phase);
7549 bnx2x_init_block(bp, BLOCK_PRS, init_phase);
7550 bnx2x_init_block(bp, BLOCK_TSDM, init_phase);
7551 bnx2x_init_block(bp, BLOCK_CSDM, init_phase);
7552 bnx2x_init_block(bp, BLOCK_USDM, init_phase);
7553 bnx2x_init_block(bp, BLOCK_XSDM, init_phase);
7554 bnx2x_init_block(bp, BLOCK_UPB, init_phase);
7555 bnx2x_init_block(bp, BLOCK_XPB, init_phase);
7556 bnx2x_init_block(bp, BLOCK_PBF, init_phase);
7557 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7558 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
7559
619c5cb6 7560 bnx2x_init_block(bp, BLOCK_CDU, init_phase);
523224a3 7561
619c5cb6 7562 bnx2x_init_block(bp, BLOCK_CFC, init_phase);
34f80b04 7563
619c5cb6 7564 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
7565 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
7566
fb3bff17 7567 if (IS_MF(bp)) {
34f80b04 7568 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
fb3bff17 7569 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
34f80b04
EG
7570 }
7571
619c5cb6 7572 bnx2x_init_block(bp, BLOCK_MISC_AEU, init_phase);
523224a3 7573
34f80b04 7574 /* HC init per function */
f2e0899f
DK
7575 if (bp->common.int_block == INT_BLOCK_HC) {
7576 if (CHIP_IS_E1H(bp)) {
7577 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7578
7579 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7580 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7581 }
619c5cb6 7582 bnx2x_init_block(bp, BLOCK_HC, init_phase);
f2e0899f
DK
7583
7584 } else {
7585 int num_segs, sb_idx, prod_offset;
7586
34f80b04
EG
7587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
7588
619c5cb6 7589 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7590 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
7591 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
7592 }
7593
619c5cb6 7594 bnx2x_init_block(bp, BLOCK_IGU, init_phase);
f2e0899f 7595
619c5cb6 7596 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
7597 int dsb_idx = 0;
7598 /**
7599 * Producer memory:
7600 * E2 mode: address 0-135 match to the mapping memory;
7601 * 136 - PF0 default prod; 137 - PF1 default prod;
7602 * 138 - PF2 default prod; 139 - PF3 default prod;
7603 * 140 - PF0 attn prod; 141 - PF1 attn prod;
7604 * 142 - PF2 attn prod; 143 - PF3 attn prod;
7605 * 144-147 reserved.
7606 *
7607 * E1.5 mode - In backward compatible mode;
7608 * for non default SB; each even line in the memory
7609 * holds the U producer and each odd line holds
7610 * the C producer. The first 128 producers are for
7611 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
7612 * producers are for the DSB for each PF.
7613 * Each PF has five segments: (the order inside each
7614 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
7615 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
7616 * 144-147 attn prods;
7617 */
7618 /* non-default-status-blocks */
7619 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7620 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
7621 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
7622 prod_offset = (bp->igu_base_sb + sb_idx) *
7623 num_segs;
7624
7625 for (i = 0; i < num_segs; i++) {
7626 addr = IGU_REG_PROD_CONS_MEMORY +
7627 (prod_offset + i) * 4;
7628 REG_WR(bp, addr, 0);
7629 }
7630 /* send consumer update with value 0 */
7631 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
7632 USTORM_ID, 0, IGU_INT_NOP, 1);
7633 bnx2x_igu_clear_sb(bp,
7634 bp->igu_base_sb + sb_idx);
7635 }
7636
7637 /* default-status-blocks */
7638 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
7639 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
7640
7641 if (CHIP_MODE_IS_4_PORT(bp))
7642 dsb_idx = BP_FUNC(bp);
7643 else
3395a033 7644 dsb_idx = BP_VN(bp);
f2e0899f
DK
7645
7646 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
7647 IGU_BC_BASE_DSB_PROD + dsb_idx :
7648 IGU_NORM_BASE_DSB_PROD + dsb_idx);
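			/* For example, in normal (non-BC) interrupt mode on a
			 * chip that is not in 4-port mode, BP_VN(bp) == 1 gives
			 * dsb_idx == 1 and prod_offset selects the PF1 default
			 * producer slot - entry 137 in the map described above,
			 * assuming IGU_NORM_BASE_DSB_PROD is 136.
			 */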
7649
3395a033
DK
7650 /*
7651 * igu prods come in chunks of E1HVN_MAX (4) -
7652 * it does not matter what the current chip mode is
7653 */
f2e0899f
DK
7654 for (i = 0; i < (num_segs * E1HVN_MAX);
7655 i += E1HVN_MAX) {
7656 addr = IGU_REG_PROD_CONS_MEMORY +
7657 (prod_offset + i)*4;
7658 REG_WR(bp, addr, 0);
7659 }
7660 /* send consumer update with 0 */
7661 if (CHIP_INT_MODE_IS_BC(bp)) {
7662 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7663 USTORM_ID, 0, IGU_INT_NOP, 1);
7664 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7665 CSTORM_ID, 0, IGU_INT_NOP, 1);
7666 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7667 XSTORM_ID, 0, IGU_INT_NOP, 1);
7668 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7669 TSTORM_ID, 0, IGU_INT_NOP, 1);
7670 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7671 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7672 } else {
7673 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7674 USTORM_ID, 0, IGU_INT_NOP, 1);
7675 bnx2x_ack_sb(bp, bp->igu_dsb_id,
7676 ATTENTION_ID, 0, IGU_INT_NOP, 1);
7677 }
7678 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
7679
7680 /* !!! these should become driver const once
7681 rf-tool supports split-68 const */
7682 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
7683 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
7684 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
7685 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
7686 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
7687 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
7688 }
34f80b04 7689 }
34f80b04 7690
c14423fe 7691 /* Reset PCIE errors for debug */
a2fbb9ea
ET
7692 REG_WR(bp, 0x2114, 0xffffffff);
7693 REG_WR(bp, 0x2120, 0xffffffff);
523224a3 7694
f4a66897
VZ
7695 if (CHIP_IS_E1x(bp)) {
7696 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
7697 main_mem_base = HC_REG_MAIN_MEMORY +
7698 BP_PORT(bp) * (main_mem_size * 4);
7699 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
7700 main_mem_width = 8;
7701
7702 val = REG_RD(bp, main_mem_prty_clr);
7703 if (val)
51c1a580
MS
7704 DP(NETIF_MSG_HW,
7705 "Hmmm... Parity errors in HC block during function init (0x%x)!\n",
7706 val);
f4a66897
VZ
7707
7708 /* Clear "false" parity errors in MSI-X table */
7709 for (i = main_mem_base;
7710 i < main_mem_base + main_mem_size * 4;
7711 i += main_mem_width) {
7712 bnx2x_read_dmae(bp, i, main_mem_width / 4);
7713 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
7714 i, main_mem_width / 4);
7715 }
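		/* Each pass DMA-reads one 8-byte row of the HC main memory into
		 * the slowpath wb_data buffer and writes it straight back; the
		 * rewrite refreshes the row's parity bits without changing its
		 * contents, which is what clears the "false" errors noted above.
		 */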
7716 /* Clear HC parity attention */
7717 REG_RD(bp, main_mem_prty_clr);
7718 }
7719
619c5cb6
VZ
7720#ifdef BNX2X_STOP_ON_ERROR
7721 /* Enable STORMs SP logging */
7722 REG_WR8(bp, BAR_USTRORM_INTMEM +
7723 USTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7724 REG_WR8(bp, BAR_TSTRORM_INTMEM +
7725 TSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7726 REG_WR8(bp, BAR_CSTRORM_INTMEM +
7727 CSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7728 REG_WR8(bp, BAR_XSTRORM_INTMEM +
7729 XSTORM_RECORD_SLOW_PATH_OFFSET(BP_FUNC(bp)), 1);
7730#endif
7731
b7737c9b 7732 bnx2x_phy_probe(&bp->link_params);
f85582f8 7733
34f80b04
EG
7734 return 0;
7735}
7736
a2fbb9ea 7737
55c11941
MS
7738void bnx2x_free_mem_cnic(struct bnx2x *bp)
7739{
7740 bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_FREE);
7741
7742 if (!CHIP_IS_E1x(bp))
7743 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
7744 sizeof(struct host_hc_status_block_e2));
7745 else
7746 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
7747 sizeof(struct host_hc_status_block_e1x));
7748
7749 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7750}
7751
9f6c9258 7752void bnx2x_free_mem(struct bnx2x *bp)
a2fbb9ea 7753{
a052997e
MS
7754 int i;
7755
a2fbb9ea 7756 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
523224a3 7757 sizeof(struct host_sp_status_block));
a2fbb9ea 7758
619c5cb6
VZ
7759 BNX2X_PCI_FREE(bp->fw_stats, bp->fw_stats_mapping,
7760 bp->fw_stats_data_sz + bp->fw_stats_req_sz);
7761
a2fbb9ea 7762 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 7763 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7764
a052997e
MS
7765 for (i = 0; i < L2_ILT_LINES(bp); i++)
7766 BNX2X_PCI_FREE(bp->context[i].vcxt, bp->context[i].cxt_mapping,
7767 bp->context[i].size);
523224a3
DK
7768 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
7769
7770 BNX2X_FREE(bp->ilt->lines);
f85582f8 7771
7a9b2557 7772 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 7773
523224a3
DK
7774 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
7775 BCM_PAGE_SIZE * NUM_EQ_PAGES);
580d9d08 7776
05952246
YM
7777 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
7778
580d9d08 7779 bnx2x_iov_free_mem(bp);
619c5cb6
VZ
7780}
7781
a2fbb9ea 7782
55c11941 7783int bnx2x_alloc_mem_cnic(struct bnx2x *bp)
a2fbb9ea 7784{
619c5cb6
VZ
7785 if (!CHIP_IS_E1x(bp))
7786 /* size = the status block + ramrod buffers */
f2e0899f
DK
7787 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
7788 sizeof(struct host_hc_status_block_e2));
7789 else
55c11941
MS
7790 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb,
7791 &bp->cnic_sb_mapping,
7792 sizeof(struct
7793 host_hc_status_block_e1x));
8badd27a 7794
2f7a3122 7795 if (CONFIGURE_NIC_MODE(bp) && !bp->t2)
55c11941
MS
7796 /* allocate searcher T2 table, as it wasn't allocated before */
7797 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
7798
7799 /* write address to which L5 should insert its values */
7800 bp->cnic_eth_dev.addr_drv_info_to_mcp =
7801 &bp->slowpath->drv_info_to_mcp;
7802
7803 if (bnx2x_ilt_mem_op_cnic(bp, ILT_MEMOP_ALLOC))
7804 goto alloc_mem_err;
7805
7806 return 0;
7807
7808alloc_mem_err:
7809 bnx2x_free_mem_cnic(bp);
7810 BNX2X_ERR("Can't allocate memory\n");
7811 return -ENOMEM;
7812}
7813
7814int bnx2x_alloc_mem(struct bnx2x *bp)
7815{
7816 int i, allocated, context_size;
a2fbb9ea 7817
2f7a3122 7818 if (!CONFIGURE_NIC_MODE(bp) && !bp->t2)
55c11941
MS
7819 /* allocate searcher T2 table */
7820 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
8badd27a 7821
523224a3
DK
7822 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
7823 sizeof(struct host_sp_status_block));
a2fbb9ea 7824
523224a3
DK
7825 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
7826 sizeof(struct bnx2x_slowpath));
a2fbb9ea 7827
a052997e
MS
7828 /* Allocate memory for CDU context:
7829 * This memory is allocated separately and not in the generic ILT
7830 * functions because CDU differs in few aspects:
7831 * 1. There are multiple entities allocating memory for context -
7832 * 'regular' driver, CNIC and SRIOV driver. Each separately controls
7833 * its own ILT lines.
7834 * 2. Since CDU page-size is not a single 4KB page (which is the case
7835 * for the other ILT clients), to be efficient we want to support
7836 * allocation of sub-page-size in the last entry.
7837 * 3. Context pointers are used by the driver to pass to FW / update
7838 * the context (for the other ILT clients the pointers are used just to
7839 * free the memory during unload).
7840 */
7841 context_size = sizeof(union cdu_context) * BNX2X_L2_CID_COUNT(bp);
65abd74d 7842
a052997e
MS
7843 for (i = 0, allocated = 0; allocated < context_size; i++) {
7844 bp->context[i].size = min(CDU_ILT_PAGE_SZ,
7845 (context_size - allocated));
7846 BNX2X_PCI_ALLOC(bp->context[i].vcxt,
7847 &bp->context[i].cxt_mapping,
7848 bp->context[i].size);
7849 allocated += bp->context[i].size;
7850 }
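	/* Illustrative split (sizes are assumptions for the example, not chip
	 * constants): if context_size were 100 KiB and CDU_ILT_PAGE_SZ 32 KiB,
	 * the loop above would produce four pieces of 32, 32, 32 and 4 KiB,
	 * with only the last entry using the sub-page allocation mentioned in
	 * point 2 of the comment above.
	 */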
523224a3 7851 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
65abd74d 7852
523224a3
DK
7853 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
7854 goto alloc_mem_err;
65abd74d 7855
67c431a5
AE
7856 if (bnx2x_iov_alloc_mem(bp))
7857 goto alloc_mem_err;
7858
9f6c9258
DK
7859 /* Slow path ring */
7860 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
65abd74d 7861
523224a3
DK
7862 /* EQ */
7863 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
7864 BCM_PAGE_SIZE * NUM_EQ_PAGES);
ab532cf3 7865
9f6c9258 7866 return 0;
e1510706 7867
9f6c9258
DK
7868alloc_mem_err:
7869 bnx2x_free_mem(bp);
51c1a580 7870 BNX2X_ERR("Can't allocate memory\n");
9f6c9258 7871 return -ENOMEM;
65abd74d
YG
7872}
7873
a2fbb9ea
ET
7874/*
7875 * Init service functions
7876 */
a2fbb9ea 7877
619c5cb6
VZ
7878int bnx2x_set_mac_one(struct bnx2x *bp, u8 *mac,
7879 struct bnx2x_vlan_mac_obj *obj, bool set,
7880 int mac_type, unsigned long *ramrod_flags)
a2fbb9ea 7881{
619c5cb6
VZ
7882 int rc;
7883 struct bnx2x_vlan_mac_ramrod_params ramrod_param;
a2fbb9ea 7884
619c5cb6 7885 memset(&ramrod_param, 0, sizeof(ramrod_param));
a2fbb9ea 7886
619c5cb6
VZ
7887 /* Fill general parameters */
7888 ramrod_param.vlan_mac_obj = obj;
7889 ramrod_param.ramrod_flags = *ramrod_flags;
a2fbb9ea 7890
619c5cb6
VZ
7891 /* Fill a user request section if needed */
7892 if (!test_bit(RAMROD_CONT, ramrod_flags)) {
7893 memcpy(ramrod_param.user_req.u.mac.mac, mac, ETH_ALEN);
a2fbb9ea 7894
619c5cb6 7895 __set_bit(mac_type, &ramrod_param.user_req.vlan_mac_flags);
e3553b29 7896
619c5cb6
VZ
7897 /* Set the command: ADD or DEL */
7898 if (set)
7899 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
7900 else
7901 ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_DEL;
a2fbb9ea
ET
7902 }
7903
619c5cb6 7904 rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
7b5342d9
YM
7905
7906 if (rc == -EEXIST) {
7907 DP(BNX2X_MSG_SP, "Failed to schedule ADD operations: %d\n", rc);
7908 /* do not treat adding same MAC as error */
7909 rc = 0;
7910 } else if (rc < 0)
619c5cb6 7911 BNX2X_ERR("%s MAC failed\n", (set ? "Set" : "Del"));
7b5342d9 7912
619c5cb6 7913 return rc;
a2fbb9ea
ET
7914}
7915
619c5cb6
VZ
7916int bnx2x_del_all_macs(struct bnx2x *bp,
7917 struct bnx2x_vlan_mac_obj *mac_obj,
7918 int mac_type, bool wait_for_comp)
e665bfda 7919{
619c5cb6
VZ
7920 int rc;
7921 unsigned long ramrod_flags = 0, vlan_mac_flags = 0;
0793f83f 7922
619c5cb6
VZ
7923 /* Wait for completion of requested */
7924 if (wait_for_comp)
7925 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
0793f83f 7926
619c5cb6
VZ
7927 /* Set the mac type of addresses we want to clear */
7928 __set_bit(mac_type, &vlan_mac_flags);
0793f83f 7929
619c5cb6
VZ
7930 rc = mac_obj->delete_all(bp, mac_obj, &vlan_mac_flags, &ramrod_flags);
7931 if (rc < 0)
7932 BNX2X_ERR("Failed to delete MACs: %d\n", rc);
0793f83f 7933
619c5cb6 7934 return rc;
0793f83f
DK
7935}
7936
619c5cb6 7937int bnx2x_set_eth_mac(struct bnx2x *bp, bool set)
523224a3 7938{
a3348722
BW
7939 if (is_zero_ether_addr(bp->dev->dev_addr) &&
7940 (IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp))) {
51c1a580
MS
7941 DP(NETIF_MSG_IFUP | NETIF_MSG_IFDOWN,
7942 "Ignoring Zero MAC for STORAGE SD mode\n");
614c76df
DK
7943 return 0;
7944 }
614c76df 7945
f8f4f61a
DK
7946 if (IS_PF(bp)) {
7947 unsigned long ramrod_flags = 0;
0793f83f 7948
f8f4f61a
DK
7949 DP(NETIF_MSG_IFUP, "Adding Eth MAC\n");
7950 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
7951 return bnx2x_set_mac_one(bp, bp->dev->dev_addr,
7952 &bp->sp_objs->mac_obj, set,
7953 BNX2X_ETH_MAC, &ramrod_flags);
7954 } else { /* vf */
7955 return bnx2x_vfpf_config_mac(bp, bp->dev->dev_addr,
7956 bp->fp->index, true);
7957 }
e665bfda 7958}
6e30dd4e 7959
619c5cb6 7960int bnx2x_setup_leading(struct bnx2x *bp)
ec6ba945 7961{
619c5cb6 7962 return bnx2x_setup_queue(bp, &bp->fp[0], 1);
993ac7b5 7963}
a2fbb9ea 7964
d6214d7a 7965/**
e8920674 7966 * bnx2x_set_int_mode - configure interrupt mode
d6214d7a 7967 *
e8920674 7968 * @bp: driver handle
d6214d7a 7969 *
e8920674 7970 * In case of MSI-X it will also try to enable MSI-X.
d6214d7a 7971 */
1ab4434c 7972int bnx2x_set_int_mode(struct bnx2x *bp)
ca00392c 7973{
1ab4434c
AE
7974 int rc = 0;
7975
7976 if (IS_VF(bp) && int_mode != BNX2X_INT_MODE_MSIX)
7977 return -EINVAL;
7978
9ee3d37b 7979 switch (int_mode) {
1ab4434c
AE
7980 case BNX2X_INT_MODE_MSIX:
7981 /* attempt to enable msix */
7982 rc = bnx2x_enable_msix(bp);
7983
7984 /* msix attained */
7985 if (!rc)
7986 return 0;
7987
7988 /* vfs use only msix */
7989 if (rc && IS_VF(bp))
7990 return rc;
7991
7992 /* failed to enable multiple MSI-X */
7993 BNX2X_DEV_INFO("Failed to enable multiple MSI-X (%d), set number of queues to %d\n",
7994 bp->num_queues,
7995 1 + bp->num_cnic_queues);
7996
7997 /* falling through... */
7998 case BNX2X_INT_MODE_MSI:
d6214d7a 7999 bnx2x_enable_msi(bp);
1ab4434c 8000
d6214d7a 8001 /* falling through... */
1ab4434c 8002 case BNX2X_INT_MODE_INTX:
55c11941
MS
8003 bp->num_ethernet_queues = 1;
8004 bp->num_queues = bp->num_ethernet_queues + bp->num_cnic_queues;
51c1a580 8005 BNX2X_DEV_INFO("set number of queues to 1\n");
ca00392c 8006 break;
d6214d7a 8007 default:
1ab4434c
AE
8008 BNX2X_DEV_INFO("unknown value in int_mode module parameter\n");
8009 return -EINVAL;
9f6c9258 8010 }
1ab4434c 8011 return 0;
a2fbb9ea
ET
8012}
8013
1ab4434c 8014/* must be called prior to any HW initializations */
c2bff63f
DK
8015static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
8016{
290ca2bb
AE
8017 if (IS_SRIOV(bp))
8018 return (BNX2X_FIRST_VF_CID + BNX2X_VF_CIDS)/ILT_PAGE_CIDS;
c2bff63f
DK
8019 return L2_ILT_LINES(bp);
8020}
8021
523224a3
DK
8022void bnx2x_ilt_set_info(struct bnx2x *bp)
8023{
8024 struct ilt_client_info *ilt_client;
8025 struct bnx2x_ilt *ilt = BP_ILT(bp);
8026 u16 line = 0;
8027
8028 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
8029 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
8030
8031 /* CDU */
8032 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
8033 ilt_client->client_num = ILT_CLIENT_CDU;
8034 ilt_client->page_size = CDU_ILT_PAGE_SZ;
8035 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
8036 ilt_client->start = line;
619c5cb6 8037 line += bnx2x_cid_ilt_lines(bp);
55c11941
MS
8038
8039 if (CNIC_SUPPORT(bp))
8040 line += CNIC_ILT_LINES;
523224a3
DK
8041 ilt_client->end = line - 1;
8042
51c1a580 8043 DP(NETIF_MSG_IFUP, "ilt client[CDU]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
523224a3
DK
8044 ilt_client->start,
8045 ilt_client->end,
8046 ilt_client->page_size,
8047 ilt_client->flags,
8048 ilog2(ilt_client->page_size >> 12));
8049
8050 /* QM */
8051 if (QM_INIT(bp->qm_cid_count)) {
8052 ilt_client = &ilt->clients[ILT_CLIENT_QM];
8053 ilt_client->client_num = ILT_CLIENT_QM;
8054 ilt_client->page_size = QM_ILT_PAGE_SZ;
8055 ilt_client->flags = 0;
8056 ilt_client->start = line;
8057
8058 /* 4 bytes for each cid */
8059 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
8060 QM_ILT_PAGE_SZ);
8061
8062 ilt_client->end = line - 1;
8063
51c1a580
MS
8064 DP(NETIF_MSG_IFUP,
8065 "ilt client[QM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
523224a3
DK
8066 ilt_client->start,
8067 ilt_client->end,
8068 ilt_client->page_size,
8069 ilt_client->flags,
8070 ilog2(ilt_client->page_size >> 12));
8071
8072 }
523224a3 8073
55c11941
MS
8074 if (CNIC_SUPPORT(bp)) {
8075 /* SRC */
8076 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
8077 ilt_client->client_num = ILT_CLIENT_SRC;
8078 ilt_client->page_size = SRC_ILT_PAGE_SZ;
8079 ilt_client->flags = 0;
8080 ilt_client->start = line;
8081 line += SRC_ILT_LINES;
8082 ilt_client->end = line - 1;
523224a3 8083
55c11941
MS
8084 DP(NETIF_MSG_IFUP,
8085 "ilt client[SRC]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8086 ilt_client->start,
8087 ilt_client->end,
8088 ilt_client->page_size,
8089 ilt_client->flags,
8090 ilog2(ilt_client->page_size >> 12));
9f6c9258 8091
55c11941
MS
8092 /* TM */
8093 ilt_client = &ilt->clients[ILT_CLIENT_TM];
8094 ilt_client->client_num = ILT_CLIENT_TM;
8095 ilt_client->page_size = TM_ILT_PAGE_SZ;
8096 ilt_client->flags = 0;
8097 ilt_client->start = line;
8098 line += TM_ILT_LINES;
8099 ilt_client->end = line - 1;
523224a3 8100
55c11941
MS
8101 DP(NETIF_MSG_IFUP,
8102 "ilt client[TM]: start %d, end %d, psz 0x%x, flags 0x%x, hw psz %d\n",
8103 ilt_client->start,
8104 ilt_client->end,
8105 ilt_client->page_size,
8106 ilt_client->flags,
8107 ilog2(ilt_client->page_size >> 12));
8108 }
9f6c9258 8109
619c5cb6 8110 BUG_ON(line > ILT_MAX_LINES);
523224a3 8111}
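/* Illustrative layout produced by bnx2x_ilt_set_info() (all counts below are
 * assumptions for the example, not actual chip values): with
 * bnx2x_cid_ilt_lines() == 8, CNIC_ILT_LINES == 16, QM needing 2 lines,
 * SRC_ILT_LINES == 8 and TM_ILT_LINES == 8, the clients are carved out
 * consecutively from ilt->start_line as CDU 0-23, QM 24-25, SRC 26-33 and
 * TM 34-41 (SRC/TM only when CNIC is supported).
 */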
f85582f8 8112
619c5cb6
VZ
8113/**
8114 * bnx2x_pf_q_prep_init - prepare INIT transition parameters
8115 *
8116 * @bp: driver handle
8117 * @fp: pointer to fastpath
8118 * @init_params: pointer to parameters structure
8119 *
8120 * parameters configured:
8121 * - HC configuration
8122 * - Queue's CDU context
8123 */
1191cb83 8124static void bnx2x_pf_q_prep_init(struct bnx2x *bp,
619c5cb6 8125 struct bnx2x_fastpath *fp, struct bnx2x_queue_init_params *init_params)
a2fbb9ea 8126{
6383c0b3
AE
8127
8128 u8 cos;
a052997e
MS
8129 int cxt_index, cxt_offset;
8130
619c5cb6
VZ
8131 /* FCoE Queue uses Default SB, thus has no HC capabilities */
8132 if (!IS_FCOE_FP(fp)) {
8133 __set_bit(BNX2X_Q_FLG_HC, &init_params->rx.flags);
8134 __set_bit(BNX2X_Q_FLG_HC, &init_params->tx.flags);
8135
8136 /* If HC is supported, enable host coalescing in the transition
8137 * to INIT state.
8138 */
8139 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->rx.flags);
8140 __set_bit(BNX2X_Q_FLG_HC_EN, &init_params->tx.flags);
8141
8142 /* HC rate */
8143 init_params->rx.hc_rate = bp->rx_ticks ?
8144 (1000000 / bp->rx_ticks) : 0;
8145 init_params->tx.hc_rate = bp->tx_ticks ?
8146 (1000000 / bp->tx_ticks) : 0;
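		/* hc_rate is a status-block update rate per second: e.g. a
		 * coalescing period of 50 in bp->rx_ticks (presumably
		 * microseconds) gives 1000000 / 50 == 20000 updates/sec, while
		 * a tick value of 0 simply yields hc_rate 0.
		 */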
8147
8148 /* FW SB ID */
8149 init_params->rx.fw_sb_id = init_params->tx.fw_sb_id =
8150 fp->fw_sb_id;
8151
8152 /*
8153 * CQ index among the SB indices: FCoE clients use the default
8154 * SB, therefore it's different.
8155 */
6383c0b3
AE
8156 init_params->rx.sb_cq_index = HC_INDEX_ETH_RX_CQ_CONS;
8157 init_params->tx.sb_cq_index = HC_INDEX_ETH_FIRST_TX_CQ_CONS;
619c5cb6
VZ
8158 }
8159
6383c0b3
AE
8160 /* set maximum number of COSs supported by this queue */
8161 init_params->max_cos = fp->max_cos;
8162
51c1a580 8163 DP(NETIF_MSG_IFUP, "fp: %d setting queue params max cos to: %d\n",
6383c0b3
AE
8164 fp->index, init_params->max_cos);
8165
8166 /* set the context pointers queue object */
a052997e 8167 for (cos = FIRST_TX_COS_INDEX; cos < init_params->max_cos; cos++) {
65565884
MS
8168 cxt_index = fp->txdata_ptr[cos]->cid / ILT_PAGE_CIDS;
8169 cxt_offset = fp->txdata_ptr[cos]->cid - (cxt_index *
a052997e 8170 ILT_PAGE_CIDS);
6383c0b3 8171 init_params->cxts[cos] =
a052997e
MS
8172 &bp->context[cxt_index].vcxt[cxt_offset].eth;
8173 }
619c5cb6
VZ
8174}
8175
910cc727 8176static int bnx2x_setup_tx_only(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6383c0b3
AE
8177 struct bnx2x_queue_state_params *q_params,
8178 struct bnx2x_queue_setup_tx_only_params *tx_only_params,
8179 int tx_index, bool leading)
8180{
8181 memset(tx_only_params, 0, sizeof(*tx_only_params));
8182
8183 /* Set the command */
8184 q_params->cmd = BNX2X_Q_CMD_SETUP_TX_ONLY;
8185
8186 /* Set tx-only QUEUE flags: don't zero statistics */
8187 tx_only_params->flags = bnx2x_get_common_flags(bp, fp, false);
8188
8189 /* choose the index of the cid to send the slow path on */
8190 tx_only_params->cid_index = tx_index;
8191
8192 /* Set general TX_ONLY_SETUP parameters */
8193 bnx2x_pf_q_prep_general(bp, fp, &tx_only_params->gen_params, tx_index);
8194
8195 /* Set Tx TX_ONLY_SETUP parameters */
8196 bnx2x_pf_tx_q_prep(bp, fp, &tx_only_params->txq_params, tx_index);
8197
51c1a580
MS
8198 DP(NETIF_MSG_IFUP,
8199 "preparing to send tx-only ramrod for connection: cos %d, primary cid %d, cid %d, client id %d, sp-client id %d, flags %lx\n",
6383c0b3
AE
8200 tx_index, q_params->q_obj->cids[FIRST_TX_COS_INDEX],
8201 q_params->q_obj->cids[tx_index], q_params->q_obj->cl_id,
8202 tx_only_params->gen_params.spcl_id, tx_only_params->flags);
8203
8204 /* send the ramrod */
8205 return bnx2x_queue_state_change(bp, q_params);
8206}
8207
8208
619c5cb6
VZ
8209/**
8210 * bnx2x_setup_queue - setup queue
8211 *
8212 * @bp: driver handle
8213 * @fp: pointer to fastpath
8214 * @leading: is leading
8215 *
8216 * This function performs 2 steps in a Queue state machine
8217 * actually: 1) RESET->INIT 2) INIT->SETUP
8218 */
8219
8220int bnx2x_setup_queue(struct bnx2x *bp, struct bnx2x_fastpath *fp,
8221 bool leading)
8222{
3b603066 8223 struct bnx2x_queue_state_params q_params = {NULL};
619c5cb6
VZ
8224 struct bnx2x_queue_setup_params *setup_params =
8225 &q_params.params.setup;
6383c0b3
AE
8226 struct bnx2x_queue_setup_tx_only_params *tx_only_params =
8227 &q_params.params.tx_only;
a2fbb9ea 8228 int rc;
6383c0b3
AE
8229 u8 tx_index;
8230
51c1a580 8231 DP(NETIF_MSG_IFUP, "setting up queue %d\n", fp->index);
a2fbb9ea 8232
ec6ba945
VZ
8233 /* reset IGU state skip FCoE L2 queue */
8234 if (!IS_FCOE_FP(fp))
8235 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
523224a3 8236 IGU_INT_ENABLE, 0);
a2fbb9ea 8237
15192a8c 8238 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
619c5cb6
VZ
8239 /* We want to wait for completion in this context */
8240 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
a2fbb9ea 8241
619c5cb6
VZ
8242 /* Prepare the INIT parameters */
8243 bnx2x_pf_q_prep_init(bp, fp, &q_params.params.init);
ec6ba945 8244
619c5cb6
VZ
8245 /* Set the command */
8246 q_params.cmd = BNX2X_Q_CMD_INIT;
8247
8248 /* Change the state to INIT */
8249 rc = bnx2x_queue_state_change(bp, &q_params);
8250 if (rc) {
6383c0b3 8251 BNX2X_ERR("Queue(%d) INIT failed\n", fp->index);
619c5cb6
VZ
8252 return rc;
8253 }
ec6ba945 8254
51c1a580 8255 DP(NETIF_MSG_IFUP, "init complete\n");
6383c0b3
AE
8256
8257
619c5cb6
VZ
8258 /* Now move the Queue to the SETUP state... */
8259 memset(setup_params, 0, sizeof(*setup_params));
a2fbb9ea 8260
619c5cb6
VZ
8261 /* Set QUEUE flags */
8262 setup_params->flags = bnx2x_get_q_flags(bp, fp, leading);
523224a3 8263
619c5cb6 8264 /* Set general SETUP parameters */
6383c0b3
AE
8265 bnx2x_pf_q_prep_general(bp, fp, &setup_params->gen_params,
8266 FIRST_TX_COS_INDEX);
619c5cb6 8267
6383c0b3 8268 bnx2x_pf_rx_q_prep(bp, fp, &setup_params->pause_params,
619c5cb6
VZ
8269 &setup_params->rxq_params);
8270
6383c0b3
AE
8271 bnx2x_pf_tx_q_prep(bp, fp, &setup_params->txq_params,
8272 FIRST_TX_COS_INDEX);
619c5cb6
VZ
8273
8274 /* Set the command */
8275 q_params.cmd = BNX2X_Q_CMD_SETUP;
8276
55c11941
MS
8277 if (IS_FCOE_FP(fp))
8278 bp->fcoe_init = true;
8279
619c5cb6
VZ
8280 /* Change the state to SETUP */
8281 rc = bnx2x_queue_state_change(bp, &q_params);
6383c0b3
AE
8282 if (rc) {
8283 BNX2X_ERR("Queue(%d) SETUP failed\n", fp->index);
8284 return rc;
8285 }
8286
8287 /* loop through the relevant tx-only indices */
8288 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8289 tx_index < fp->max_cos;
8290 tx_index++) {
8291
8292 /* prepare and send tx-only ramrod*/
8293 rc = bnx2x_setup_tx_only(bp, fp, &q_params,
8294 tx_only_params, tx_index, leading);
8295 if (rc) {
8296 BNX2X_ERR("Queue(%d.%d) TX_ONLY_SETUP failed\n",
8297 fp->index, tx_index);
8298 return rc;
8299 }
8300 }
523224a3 8301
34f80b04 8302 return rc;
a2fbb9ea
ET
8303}
8304
619c5cb6 8305static int bnx2x_stop_queue(struct bnx2x *bp, int index)
a2fbb9ea 8306{
619c5cb6 8307 struct bnx2x_fastpath *fp = &bp->fp[index];
6383c0b3 8308 struct bnx2x_fp_txdata *txdata;
3b603066 8309 struct bnx2x_queue_state_params q_params = {NULL};
6383c0b3
AE
8310 int rc, tx_index;
8311
51c1a580 8312 DP(NETIF_MSG_IFDOWN, "stopping queue %d cid %d\n", index, fp->cid);
a2fbb9ea 8313
15192a8c 8314 q_params.q_obj = &bnx2x_sp_obj(bp, fp).q_obj;
619c5cb6
VZ
8315 /* We want to wait for completion in this context */
8316 __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
a2fbb9ea 8317
6383c0b3
AE
8318
8319 /* close tx-only connections */
8320 for (tx_index = FIRST_TX_ONLY_COS_INDEX;
8321 tx_index < fp->max_cos;
8322 tx_index++){
8323
8324 /* ascertain this is a normal queue*/
65565884 8325 txdata = fp->txdata_ptr[tx_index];
6383c0b3 8326
51c1a580 8327 DP(NETIF_MSG_IFDOWN, "stopping tx-only queue %d\n",
6383c0b3
AE
8328 txdata->txq_index);
8329
8330 /* send halt terminate on tx-only connection */
8331 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
8332 memset(&q_params.params.terminate, 0,
8333 sizeof(q_params.params.terminate));
8334 q_params.params.terminate.cid_index = tx_index;
8335
8336 rc = bnx2x_queue_state_change(bp, &q_params);
8337 if (rc)
8338 return rc;
8339
8340 /* send halt terminate on tx-only connection */
8341 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
8342 memset(&q_params.params.cfc_del, 0,
8343 sizeof(q_params.params.cfc_del));
8344 q_params.params.cfc_del.cid_index = tx_index;
8345 rc = bnx2x_queue_state_change(bp, &q_params);
8346 if (rc)
8347 return rc;
8348 }
8349 /* Stop the primary connection: */
8350 /* ...halt the connection */
619c5cb6
VZ
8351 q_params.cmd = BNX2X_Q_CMD_HALT;
8352 rc = bnx2x_queue_state_change(bp, &q_params);
8353 if (rc)
da5a662a 8354 return rc;
a2fbb9ea 8355
6383c0b3 8356 /* ...terminate the connection */
619c5cb6 8357 q_params.cmd = BNX2X_Q_CMD_TERMINATE;
6383c0b3
AE
8358 memset(&q_params.params.terminate, 0,
8359 sizeof(q_params.params.terminate));
8360 q_params.params.terminate.cid_index = FIRST_TX_COS_INDEX;
619c5cb6
VZ
8361 rc = bnx2x_queue_state_change(bp, &q_params);
8362 if (rc)
523224a3 8363 return rc;
6383c0b3 8364 /* ...delete cfc entry */
619c5cb6 8365 q_params.cmd = BNX2X_Q_CMD_CFC_DEL;
6383c0b3
AE
8366 memset(&q_params.params.cfc_del, 0,
8367 sizeof(q_params.params.cfc_del));
8368 q_params.params.cfc_del.cid_index = FIRST_TX_COS_INDEX;
619c5cb6 8369 return bnx2x_queue_state_change(bp, &q_params);
523224a3
DK
8370}
8371
8372
34f80b04
EG
8373static void bnx2x_reset_func(struct bnx2x *bp)
8374{
8375 int port = BP_PORT(bp);
8376 int func = BP_FUNC(bp);
f2e0899f 8377 int i;
523224a3
DK
8378
8379 /* Disable the function in the FW */
8380 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
8381 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
8382 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
8383 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
8384
8385 /* FP SBs */
ec6ba945 8386 for_each_eth_queue(bp, i) {
523224a3 8387 struct bnx2x_fastpath *fp = &bp->fp[i];
619c5cb6 8388 REG_WR8(bp, BAR_CSTRORM_INTMEM +
6383c0b3
AE
8389 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET(fp->fw_sb_id),
8390 SB_DISABLED);
523224a3
DK
8391 }
8392
55c11941
MS
8393 if (CNIC_LOADED(bp))
8394 /* CNIC SB */
8395 REG_WR8(bp, BAR_CSTRORM_INTMEM +
8396 CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET
8397 (bnx2x_cnic_fw_sb_id(bp)), SB_DISABLED);
8398
523224a3 8399 /* SP SB */
619c5cb6 8400 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2de67439
YM
8401 CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func),
8402 SB_DISABLED);
523224a3
DK
8403
8404 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
8405 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
8406 0);
34f80b04
EG
8407
8408 /* Configure IGU */
f2e0899f
DK
8409 if (bp->common.int_block == INT_BLOCK_HC) {
8410 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
8411 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
8412 } else {
8413 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
8414 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
8415 }
34f80b04 8416
55c11941
MS
8417 if (CNIC_LOADED(bp)) {
8418 /* Disable Timer scan */
8419 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
8420 /*
8421		 * Wait for at least 10ms and up to 2 seconds for the timers
8422 * scan to complete
8423 */
8424 for (i = 0; i < 200; i++) {
8425 msleep(10);
8426 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
8427 break;
8428 }
37b091ba 8429 }
34f80b04 8430 /* Clear ILT */
f2e0899f
DK
8431 bnx2x_clear_func_ilt(bp, func);
8432
8433 /* Timers workaround bug for E2: if this is vnic-3,
8434	 * we need to set the entire ILT range for these timers.
8435 */
619c5cb6 8436 if (!CHIP_IS_E1x(bp) && BP_VN(bp) == 3) {
f2e0899f
DK
8437 struct ilt_client_info ilt_cli;
8438 /* use dummy TM client */
8439 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
8440 ilt_cli.start = 0;
8441 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
8442 ilt_cli.client_num = ILT_CLIENT_TM;
8443
8444 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
8445 }
8446
8447	/* this assumes that reset_port() was called before reset_func() */
619c5cb6 8448 if (!CHIP_IS_E1x(bp))
f2e0899f 8449 bnx2x_pf_disable(bp);
523224a3
DK
8450
8451 bp->dmae_ready = 0;
34f80b04
EG
8452}
8453
8454static void bnx2x_reset_port(struct bnx2x *bp)
8455{
8456 int port = BP_PORT(bp);
8457 u32 val;
8458
619c5cb6
VZ
8459 /* Reset physical Link */
8460 bnx2x__link_reset(bp);
8461
34f80b04
EG
8462 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
8463
8464 /* Do not rcv packets to BRB */
8465 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
8466 /* Do not direct rcv packets that are not for MCP to the BRB */
8467 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
8468 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
8469
8470 /* Configure AEU */
8471 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
8472
8473 msleep(100);
8474 /* Check for BRB port occupancy */
8475 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
8476 if (val)
8477 DP(NETIF_MSG_IFDOWN,
33471629 8478		   "BRB1 is not empty, %d blocks are occupied\n", val);
34f80b04
EG
8479
8480 /* TODO: Close Doorbell port? */
8481}
8482
1191cb83 8483static int bnx2x_reset_hw(struct bnx2x *bp, u32 load_code)
34f80b04 8484{
3b603066 8485 struct bnx2x_func_state_params func_params = {NULL};
34f80b04 8486
619c5cb6
VZ
8487 /* Prepare parameters for function state transitions */
8488 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
34f80b04 8489
619c5cb6
VZ
8490 func_params.f_obj = &bp->func_obj;
8491 func_params.cmd = BNX2X_F_CMD_HW_RESET;
34f80b04 8492
619c5cb6 8493 func_params.params.hw_init.load_phase = load_code;
49d66772 8494
619c5cb6 8495 return bnx2x_func_state_change(bp, &func_params);
34f80b04
EG
8496}
8497
1191cb83 8498static int bnx2x_func_stop(struct bnx2x *bp)
ec6ba945 8499{
3b603066 8500 struct bnx2x_func_state_params func_params = {NULL};
619c5cb6 8501 int rc;
228241eb 8502
619c5cb6
VZ
8503 /* Prepare parameters for function state transitions */
8504 __set_bit(RAMROD_COMP_WAIT, &func_params.ramrod_flags);
8505 func_params.f_obj = &bp->func_obj;
8506 func_params.cmd = BNX2X_F_CMD_STOP;
da5a662a 8507
619c5cb6
VZ
8508 /*
8509	 * Try to stop the function the 'good way'. If this fails (in case
8510	 * of a parity error during bnx2x_chip_cleanup()) and we are
8511	 * not in a debug mode, perform a state transaction in order to
8512 * enable further HW_RESET transaction.
8513 */
8514 rc = bnx2x_func_state_change(bp, &func_params);
8515 if (rc) {
34f80b04 8516#ifdef BNX2X_STOP_ON_ERROR
619c5cb6 8517 return rc;
34f80b04 8518#else
51c1a580 8519 BNX2X_ERR("FUNC_STOP ramrod failed. Running a dry transaction\n");
619c5cb6
VZ
8520 __set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags);
8521 return bnx2x_func_state_change(bp, &func_params);
34f80b04 8522#endif
228241eb 8523 }
a2fbb9ea 8524
619c5cb6
VZ
8525 return 0;
8526}
523224a3 8527
619c5cb6
VZ
8528/**
8529 * bnx2x_send_unload_req - request unload mode from the MCP.
8530 *
8531 * @bp: driver handle
8532 * @unload_mode: requested function's unload mode
8533 *
8534 * Return unload mode returned by the MCP: COMMON, PORT or FUNC.
8535 */
8536u32 bnx2x_send_unload_req(struct bnx2x *bp, int unload_mode)
8537{
8538 u32 reset_code = 0;
8539 int port = BP_PORT(bp);
3101c2bc 8540
619c5cb6 8541 /* Select the UNLOAD request mode */
65abd74d
YG
8542 if (unload_mode == UNLOAD_NORMAL)
8543 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
8544
7d0446c2 8545 else if (bp->flags & NO_WOL_FLAG)
65abd74d 8546 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
65abd74d 8547
7d0446c2 8548 else if (bp->wol) {
65abd74d
YG
8549 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
8550 u8 *mac_addr = bp->dev->dev_addr;
8551 u32 val;
f9977903
DK
8552 u16 pmc;
8553
65abd74d 8554 /* The mac address is written to entries 1-4 to
f9977903
DK
8555 * preserve entry 0 which is used by the PMF
8556 */
3395a033 8557 u8 entry = (BP_VN(bp) + 1)*8;
65abd74d
YG
8558
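		/* Pack the MAC into the two MAC_MATCH words: bytes 0-1 go into
		 * the first register, bytes 2-5 into the one at offset +4.
		 */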
8559 val = (mac_addr[0] << 8) | mac_addr[1];
8560 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
8561
8562 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
8563 (mac_addr[4] << 8) | mac_addr[5];
8564 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
8565
f9977903
DK
8566 /* Enable the PME and clear the status */
8567 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmc);
8568 pmc |= PCI_PM_CTRL_PME_ENABLE | PCI_PM_CTRL_PME_STATUS;
8569 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, pmc);
8570
65abd74d
YG
8571 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
8572
8573 } else
8574 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 8575
619c5cb6
VZ
8576 /* Send the request to the MCP */
8577 if (!BP_NOMCP(bp))
8578 reset_code = bnx2x_fw_command(bp, reset_code, 0);
8579 else {
8580 int path = BP_PATH(bp);
8581
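		/* No MCP: emulate its bookkeeping ourselves - decrement the
		 * per-path and per-port load counters and pick the
		 * COMMON/PORT/FUNCTION unload scope depending on which
		 * counter reached zero.
		 */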
51c1a580 8582 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] %d, %d, %d\n",
619c5cb6
VZ
8583 path, load_count[path][0], load_count[path][1],
8584 load_count[path][2]);
8585 load_count[path][0]--;
8586 load_count[path][1 + port]--;
51c1a580 8587 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] %d, %d, %d\n",
619c5cb6
VZ
8588 path, load_count[path][0], load_count[path][1],
8589 load_count[path][2]);
8590 if (load_count[path][0] == 0)
8591 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
8592 else if (load_count[path][1 + port] == 0)
8593 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
8594 else
8595 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
8596 }
8597
8598 return reset_code;
8599}
8600
8601/**
8602 * bnx2x_send_unload_done - send UNLOAD_DONE command to the MCP.
8603 *
8604 * @bp: driver handle
5d07d868 8605 * @keep_link: true iff link should be kept up
619c5cb6 8606 */
5d07d868 8607void bnx2x_send_unload_done(struct bnx2x *bp, bool keep_link)
619c5cb6 8608{
5d07d868
YM
8609 u32 reset_param = keep_link ? DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET : 0;
8610
619c5cb6
VZ
8611 /* Report UNLOAD_DONE to MCP */
8612 if (!BP_NOMCP(bp))
5d07d868 8613 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, reset_param);
619c5cb6
VZ
8614}
8615
1191cb83 8616static int bnx2x_func_wait_started(struct bnx2x *bp)
6debea87
DK
8617{
8618 int tout = 50;
8619 int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
8620
8621 if (!bp->port.pmf)
8622 return 0;
8623
8624 /*
8625 * (assumption: No Attention from MCP at this stage)
8626	 * The PMF is probably in the middle of a TXdisable/enable transaction.
8627	 * 1. Sync IRQs for the default SB
8628	 * 2. Sync the SP queue - this guarantees us that attention handling started
8629	 * 3. Wait until the TXdisable/enable transaction completes
8630	 *
8631	 * 1+2 guarantee that if a DCBx attention was scheduled, it has already
8632	 * changed the pending bit of the transaction from STARTED-->TX_STOPPED;
8633	 * if we already received a completion for the transaction, the state is TX_STOPPED.
8634 * State will return to STARTED after completion of TX_STOPPED-->STARTED
8635 * transaction.
8636 */
8637
8638 /* make sure default SB ISR is done */
8639 if (msix)
8640 synchronize_irq(bp->msix_table[0].vector);
8641 else
8642 synchronize_irq(bp->pdev->irq);
8643
8644 flush_workqueue(bnx2x_wq);
8645
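	/* Poll for up to ~1 second (50 x 20ms) for the function to return
	 * to the STARTED state.
	 */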
8646 while (bnx2x_func_get_state(bp, &bp->func_obj) !=
8647 BNX2X_F_STATE_STARTED && tout--)
8648 msleep(20);
8649
8650 if (bnx2x_func_get_state(bp, &bp->func_obj) !=
8651 BNX2X_F_STATE_STARTED) {
8652#ifdef BNX2X_STOP_ON_ERROR
51c1a580 8653 BNX2X_ERR("Wrong function state\n");
6debea87
DK
8654 return -EBUSY;
8655#else
8656 /*
8657		 * Failed to complete the transaction in a "good way".
8658		 * Force both transactions with the CLR bit set.
8659 */
3b603066 8660 struct bnx2x_func_state_params func_params = {NULL};
6debea87 8661
51c1a580
MS
8662 DP(NETIF_MSG_IFDOWN,
8663 "Hmmm... unexpected function state! Forcing STARTED-->TX_ST0PPED-->STARTED\n");
6debea87
DK
8664
8665 func_params.f_obj = &bp->func_obj;
8666 __set_bit(RAMROD_DRV_CLR_ONLY,
8667 &func_params.ramrod_flags);
8668
8669		/* STARTED-->TX_STOPPED */
8670 func_params.cmd = BNX2X_F_CMD_TX_STOP;
8671 bnx2x_func_state_change(bp, &func_params);
8672
8674		/* TX_STOPPED-->STARTED */
8674 func_params.cmd = BNX2X_F_CMD_TX_START;
8675 return bnx2x_func_state_change(bp, &func_params);
8676#endif
8677 }
8678
8679 return 0;
8680}
8681
5d07d868 8682void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode, bool keep_link)
619c5cb6
VZ
8683{
8684 int port = BP_PORT(bp);
6383c0b3
AE
8685 int i, rc = 0;
8686 u8 cos;
3b603066 8687 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6
VZ
8688 u32 reset_code;
8689
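	/* Teardown order: drain the Tx queues, remove MAC/MC filters and stop
	 * Rx, ask the MCP for the unload scope, stop all queues and the
	 * function, disable interrupts, and only then reset the HW and
	 * report UNLOAD_DONE.
	 */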
8690 /* Wait until tx fastpath tasks complete */
8691 for_each_tx_queue(bp, i) {
8692 struct bnx2x_fastpath *fp = &bp->fp[i];
8693
6383c0b3 8694 for_each_cos_in_tx_queue(fp, cos)
65565884 8695 rc = bnx2x_clean_tx_queue(bp, fp->txdata_ptr[cos]);
619c5cb6
VZ
8696#ifdef BNX2X_STOP_ON_ERROR
8697 if (rc)
8698 return;
8699#endif
8700 }
8701
8702 /* Give HW time to discard old tx messages */
0926d499 8703 usleep_range(1000, 2000);
619c5cb6
VZ
8704
8705 /* Clean all ETH MACs */
15192a8c
BW
8706 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_ETH_MAC,
8707 false);
619c5cb6
VZ
8708 if (rc < 0)
8709 BNX2X_ERR("Failed to delete all ETH macs: %d\n", rc);
8710
8711 /* Clean up UC list */
15192a8c 8712 rc = bnx2x_del_all_macs(bp, &bp->sp_objs[0].mac_obj, BNX2X_UC_LIST_MAC,
619c5cb6
VZ
8713 true);
8714 if (rc < 0)
51c1a580
MS
8715 BNX2X_ERR("Failed to schedule DEL commands for UC MACs list: %d\n",
8716 rc);
619c5cb6
VZ
8717
8718 /* Disable LLH */
8719 if (!CHIP_IS_E1(bp))
8720 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
8721
8722 /* Set "drop all" (stop Rx).
8723 * We need to take a netif_addr_lock() here in order to prevent
8724 * a race between the completion code and this code.
8725 */
8726 netif_addr_lock_bh(bp->dev);
8727 /* Schedule the rx_mode command */
8728 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
8729 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
8730 else
8731 bnx2x_set_storm_rx_mode(bp);
8732
8733 /* Cleanup multicast configuration */
8734 rparam.mcast_obj = &bp->mcast_obj;
8735 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
8736 if (rc < 0)
8737 BNX2X_ERR("Failed to send DEL multicast command: %d\n", rc);
8738
8739 netif_addr_unlock_bh(bp->dev);
8740
f1929b01 8741 bnx2x_iov_chip_cleanup(bp);
619c5cb6 8742
6debea87
DK
8743
8744 /*
8745	 * Send the UNLOAD_REQUEST to the MCP. This will return whether
8746 * this function should perform FUNC, PORT or COMMON HW
8747 * reset.
8748 */
8749 reset_code = bnx2x_send_unload_req(bp, unload_mode);
8750
8751 /*
8752 * (assumption: No Attention from MCP at this stage)
8753	 * The PMF is probably in the middle of a TXdisable/enable transaction.
8754 */
8755 rc = bnx2x_func_wait_started(bp);
8756 if (rc) {
8757 BNX2X_ERR("bnx2x_func_wait_started failed\n");
8758#ifdef BNX2X_STOP_ON_ERROR
8759 return;
8760#endif
8761 }
8762
34f80b04 8763 /* Close multi and leading connections
619c5cb6
VZ
8764 * Completions for ramrods are collected in a synchronous way
8765 */
55c11941 8766 for_each_eth_queue(bp, i)
619c5cb6 8767 if (bnx2x_stop_queue(bp, i))
523224a3
DK
8768#ifdef BNX2X_STOP_ON_ERROR
8769 return;
8770#else
228241eb 8771 goto unload_error;
523224a3 8772#endif
55c11941
MS
8773
8774 if (CNIC_LOADED(bp)) {
8775 for_each_cnic_queue(bp, i)
8776 if (bnx2x_stop_queue(bp, i))
8777#ifdef BNX2X_STOP_ON_ERROR
8778 return;
8779#else
8780 goto unload_error;
8781#endif
8782 }
8783
619c5cb6
VZ
8784	/* If the SP settings didn't get completed so far - something
8785	 * very wrong has happened.
8786 */
8787 if (!bnx2x_wait_sp_comp(bp, ~0x0UL))
8788 BNX2X_ERR("Hmmm... Common slow path ramrods got stuck!\n");
a2fbb9ea 8789
619c5cb6
VZ
8790#ifndef BNX2X_STOP_ON_ERROR
8791unload_error:
8792#endif
523224a3 8793 rc = bnx2x_func_stop(bp);
da5a662a 8794 if (rc) {
523224a3 8795 BNX2X_ERR("Function stop failed!\n");
da5a662a 8796#ifdef BNX2X_STOP_ON_ERROR
523224a3 8797 return;
523224a3 8798#endif
34f80b04 8799 }
a2fbb9ea 8800
523224a3
DK
8801 /* Disable HW interrupts, NAPI */
8802 bnx2x_netif_stop(bp, 1);
26614ba5
MS
8803 /* Delete all NAPI objects */
8804 bnx2x_del_all_napi(bp);
55c11941
MS
8805 if (CNIC_LOADED(bp))
8806 bnx2x_del_all_napi_cnic(bp);
523224a3
DK
8807
8808 /* Release IRQs */
d6214d7a 8809 bnx2x_free_irq(bp);
523224a3 8810
a2fbb9ea 8811 /* Reset the chip */
619c5cb6
VZ
8812 rc = bnx2x_reset_hw(bp, reset_code);
8813 if (rc)
8814 BNX2X_ERR("HW_RESET failed\n");
a2fbb9ea 8815
356e2385 8816
619c5cb6 8817 /* Report UNLOAD_DONE to MCP */
5d07d868 8818 bnx2x_send_unload_done(bp, keep_link);
72fd0718
VZ
8819}
8820
9f6c9258 8821void bnx2x_disable_close_the_gate(struct bnx2x *bp)
72fd0718
VZ
8822{
8823 u32 val;
8824
51c1a580 8825 DP(NETIF_MSG_IFDOWN, "Disabling \"close the gates\"\n");
72fd0718
VZ
8826
8827 if (CHIP_IS_E1(bp)) {
8828 int port = BP_PORT(bp);
8829 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
8830 MISC_REG_AEU_MASK_ATTN_FUNC_0;
8831
8832 val = REG_RD(bp, addr);
8833 val &= ~(0x300);
8834 REG_WR(bp, addr, val);
619c5cb6 8835 } else {
72fd0718
VZ
8836 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
8837 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
8838 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
8839 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
8840 }
8841}
8842
72fd0718
VZ
8843/* Close gates #2, #3 and #4: */
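/* Gate #4 maps to discarding doorbells and gate #2 to discarding internal
 * writes (both in the PXP host unit); gate #3 blocks interrupt delivery
 * from the HC/IGU.
 */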
8844static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
8845{
c9ee9206 8846 u32 val;
72fd0718
VZ
8847
8848 /* Gates #2 and #4a are closed/opened for "not E1" only */
8849 if (!CHIP_IS_E1(bp)) {
8850 /* #4 */
c9ee9206 8851 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS, !!close);
72fd0718 8852 /* #2 */
c9ee9206 8853 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES, !!close);
72fd0718
VZ
8854 }
8855
8856 /* #3 */
c9ee9206
VZ
8857 if (CHIP_IS_E1x(bp)) {
8858 /* Prevent interrupts from HC on both ports */
8859 val = REG_RD(bp, HC_REG_CONFIG_1);
8860 REG_WR(bp, HC_REG_CONFIG_1,
8861 (!close) ? (val | HC_CONFIG_1_REG_BLOCK_DISABLE_1) :
8862 (val & ~(u32)HC_CONFIG_1_REG_BLOCK_DISABLE_1));
8863
8864 val = REG_RD(bp, HC_REG_CONFIG_0);
8865 REG_WR(bp, HC_REG_CONFIG_0,
8866 (!close) ? (val | HC_CONFIG_0_REG_BLOCK_DISABLE_0) :
8867 (val & ~(u32)HC_CONFIG_0_REG_BLOCK_DISABLE_0));
8868 } else {
d82603c6 8869 /* Prevent incoming interrupts in IGU */
c9ee9206
VZ
8870 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8871
8872 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION,
8873 (!close) ?
8874 (val | IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE) :
8875 (val & ~(u32)IGU_BLOCK_CONFIGURATION_REG_BLOCK_ENABLE));
8876 }
72fd0718 8877
51c1a580 8878 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "%s gates #2, #3 and #4\n",
72fd0718
VZ
8879 close ? "closing" : "opening");
8880 mmiowb();
8881}
8882
8883#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
8884
8885static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
8886{
8887	/* Save the current `magic' bit and set it so the MF config survives */
8888 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8889 *magic_val = val & SHARED_MF_CLP_MAGIC;
8890 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
8891}
8892
e8920674
DK
8893/**
8894 * bnx2x_clp_reset_done - restore the value of the `magic' bit.
72fd0718 8895 *
e8920674
DK
8896 * @bp: driver handle
8897 * @magic_val: old value of the `magic' bit.
72fd0718
VZ
8898 */
8899static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
8900{
8901 /* Restore the `magic' bit value... */
72fd0718
VZ
8902 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
8903 MF_CFG_WR(bp, shared_mf_config.clp_mb,
8904 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
8905}
8906
f85582f8 8907/**
e8920674 8908 * bnx2x_reset_mcp_prep - prepare for MCP reset.
72fd0718 8909 *
e8920674
DK
8910 * @bp: driver handle
8911 * @magic_val: old value of 'magic' bit.
8912 *
8913 * Takes care of CLP configurations.
72fd0718
VZ
8914 */
8915static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
8916{
8917 u32 shmem;
8918 u32 validity_offset;
8919
51c1a580 8920 DP(NETIF_MSG_HW | NETIF_MSG_IFUP, "Starting\n");
72fd0718
VZ
8921
8922 /* Set `magic' bit in order to save MF config */
8923 if (!CHIP_IS_E1(bp))
8924 bnx2x_clp_reset_prep(bp, magic_val);
8925
8926 /* Get shmem offset */
8927 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
c55e771b
BW
8928 validity_offset =
8929 offsetof(struct shmem_region, validity_map[BP_PORT(bp)]);
72fd0718
VZ
8930
8931 /* Clear validity map flags */
8932 if (shmem > 0)
8933 REG_WR(bp, shmem + validity_offset, 0);
8934}
8935
8936#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
8937#define MCP_ONE_TIMEOUT 100 /* 100 ms */
8938
e8920674
DK
8939/**
8940 * bnx2x_mcp_wait_one - wait for MCP_ONE_TIMEOUT
72fd0718 8941 *
e8920674 8942 * @bp: driver handle
72fd0718 8943 */
1191cb83 8944static void bnx2x_mcp_wait_one(struct bnx2x *bp)
72fd0718
VZ
8945{
8946	/* special handling for emulation and FPGA:
8947	 * wait 10 times longer */
8948 if (CHIP_REV_IS_SLOW(bp))
8949 msleep(MCP_ONE_TIMEOUT*10);
8950 else
8951 msleep(MCP_ONE_TIMEOUT);
8952}
8953
1b6e2ceb
DK
8954/*
8955 * initializes bp->common.shmem_base and waits for validity signature to appear
8956 */
8957static int bnx2x_init_shmem(struct bnx2x *bp)
72fd0718 8958{
1b6e2ceb
DK
8959 int cnt = 0;
8960 u32 val = 0;
72fd0718 8961
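	/* Re-read the shmem base every MCP_ONE_TIMEOUT until the MB validity
	 * flag appears, giving up after MCP_TIMEOUT overall.
	 */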
1b6e2ceb
DK
8962 do {
8963 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
8964 if (bp->common.shmem_base) {
8965 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
8966 if (val & SHR_MEM_VALIDITY_MB)
8967 return 0;
8968 }
72fd0718 8969
1b6e2ceb 8970 bnx2x_mcp_wait_one(bp);
72fd0718 8971
1b6e2ceb 8972 } while (cnt++ < (MCP_TIMEOUT / MCP_ONE_TIMEOUT));
72fd0718 8973
1b6e2ceb 8974 BNX2X_ERR("BAD MCP validity signature\n");
72fd0718 8975
1b6e2ceb
DK
8976 return -ENODEV;
8977}
72fd0718 8978
1b6e2ceb
DK
8979static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
8980{
8981 int rc = bnx2x_init_shmem(bp);
72fd0718 8982
72fd0718
VZ
8983 /* Restore the `magic' bit value */
8984 if (!CHIP_IS_E1(bp))
8985 bnx2x_clp_reset_done(bp, magic_val);
8986
8987 return rc;
8988}
8989
8990static void bnx2x_pxp_prep(struct bnx2x *bp)
8991{
8992 if (!CHIP_IS_E1(bp)) {
8993 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
8994 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
72fd0718
VZ
8995 mmiowb();
8996 }
8997}
8998
8999/*
9000 * Reset the whole chip except for:
9001 * - PCIE core
9002 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
9003 * one reset bit)
9004 * - IGU
9005 * - MISC (including AEU)
9006 * - GRC
9007 * - RBCN, RBCP
9008 */
c9ee9206 9009static void bnx2x_process_kill_chip_reset(struct bnx2x *bp, bool global)
72fd0718
VZ
9010{
9011 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
8736c826 9012 u32 global_bits2, stay_reset2;
c9ee9206
VZ
9013
9014 /*
9015 * Bits that have to be set in reset_mask2 if we want to reset 'global'
9016 * (per chip) blocks.
9017 */
9018 global_bits2 =
9019 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CPU |
9020 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_CMN_CORE;
72fd0718 9021
c55e771b
BW
9022 /* Don't reset the following blocks.
9023 * Important: per port blocks (such as EMAC, BMAC, UMAC) can't be
9024	 * reset, as in a 4-port device they might still be owned
9025 * by the MCP (there is only one leader per path).
9026 */
72fd0718
VZ
9027 not_reset_mask1 =
9028 MISC_REGISTERS_RESET_REG_1_RST_HC |
9029 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
9030 MISC_REGISTERS_RESET_REG_1_RST_PXP;
9031
9032 not_reset_mask2 =
c9ee9206 9033 MISC_REGISTERS_RESET_REG_2_RST_PCI_MDIO |
72fd0718
VZ
9034 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
9035 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
9036 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
9037 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
9038 MISC_REGISTERS_RESET_REG_2_RST_GRC |
9039 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
8736c826
VZ
9040 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B |
9041 MISC_REGISTERS_RESET_REG_2_RST_ATC |
c55e771b
BW
9042 MISC_REGISTERS_RESET_REG_2_PGLC |
9043 MISC_REGISTERS_RESET_REG_2_RST_BMAC0 |
9044 MISC_REGISTERS_RESET_REG_2_RST_BMAC1 |
9045 MISC_REGISTERS_RESET_REG_2_RST_EMAC0 |
9046 MISC_REGISTERS_RESET_REG_2_RST_EMAC1 |
9047 MISC_REGISTERS_RESET_REG_2_UMAC0 |
9048 MISC_REGISTERS_RESET_REG_2_UMAC1;
72fd0718 9049
8736c826
VZ
9050 /*
9051 * Keep the following blocks in reset:
9052 * - all xxMACs are handled by the bnx2x_link code.
9053 */
9054 stay_reset2 =
8736c826
VZ
9055 MISC_REGISTERS_RESET_REG_2_XMAC |
9056 MISC_REGISTERS_RESET_REG_2_XMAC_SOFT;
9057
9058 /* Full reset masks according to the chip */
72fd0718
VZ
9059 reset_mask1 = 0xffffffff;
9060
9061 if (CHIP_IS_E1(bp))
9062 reset_mask2 = 0xffff;
8736c826 9063 else if (CHIP_IS_E1H(bp))
72fd0718 9064 reset_mask2 = 0x1ffff;
8736c826
VZ
9065 else if (CHIP_IS_E2(bp))
9066 reset_mask2 = 0xfffff;
9067 else /* CHIP_IS_E3 */
9068 reset_mask2 = 0x3ffffff;
c9ee9206
VZ
9069
9070 /* Don't reset global blocks unless we need to */
9071 if (!global)
9072 reset_mask2 &= ~global_bits2;
9073
9074 /*
9075 * In case of attention in the QM, we need to reset PXP
9076 * (MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR) before QM
9077 * because otherwise QM reset would release 'close the gates' shortly
9078 * before resetting the PXP, then the PSWRQ would send a write
9079 * request to PGLUE. Then when PXP is reset, PGLUE would try to
9080 * read the payload data from PSWWR, but PSWWR would not
9081	 * respond. The write queue in PGLUE would get stuck, and DMAE commands
9082 * would not return. Therefore it's important to reset the second
9083 * reset register (containing the
9084 * MISC_REGISTERS_RESET_REG_2_RST_PXP_RQ_RD_WR bit) before the
9085 * first one (containing the MISC_REGISTERS_RESET_REG_1_RST_QM
9086 * bit).
9087 */
72fd0718
VZ
9088 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
9089 reset_mask2 & (~not_reset_mask2));
9090
c9ee9206
VZ
9091 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
9092 reset_mask1 & (~not_reset_mask1));
9093
72fd0718
VZ
9094 barrier();
9095 mmiowb();
9096
8736c826
VZ
9097 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
9098 reset_mask2 & (~stay_reset2));
9099
9100 barrier();
9101 mmiowb();
9102
c9ee9206 9103 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
72fd0718
VZ
9104 mmiowb();
9105}
9106
c9ee9206
VZ
9107/**
9108 * bnx2x_er_poll_igu_vq - poll for the IGU pending writes bit.
9109 *
9110 * @bp:	driver handle
9111 *
9112 * The pending writes bit should get cleared in no more than 1s.
9113 * Returns 0 if the pending writes bit gets cleared within that
9114 * time.
9115 */
9116static int bnx2x_er_poll_igu_vq(struct bnx2x *bp)
9117{
9118 u32 cnt = 1000;
9119 u32 pend_bits = 0;
9120
9121 do {
9122 pend_bits = REG_RD(bp, IGU_REG_PENDING_BITS_STATUS);
9123
9124 if (pend_bits == 0)
9125 break;
9126
0926d499 9127 usleep_range(1000, 2000);
c9ee9206
VZ
9128 } while (cnt-- > 0);
9129
9130 if (cnt <= 0) {
9131 BNX2X_ERR("Still pending IGU requests pend_bits=%x!\n",
9132 pend_bits);
9133 return -EBUSY;
9134 }
9135
9136 return 0;
9137}
9138
9139static int bnx2x_process_kill(struct bnx2x *bp, bool global)
72fd0718
VZ
9140{
9141 int cnt = 1000;
9142 u32 val = 0;
9143 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
2de67439 9144 u32 tags_63_32 = 0;
72fd0718
VZ
9145
9146 /* Empty the Tetris buffer, wait for 1s */
9147 do {
9148 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
9149 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
9150 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
9151 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
9152 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
c55e771b
BW
9153 if (CHIP_IS_E3(bp))
9154 tags_63_32 = REG_RD(bp, PGLUE_B_REG_TAGS_63_32);
9155
72fd0718
VZ
9156 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
9157 ((port_is_idle_0 & 0x1) == 0x1) &&
9158 ((port_is_idle_1 & 0x1) == 0x1) &&
c55e771b
BW
9159 (pgl_exp_rom2 == 0xffffffff) &&
9160 (!CHIP_IS_E3(bp) || (tags_63_32 == 0xffffffff)))
72fd0718 9161 break;
0926d499 9162 usleep_range(1000, 2000);
72fd0718
VZ
9163 } while (cnt-- > 0);
9164
9165 if (cnt <= 0) {
51c1a580
MS
9166 BNX2X_ERR("Tetris buffer didn't get empty or there are still outstanding read requests after 1s!\n");
9167 BNX2X_ERR("sr_cnt=0x%08x, blk_cnt=0x%08x, port_is_idle_0=0x%08x, port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
72fd0718
VZ
9168 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
9169 pgl_exp_rom2);
9170 return -EAGAIN;
9171 }
9172
9173 barrier();
9174
9175 /* Close gates #2, #3 and #4 */
9176 bnx2x_set_234_gates(bp, true);
9177
c9ee9206
VZ
9178 /* Poll for IGU VQs for 57712 and newer chips */
9179 if (!CHIP_IS_E1x(bp) && bnx2x_er_poll_igu_vq(bp))
9180 return -EAGAIN;
9181
9182
72fd0718
VZ
9183 /* TBD: Indicate that "process kill" is in progress to MCP */
9184
9185 /* Clear "unprepared" bit */
9186 REG_WR(bp, MISC_REG_UNPREPARED, 0);
9187 barrier();
9188
9189 /* Make sure all is written to the chip before the reset */
9190 mmiowb();
9191
9192 /* Wait for 1ms to empty GLUE and PCI-E core queues,
9193 * PSWHST, GRC and PSWRD Tetris buffer.
9194 */
0926d499 9195 usleep_range(1000, 2000);
72fd0718
VZ
9196
9197 /* Prepare to chip reset: */
9198 /* MCP */
c9ee9206
VZ
9199 if (global)
9200 bnx2x_reset_mcp_prep(bp, &val);
72fd0718
VZ
9201
9202 /* PXP */
9203 bnx2x_pxp_prep(bp);
9204 barrier();
9205
9206 /* reset the chip */
c9ee9206 9207 bnx2x_process_kill_chip_reset(bp, global);
72fd0718
VZ
9208 barrier();
9209
9210 /* Recover after reset: */
9211 /* MCP */
c9ee9206 9212 if (global && bnx2x_reset_mcp_comp(bp, val))
72fd0718
VZ
9213 return -EAGAIN;
9214
c9ee9206
VZ
9215 /* TBD: Add resetting the NO_MCP mode DB here */
9216
72fd0718
VZ
9217 /* Open the gates #2, #3 and #4 */
9218 bnx2x_set_234_gates(bp, false);
9219
9220 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
9221 * reset state, re-enable attentions. */
9222
a2fbb9ea
ET
9223 return 0;
9224}
9225
910cc727 9226static int bnx2x_leader_reset(struct bnx2x *bp)
72fd0718
VZ
9227{
9228 int rc = 0;
c9ee9206 9229 bool global = bnx2x_reset_is_global(bp);
95c6c616
AE
9230 u32 load_code;
9231
9232	/* if we are not going to reset the MCP - load a "fake" driver to reset
9233	 * the HW while this driver is the owner of the HW
9234 */
9235 if (!global && !BP_NOMCP(bp)) {
5d07d868
YM
9236 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ,
9237 DRV_MSG_CODE_LOAD_REQ_WITH_LFA);
95c6c616
AE
9238 if (!load_code) {
9239 BNX2X_ERR("MCP response failure, aborting\n");
9240 rc = -EAGAIN;
9241 goto exit_leader_reset;
9242 }
9243 if ((load_code != FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) &&
9244 (load_code != FW_MSG_CODE_DRV_LOAD_COMMON)) {
9245 BNX2X_ERR("MCP unexpected resp, aborting\n");
9246 rc = -EAGAIN;
9247 goto exit_leader_reset2;
9248 }
9249 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE, 0);
9250 if (!load_code) {
9251 BNX2X_ERR("MCP response failure, aborting\n");
9252 rc = -EAGAIN;
9253 goto exit_leader_reset2;
9254 }
9255 }
c9ee9206 9256
72fd0718 9257 /* Try to recover after the failure */
c9ee9206 9258 if (bnx2x_process_kill(bp, global)) {
51c1a580
MS
9259 BNX2X_ERR("Something bad had happen on engine %d! Aii!\n",
9260 BP_PATH(bp));
72fd0718 9261 rc = -EAGAIN;
95c6c616 9262 goto exit_leader_reset2;
72fd0718
VZ
9263 }
9264
c9ee9206
VZ
9265 /*
9266	 * Clear the RESET_IN_PROGRESS and RESET_GLOBAL bits and update the driver
9267 * state.
9268 */
72fd0718 9269 bnx2x_set_reset_done(bp);
c9ee9206
VZ
9270 if (global)
9271 bnx2x_clear_reset_global(bp);
72fd0718 9272
95c6c616
AE
9273exit_leader_reset2:
9274 /* unload "fake driver" if it was loaded */
9275 if (!global && !BP_NOMCP(bp)) {
9276 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP, 0);
9277 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
9278 }
72fd0718
VZ
9279exit_leader_reset:
9280 bp->is_leader = 0;
c9ee9206
VZ
9281 bnx2x_release_leader_lock(bp);
9282 smp_mb();
72fd0718
VZ
9283 return rc;
9284}
9285
1191cb83 9286static void bnx2x_recovery_failed(struct bnx2x *bp)
c9ee9206
VZ
9287{
9288 netdev_err(bp->dev, "Recovery has failed. Power cycle is needed.\n");
9289
9290 /* Disconnect this device */
9291 netif_device_detach(bp->dev);
9292
9293 /*
9294 * Block ifup for all function on this engine until "process kill"
9295 * or power cycle.
9296 */
9297 bnx2x_set_reset_in_progress(bp);
9298
9299 /* Shut down the power */
9300 bnx2x_set_power_state(bp, PCI_D3hot);
9301
9302 bp->recovery_state = BNX2X_RECOVERY_FAILED;
9303
9304 smp_mb();
9305}
9306
9307/*
9308 * Assumption: runs under rtnl lock. This together with the fact
6383c0b3 9309 * that it's called only from bnx2x_sp_rtnl() ensures that it
72fd0718
VZ
9310 * will never be called when netif_running(bp->dev) is false.
9311 */
9312static void bnx2x_parity_recover(struct bnx2x *bp)
9313{
c9ee9206 9314 bool global = false;
7a752993 9315 u32 error_recovered, error_unrecovered;
95c6c616 9316 bool is_parity;
c9ee9206 9317
72fd0718
VZ
9318 DP(NETIF_MSG_HW, "Handling parity\n");
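	/* Simple state machine: in INIT we try to become the leader and
	 * unload the NIC; in WAIT the leader performs the chip-wide reset
	 * once every other function is down, while non-leaders wait and
	 * then reload.
	 */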
9319 while (1) {
9320 switch (bp->recovery_state) {
9321 case BNX2X_RECOVERY_INIT:
9322 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
95c6c616
AE
9323 is_parity = bnx2x_chk_parity_attn(bp, &global, false);
9324 WARN_ON(!is_parity);
c9ee9206 9325
72fd0718 9326 /* Try to get a LEADER_LOCK HW lock */
c9ee9206
VZ
9327 if (bnx2x_trylock_leader_lock(bp)) {
9328 bnx2x_set_reset_in_progress(bp);
9329 /*
9330 * Check if there is a global attention and if
9331 * there was a global attention, set the global
9332 * reset bit.
9333 */
9334
9335 if (global)
9336 bnx2x_set_reset_global(bp);
9337
72fd0718 9338 bp->is_leader = 1;
c9ee9206 9339 }
72fd0718
VZ
9340
9341 /* Stop the driver */
9342 /* If interface has been removed - break */
5d07d868 9343 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY, false))
72fd0718
VZ
9344 return;
9345
9346 bp->recovery_state = BNX2X_RECOVERY_WAIT;
c9ee9206 9347
c9ee9206
VZ
9348 /* Ensure "is_leader", MCP command sequence and
9349 * "recovery_state" update values are seen on other
9350 * CPUs.
72fd0718 9351 */
c9ee9206 9352 smp_mb();
72fd0718
VZ
9353 break;
9354
9355 case BNX2X_RECOVERY_WAIT:
9356 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
9357 if (bp->is_leader) {
c9ee9206 9358 int other_engine = BP_PATH(bp) ? 0 : 1;
889b9af3
AE
9359 bool other_load_status =
9360 bnx2x_get_load_status(bp, other_engine);
9361 bool load_status =
9362 bnx2x_get_load_status(bp, BP_PATH(bp));
c9ee9206
VZ
9363 global = bnx2x_reset_is_global(bp);
9364
9365 /*
9366 * In case of a parity in a global block, let
9367 * the first leader that performs a
9368 * leader_reset() reset the global blocks in
9369 * order to clear global attentions. Otherwise
9370				 * the gates will remain closed for that
9371 * engine.
9372 */
889b9af3
AE
9373 if (load_status ||
9374 (global && other_load_status)) {
72fd0718
VZ
9375 /* Wait until all other functions get
9376 * down.
9377 */
7be08a72 9378 schedule_delayed_work(&bp->sp_rtnl_task,
72fd0718
VZ
9379 HZ/10);
9380 return;
9381 } else {
9382 /* If all other functions got down -
9383 * try to bring the chip back to
9384 * normal. In any case it's an exit
9385 * point for a leader.
9386 */
c9ee9206
VZ
9387 if (bnx2x_leader_reset(bp)) {
9388 bnx2x_recovery_failed(bp);
72fd0718
VZ
9389 return;
9390 }
9391
c9ee9206
VZ
9392 /* If we are here, means that the
9393 * leader has succeeded and doesn't
9394 * want to be a leader any more. Try
9395					 * to continue as a non-leader.
9396 */
9397 break;
72fd0718
VZ
9398 }
9399 } else { /* non-leader */
c9ee9206 9400 if (!bnx2x_reset_is_done(bp, BP_PATH(bp))) {
72fd0718
VZ
9401 /* Try to get a LEADER_LOCK HW lock as
9402 * long as a former leader may have
9403 * been unloaded by the user or
9404				 * released leadership for another
9405				 * reason.
9406 */
c9ee9206 9407 if (bnx2x_trylock_leader_lock(bp)) {
72fd0718
VZ
9408 /* I'm a leader now! Restart a
9409 * switch case.
9410 */
9411 bp->is_leader = 1;
9412 break;
9413 }
9414
7be08a72 9415 schedule_delayed_work(&bp->sp_rtnl_task,
72fd0718
VZ
9416 HZ/10);
9417 return;
9418
c9ee9206
VZ
9419 } else {
9420 /*
9421 * If there was a global attention, wait
9422 * for it to be cleared.
9423 */
9424 if (bnx2x_reset_is_global(bp)) {
9425 schedule_delayed_work(
7be08a72
AE
9426 &bp->sp_rtnl_task,
9427 HZ/10);
c9ee9206
VZ
9428 return;
9429 }
9430
7a752993
AE
9431 error_recovered =
9432 bp->eth_stats.recoverable_error;
9433 error_unrecovered =
9434 bp->eth_stats.unrecoverable_error;
95c6c616
AE
9435 bp->recovery_state =
9436 BNX2X_RECOVERY_NIC_LOADING;
9437 if (bnx2x_nic_load(bp, LOAD_NORMAL)) {
7a752993 9438 error_unrecovered++;
95c6c616 9439 netdev_err(bp->dev,
51c1a580 9440 "Recovery failed. Power cycle needed\n");
95c6c616
AE
9441 /* Disconnect this device */
9442 netif_device_detach(bp->dev);
9443 /* Shut down the power */
9444 bnx2x_set_power_state(
9445 bp, PCI_D3hot);
9446 smp_mb();
9447 } else {
c9ee9206
VZ
9448 bp->recovery_state =
9449 BNX2X_RECOVERY_DONE;
7a752993 9450 error_recovered++;
c9ee9206
VZ
9451 smp_mb();
9452 }
7a752993
AE
9453 bp->eth_stats.recoverable_error =
9454 error_recovered;
9455 bp->eth_stats.unrecoverable_error =
9456 error_unrecovered;
c9ee9206 9457
72fd0718
VZ
9458 return;
9459 }
9460 }
9461 default:
9462 return;
9463 }
9464 }
9465}
9466
56ad3152
MS
9467static int bnx2x_close(struct net_device *dev);
9468
72fd0718
VZ
9469/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
9470 * scheduled on a general queue in order to prevent a deadlock.
9471 */
7be08a72 9472static void bnx2x_sp_rtnl_task(struct work_struct *work)
34f80b04 9473{
7be08a72 9474 struct bnx2x *bp = container_of(work, struct bnx2x, sp_rtnl_task.work);
34f80b04
EG
9475
9476 rtnl_lock();
9477
8395be5e
AE
9478 if (!netif_running(bp->dev)) {
9479 rtnl_unlock();
9480 return;
9481 }
7be08a72
AE
9482
9483 /* if stop on error is defined no recovery flows should be executed */
9484#ifdef BNX2X_STOP_ON_ERROR
51c1a580 9485	BNX2X_ERR("recovery flow called but STOP_ON_ERROR is defined, so the reset is skipped to allow a debug dump;\n"
7be08a72 9486		  "you will need to reboot when done\n");
b1fb8740 9487 goto sp_rtnl_not_reset;
7be08a72 9488#endif
34f80b04 9489
7be08a72
AE
9490 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE)) {
9491 /*
b1fb8740
VZ
9492 * Clear all pending SP commands as we are going to reset the
9493 * function anyway.
7be08a72 9494 */
b1fb8740
VZ
9495 bp->sp_rtnl_state = 0;
9496 smp_mb();
9497
72fd0718 9498 bnx2x_parity_recover(bp);
b1fb8740 9499
8395be5e
AE
9500 rtnl_unlock();
9501 return;
b1fb8740
VZ
9502 }
9503
9504 if (test_and_clear_bit(BNX2X_SP_RTNL_TX_TIMEOUT, &bp->sp_rtnl_state)) {
9505 /*
9506 * Clear all pending SP commands as we are going to reset the
9507 * function anyway.
9508 */
9509 bp->sp_rtnl_state = 0;
9510 smp_mb();
9511
5d07d868 9512 bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
72fd0718 9513 bnx2x_nic_load(bp, LOAD_NORMAL);
b1fb8740 9514
8395be5e
AE
9515 rtnl_unlock();
9516 return;
72fd0718 9517 }
b1fb8740
VZ
9518#ifdef BNX2X_STOP_ON_ERROR
9519sp_rtnl_not_reset:
9520#endif
9521 if (test_and_clear_bit(BNX2X_SP_RTNL_SETUP_TC, &bp->sp_rtnl_state))
9522 bnx2x_setup_tc(bp->dev, bp->dcbx_port_params.ets.num_of_cos);
a3348722
BW
9523 if (test_and_clear_bit(BNX2X_SP_RTNL_AFEX_F_UPDATE, &bp->sp_rtnl_state))
9524 bnx2x_after_function_update(bp);
8304859a
AE
9525 /*
9526	 * in case of fan failure we need to handle it even if the "stop on error"
9527	 * debug flag is set, since we are trying to prevent permanent overheating
9528	 * damage
9529 */
9530 if (test_and_clear_bit(BNX2X_SP_RTNL_FAN_FAILURE, &bp->sp_rtnl_state)) {
51c1a580 9531 DP(NETIF_MSG_HW, "fan failure detected. Unloading driver\n");
8304859a
AE
9532 netif_device_detach(bp->dev);
9533 bnx2x_close(bp->dev);
8395be5e
AE
9534 rtnl_unlock();
9535 return;
8304859a
AE
9536 }
9537
381ac16b
AE
9538 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_MCAST, &bp->sp_rtnl_state)) {
9539 DP(BNX2X_MSG_SP,
9540 "sending set mcast vf pf channel message from rtnl sp-task\n");
9541 bnx2x_vfpf_set_mcast(bp->dev);
9542 }
9543
9544 if (test_and_clear_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
9545 &bp->sp_rtnl_state)) {
9546 DP(BNX2X_MSG_SP,
9547 "sending set storm rx mode vf pf channel message from rtnl sp-task\n");
9548 bnx2x_vfpf_storm_rx_mode(bp);
9549 }
9550
3ec9f9ca
AE
9551 if (test_and_clear_bit(BNX2X_SP_RTNL_HYPERVISOR_VLAN,
9552 &bp->sp_rtnl_state))
9553 bnx2x_pf_set_vfs_vlan(bp);
9554
8395be5e
AE
9555 /* work which needs rtnl lock not-taken (as it takes the lock itself and
9556 * can be called from other contexts as well)
9557 */
34f80b04 9558 rtnl_unlock();
8395be5e 9559
6411280a 9560 /* enable SR-IOV if applicable */
8395be5e 9561 if (IS_SRIOV(bp) && test_and_clear_bit(BNX2X_SP_RTNL_ENABLE_SRIOV,
3c76feff
AE
9562 &bp->sp_rtnl_state)) {
9563 bnx2x_disable_sriov(bp);
6411280a 9564 bnx2x_enable_sriov(bp);
3c76feff 9565 }
34f80b04
EG
9566}
9567
3deb8167
YR
9568static void bnx2x_period_task(struct work_struct *work)
9569{
9570 struct bnx2x *bp = container_of(work, struct bnx2x, period_task.work);
9571
9572 if (!netif_running(bp->dev))
9573 goto period_task_exit;
9574
9575 if (CHIP_REV_IS_SLOW(bp)) {
9576 BNX2X_ERR("period task called on emulation, ignoring\n");
9577 goto period_task_exit;
9578 }
9579
9580 bnx2x_acquire_phy_lock(bp);
9581 /*
9582 * The barrier is needed to ensure the ordering between the writing to
9583 * the bp->port.pmf in the bnx2x_nic_load() or bnx2x_pmf_update() and
9584 * the reading here.
9585 */
9586 smp_mb();
9587 if (bp->port.pmf) {
9588 bnx2x_period_func(&bp->link_params, &bp->link_vars);
9589
9590 /* Re-queue task in 1 sec */
9591 queue_delayed_work(bnx2x_wq, &bp->period_task, 1*HZ);
9592 }
9593
9594 bnx2x_release_phy_lock(bp);
9595period_task_exit:
9596 return;
9597}
9598
a2fbb9ea
ET
9599/*
9600 * Init service functions
9601 */
9602
b56e9670 9603u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
f2e0899f
DK
9604{
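	/* Pretend registers are laid out at a fixed stride per absolute
	 * function; derive the stride from F0/F1 and index by BP_ABS_FUNC().
	 */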
9605 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
9606 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
9607 return base + (BP_ABS_FUNC(bp)) * stride;
f1ef27ef
EG
9608}
9609
1ef1d45a
BW
9610static void bnx2x_prev_unload_close_mac(struct bnx2x *bp,
9611 struct bnx2x_mac_vals *vals)
34f80b04 9612{
452427b0
YM
9613 u32 val, base_addr, offset, mask, reset_reg;
9614 bool mac_stopped = false;
9615 u8 port = BP_PORT(bp);
34f80b04 9616
1ef1d45a
BW
9617 /* reset addresses as they also mark which values were changed */
9618 vals->bmac_addr = 0;
9619 vals->umac_addr = 0;
9620 vals->xmac_addr = 0;
9621 vals->emac_addr = 0;
9622
452427b0 9623 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_2);
f16da43b 9624
452427b0
YM
9625 if (!CHIP_IS_E3(bp)) {
9626 val = REG_RD(bp, NIG_REG_BMAC0_REGS_OUT_EN + port * 4);
9627 mask = MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port;
9628 if ((mask & reset_reg) && val) {
9629 u32 wb_data[2];
9630 BNX2X_DEV_INFO("Disable bmac Rx\n");
9631 base_addr = BP_PORT(bp) ? NIG_REG_INGRESS_BMAC1_MEM
9632 : NIG_REG_INGRESS_BMAC0_MEM;
9633 offset = CHIP_IS_E2(bp) ? BIGMAC2_REGISTER_BMAC_CONTROL
9634 : BIGMAC_REGISTER_BMAC_CONTROL;
7a06a122 9635
452427b0
YM
9636 /*
9637 * use rd/wr since we cannot use dmae. This is safe
9638 * since MCP won't access the bus due to the request
9639 * to unload, and no function on the path can be
9640 * loaded at this time.
9641 */
9642 wb_data[0] = REG_RD(bp, base_addr + offset);
9643 wb_data[1] = REG_RD(bp, base_addr + offset + 0x4);
1ef1d45a
BW
9644 vals->bmac_addr = base_addr + offset;
9645 vals->bmac_val[0] = wb_data[0];
9646 vals->bmac_val[1] = wb_data[1];
452427b0 9647 wb_data[0] &= ~BMAC_CONTROL_RX_ENABLE;
1ef1d45a
BW
9648 REG_WR(bp, vals->bmac_addr, wb_data[0]);
9649 REG_WR(bp, vals->bmac_addr + 0x4, wb_data[1]);
452427b0
YM
9650
9651 }
9652 BNX2X_DEV_INFO("Disable emac Rx\n");
1ef1d45a
BW
9653 vals->emac_addr = NIG_REG_NIG_EMAC0_EN + BP_PORT(bp)*4;
9654 vals->emac_val = REG_RD(bp, vals->emac_addr);
9655 REG_WR(bp, vals->emac_addr, 0);
452427b0
YM
9656 mac_stopped = true;
9657 } else {
9658 if (reset_reg & MISC_REGISTERS_RESET_REG_2_XMAC) {
9659 BNX2X_DEV_INFO("Disable xmac Rx\n");
9660 base_addr = BP_PORT(bp) ? GRCBASE_XMAC1 : GRCBASE_XMAC0;
9661 val = REG_RD(bp, base_addr + XMAC_REG_PFC_CTRL_HI);
9662 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9663 val & ~(1 << 1));
9664 REG_WR(bp, base_addr + XMAC_REG_PFC_CTRL_HI,
9665 val | (1 << 1));
1ef1d45a
BW
9666 vals->xmac_addr = base_addr + XMAC_REG_CTRL;
9667 vals->xmac_val = REG_RD(bp, vals->xmac_addr);
9668 REG_WR(bp, vals->xmac_addr, 0);
452427b0
YM
9669 mac_stopped = true;
9670 }
9671 mask = MISC_REGISTERS_RESET_REG_2_UMAC0 << port;
9672 if (mask & reset_reg) {
9673 BNX2X_DEV_INFO("Disable umac Rx\n");
9674 base_addr = BP_PORT(bp) ? GRCBASE_UMAC1 : GRCBASE_UMAC0;
1ef1d45a
BW
9675 vals->umac_addr = base_addr + UMAC_REG_COMMAND_CONFIG;
9676 vals->umac_val = REG_RD(bp, vals->umac_addr);
9677 REG_WR(bp, vals->umac_addr, 0);
452427b0
YM
9678 mac_stopped = true;
9679 }
9680 }
9681
9682 if (mac_stopped)
9683 msleep(20);
9684
9685}
9686
9687#define BNX2X_PREV_UNDI_PROD_ADDR(p) (BAR_TSTRORM_INTMEM + 0x1508 + ((p) << 4))
9688#define BNX2X_PREV_UNDI_RCQ(val) ((val) & 0xffff)
9689#define BNX2X_PREV_UNDI_BD(val) ((val) >> 16 & 0xffff)
9690#define BNX2X_PREV_UNDI_PROD(rcq, bd) ((bd) << 16 | (rcq))
9691
1dd06ae8 9692static void bnx2x_prev_unload_undi_inc(struct bnx2x *bp, u8 port, u8 inc)
452427b0
YM
9693{
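	/* The UNDI producer register packs the BD producer in the high 16
	 * bits and the RCQ producer in the low 16 bits; bump both by 'inc'.
	 */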
9694 u16 rcq, bd;
9695 u32 tmp_reg = REG_RD(bp, BNX2X_PREV_UNDI_PROD_ADDR(port));
9696
9697 rcq = BNX2X_PREV_UNDI_RCQ(tmp_reg) + inc;
9698 bd = BNX2X_PREV_UNDI_BD(tmp_reg) + inc;
9699
9700 tmp_reg = BNX2X_PREV_UNDI_PROD(rcq, bd);
9701 REG_WR(bp, BNX2X_PREV_UNDI_PROD_ADDR(port), tmp_reg);
9702
9703 BNX2X_DEV_INFO("UNDI producer [%d] rings bd -> 0x%04x, rcq -> 0x%04x\n",
9704 port, bd, rcq);
9705}
9706
0329aba1 9707static int bnx2x_prev_mcp_done(struct bnx2x *bp)
452427b0 9708{
5d07d868
YM
9709 u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE,
9710 DRV_MSG_CODE_UNLOAD_SKIP_LINK_RESET);
452427b0
YM
9711 if (!rc) {
9712 BNX2X_ERR("MCP response failure, aborting\n");
9713 return -EBUSY;
9714 }
9715
9716 return 0;
9717}
9718
c63da990
BW
9719static struct bnx2x_prev_path_list *
9720 bnx2x_prev_path_get_entry(struct bnx2x *bp)
9721{
9722 struct bnx2x_prev_path_list *tmp_list;
9723
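	/* A path is identified by its PCI bus number, slot and path index;
	 * look it up in the global bnx2x_prev_list.
	 */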
9724 list_for_each_entry(tmp_list, &bnx2x_prev_list, list)
9725 if (PCI_SLOT(bp->pdev->devfn) == tmp_list->slot &&
9726 bp->pdev->bus->number == tmp_list->bus &&
9727 BP_PATH(bp) == tmp_list->path)
9728 return tmp_list;
9729
9730 return NULL;
9731}
9732
7fa6f340
YM
9733static int bnx2x_prev_path_mark_eeh(struct bnx2x *bp)
9734{
9735 struct bnx2x_prev_path_list *tmp_list;
9736 int rc;
9737
9738 rc = down_interruptible(&bnx2x_prev_sem);
9739 if (rc) {
9740 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9741 return rc;
9742 }
9743
9744 tmp_list = bnx2x_prev_path_get_entry(bp);
9745 if (tmp_list) {
9746 tmp_list->aer = 1;
9747 rc = 0;
9748 } else {
9749 BNX2X_ERR("path %d: Entry does not exist for eeh; Flow occurs before initial insmod is over ?\n",
9750 BP_PATH(bp));
9751 }
9752
9753 up(&bnx2x_prev_sem);
9754
9755 return rc;
9756}
9757
0329aba1 9758static bool bnx2x_prev_is_path_marked(struct bnx2x *bp)
452427b0
YM
9759{
9760 struct bnx2x_prev_path_list *tmp_list;
9761 int rc = false;
9762
9763 if (down_trylock(&bnx2x_prev_sem))
9764 return false;
9765
7fa6f340
YM
9766 tmp_list = bnx2x_prev_path_get_entry(bp);
9767 if (tmp_list) {
9768 if (tmp_list->aer) {
9769 DP(NETIF_MSG_HW, "Path %d was marked by AER\n",
9770 BP_PATH(bp));
9771 } else {
452427b0
YM
9772 rc = true;
9773 BNX2X_DEV_INFO("Path %d was already cleaned from previous drivers\n",
9774 BP_PATH(bp));
452427b0
YM
9775 }
9776 }
9777
9778 up(&bnx2x_prev_sem);
9779
9780 return rc;
9781}
9782
178135c1
DK
9783bool bnx2x_port_after_undi(struct bnx2x *bp)
9784{
9785 struct bnx2x_prev_path_list *entry;
9786 bool val;
9787
9788 down(&bnx2x_prev_sem);
9789
9790 entry = bnx2x_prev_path_get_entry(bp);
9791 val = !!(entry && (entry->undi & (1 << BP_PORT(bp))));
9792
9793 up(&bnx2x_prev_sem);
9794
9795 return val;
9796}
9797
c63da990 9798static int bnx2x_prev_mark_path(struct bnx2x *bp, bool after_undi)
452427b0
YM
9799{
9800 struct bnx2x_prev_path_list *tmp_list;
9801 int rc;
9802
7fa6f340
YM
9803 rc = down_interruptible(&bnx2x_prev_sem);
9804 if (rc) {
9805 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9806 return rc;
9807 }
9808
9809 /* Check whether the entry for this path already exists */
9810 tmp_list = bnx2x_prev_path_get_entry(bp);
9811 if (tmp_list) {
9812 if (!tmp_list->aer) {
9813 BNX2X_ERR("Re-Marking the path.\n");
9814 } else {
9815 DP(NETIF_MSG_HW, "Removing AER indication from path %d\n",
9816 BP_PATH(bp));
9817 tmp_list->aer = 0;
9818 }
9819 up(&bnx2x_prev_sem);
9820 return 0;
9821 }
9822 up(&bnx2x_prev_sem);
9823
9824 /* Create an entry for this path and add it */
ea4b3857 9825 tmp_list = kmalloc(sizeof(struct bnx2x_prev_path_list), GFP_KERNEL);
452427b0
YM
9826 if (!tmp_list) {
9827 BNX2X_ERR("Failed to allocate 'bnx2x_prev_path_list'\n");
9828 return -ENOMEM;
9829 }
9830
9831 tmp_list->bus = bp->pdev->bus->number;
9832 tmp_list->slot = PCI_SLOT(bp->pdev->devfn);
9833 tmp_list->path = BP_PATH(bp);
7fa6f340 9834 tmp_list->aer = 0;
c63da990 9835 tmp_list->undi = after_undi ? (1 << BP_PORT(bp)) : 0;
452427b0
YM
9836
9837 rc = down_interruptible(&bnx2x_prev_sem);
9838 if (rc) {
9839 BNX2X_ERR("Received %d when tried to take lock\n", rc);
9840 kfree(tmp_list);
9841 } else {
7fa6f340
YM
9842 DP(NETIF_MSG_HW, "Marked path [%d] - finished previous unload\n",
9843 BP_PATH(bp));
452427b0
YM
9844 list_add(&tmp_list->list, &bnx2x_prev_list);
9845 up(&bnx2x_prev_sem);
9846 }
9847
9848 return rc;
9849}
9850
0329aba1 9851static int bnx2x_do_flr(struct bnx2x *bp)
452427b0 9852{
2a80eebc 9853 int i;
452427b0
YM
9854 u16 status;
9855 struct pci_dev *dev = bp->pdev;
9856
8eee694c
YM
9857
9858 if (CHIP_IS_E1x(bp)) {
9859 BNX2X_DEV_INFO("FLR not supported in E1/E1H\n");
9860 return -EINVAL;
9861 }
9862
9863 /* only bootcode REQ_BC_VER_4_INITIATE_FLR and onwards support flr */
9864 if (bp->common.bc_ver < REQ_BC_VER_4_INITIATE_FLR) {
9865 BNX2X_ERR("FLR not supported by BC_VER: 0x%x\n",
9866 bp->common.bc_ver);
9867 return -EINVAL;
9868 }
452427b0 9869
452427b0
YM
9870 /* Wait for Transaction Pending bit clean */
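	/* Poll PCI_EXP_DEVSTA up to four times, backing off 100/200/400 ms
	 * between reads before giving up and resetting anyway.
	 */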
9871 for (i = 0; i < 4; i++) {
9872 if (i)
9873 msleep((1 << (i - 1)) * 100);
9874
2a80eebc 9875 pcie_capability_read_word(dev, PCI_EXP_DEVSTA, &status);
452427b0
YM
9876 if (!(status & PCI_EXP_DEVSTA_TRPND))
9877 goto clear;
9878 }
9879
9880 dev_err(&dev->dev,
9881 "transaction is not cleared; proceeding with reset anyway\n");
9882
9883clear:
452427b0 9884
8eee694c 9885 BNX2X_DEV_INFO("Initiating FLR\n");
452427b0
YM
9886 bnx2x_fw_command(bp, DRV_MSG_CODE_INITIATE_FLR, 0);
9887
9888 return 0;
9889}
9890
0329aba1 9891static int bnx2x_prev_unload_uncommon(struct bnx2x *bp)
452427b0
YM
9892{
9893 int rc;
9894
9895 BNX2X_DEV_INFO("Uncommon unload Flow\n");
9896
9897 /* Test if previous unload process was already finished for this path */
9898 if (bnx2x_prev_is_path_marked(bp))
9899 return bnx2x_prev_mcp_done(bp);
9900
04c46736
YM
9901 BNX2X_DEV_INFO("Path is unmarked\n");
9902
452427b0
YM
9903	/* If the function has FLR capabilities and the existing FW version
9904	 * matches the one required, then FLR will be sufficient to clean any
9905	 * residue left by the previous driver
9906 */
ad5afc89 9907 rc = bnx2x_nic_load_analyze_req(bp, FW_MSG_CODE_DRV_LOAD_FUNCTION);
8eee694c
YM
9908
9909 if (!rc) {
9910 /* fw version is good */
9911 BNX2X_DEV_INFO("FW version matches our own. Attempting FLR\n");
9912 rc = bnx2x_do_flr(bp);
9913 }
9914
9915 if (!rc) {
9916 /* FLR was performed */
9917 BNX2X_DEV_INFO("FLR successful\n");
9918 return 0;
9919 }
9920
9921 BNX2X_DEV_INFO("Could not FLR\n");
452427b0
YM
9922
9923 /* Close the MCP request, return failure*/
9924 rc = bnx2x_prev_mcp_done(bp);
9925 if (!rc)
9926 rc = BNX2X_PREV_WAIT_NEEDED;
9927
9928 return rc;
9929}
9930
0329aba1 9931static int bnx2x_prev_unload_common(struct bnx2x *bp)
452427b0
YM
9932{
9933 u32 reset_reg, tmp_reg = 0, rc;
c63da990 9934 bool prev_undi = false;
1ef1d45a
BW
9935 struct bnx2x_mac_vals mac_vals;
9936
452427b0
YM
9937	/* It is possible a previous function received a 'common' answer,
9938 * but hasn't loaded yet, therefore creating a scenario of
9939 * multiple functions receiving 'common' on the same path.
9940 */
9941 BNX2X_DEV_INFO("Common unload Flow\n");
9942
1ef1d45a
BW
9943 memset(&mac_vals, 0, sizeof(mac_vals));
9944
452427b0
YM
9945 if (bnx2x_prev_is_path_marked(bp))
9946 return bnx2x_prev_mcp_done(bp);
9947
9948 reset_reg = REG_RD(bp, MISC_REG_RESET_REG_1);
9949
9950 /* Reset should be performed after BRB is emptied */
9951 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_BRB1) {
9952 u32 timer_count = 1000;
452427b0
YM
9953
9954 /* Close the MAC Rx to prevent BRB from filling up */
1ef1d45a
BW
9955 bnx2x_prev_unload_close_mac(bp, &mac_vals);
9956
9957 /* close LLH filters towards the BRB */
9958 bnx2x_set_rx_filter(&bp->link_params, 0);
452427b0
YM
9959
9960 /* Check if the UNDI driver was previously loaded
34f80b04
EG
9961 * UNDI driver initializes CID offset for normal bell to 0x7
9962 */
452427b0
YM
9963 if (reset_reg & MISC_REGISTERS_RESET_REG_1_RST_DORQ) {
9964 tmp_reg = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
9965 if (tmp_reg == 0x7) {
9966 BNX2X_DEV_INFO("UNDI previously loaded\n");
9967 prev_undi = true;
9968 /* clear the UNDI indication */
9969 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
a74801c5
YM
9970 /* clear possible idle check errors */
9971 REG_RD(bp, NIG_REG_NIG_INT_STS_CLR_0);
34f80b04 9972 }
452427b0 9973 }
d46f7c4d
DK
9974 if (!CHIP_IS_E1x(bp))
9975 /* block FW from writing to host */
9976 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
9977
452427b0
YM
9978 /* wait until BRB is empty */
9979 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9980 while (timer_count) {
9981 u32 prev_brb = tmp_reg;
34f80b04 9982
452427b0
YM
9983 tmp_reg = REG_RD(bp, BRB1_REG_NUM_OF_FULL_BLOCKS);
9984 if (!tmp_reg)
9985 break;
619c5cb6 9986
452427b0 9987 BNX2X_DEV_INFO("BRB still has 0x%08x\n", tmp_reg);
619c5cb6 9988
452427b0
YM
9989 /* reset timer as long as BRB actually gets emptied */
9990 if (prev_brb > tmp_reg)
9991 timer_count = 1000;
9992 else
9993 timer_count--;
da5a662a 9994
452427b0
YM
9995 /* If UNDI resides in memory, manually increment it */
9996 if (prev_undi)
9997 bnx2x_prev_unload_undi_inc(bp, BP_PORT(bp), 1);
da5a662a 9998
452427b0 9999 udelay(10);
7a06a122 10000 }
452427b0
YM
10001
10002 if (!timer_count)
10003 BNX2X_ERR("Failed to empty BRB, hope for the best\n");
10004
34f80b04 10005 }
f16da43b 10006
452427b0
YM
10007 /* No packets are in the pipeline, path is ready for reset */
10008 bnx2x_reset_common(bp);
10009
1ef1d45a
BW
10010 if (mac_vals.xmac_addr)
10011 REG_WR(bp, mac_vals.xmac_addr, mac_vals.xmac_val);
10012 if (mac_vals.umac_addr)
10013 REG_WR(bp, mac_vals.umac_addr, mac_vals.umac_val);
10014 if (mac_vals.emac_addr)
10015 REG_WR(bp, mac_vals.emac_addr, mac_vals.emac_val);
10016 if (mac_vals.bmac_addr) {
10017 REG_WR(bp, mac_vals.bmac_addr, mac_vals.bmac_val[0]);
10018 REG_WR(bp, mac_vals.bmac_addr + 4, mac_vals.bmac_val[1]);
10019 }
10020
c63da990 10021 rc = bnx2x_prev_mark_path(bp, prev_undi);
452427b0
YM
10022 if (rc) {
10023 bnx2x_prev_mcp_done(bp);
10024 return rc;
10025 }
10026
10027 return bnx2x_prev_mcp_done(bp);
10028}
10029
24f06716
AE
10030/* A previous driver DMAE transaction may have occurred when the pre-boot stage
10031 * ended and boot began, or when a kdump kernel was loaded. Either case would
10032 * invalidate the addresses of the transaction, leaving the was-error bit set in
10033 * the PCI and causing all hw-to-host PCIe transactions to time out. If this
10034 * happened we want to clear the interrupt which detected this from the pglueb,
10035 * as well as the was-done bit
10036 */
0329aba1 10037static void bnx2x_prev_interrupted_dmae(struct bnx2x *bp)
24f06716 10038{
4a25417c
AE
10039 if (!CHIP_IS_E1x(bp)) {
10040 u32 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS);
10041 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN) {
04c46736
YM
10042 DP(BNX2X_MSG_SP,
10043 "'was error' bit was found to be set in pglueb upon startup. Clearing\n");
4a25417c
AE
10044 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR,
10045 1 << BP_FUNC(bp));
10046 }
24f06716
AE
10047 }
10048}
10049
0329aba1 10050static int bnx2x_prev_unload(struct bnx2x *bp)
452427b0
YM
10051{
10052 int time_counter = 10;
10053 u32 rc, fw, hw_lock_reg, hw_lock_val;
10054 BNX2X_DEV_INFO("Entering Previous Unload Flow\n");
10055
24f06716
AE
10056 /* clear hw from errors which may have resulted from an interrupted
10057 * dmae transaction.
10058 */
10059 bnx2x_prev_interrupted_dmae(bp);
10060
10061 /* Release previously held locks */
452427b0
YM
10062 hw_lock_reg = (BP_FUNC(bp) <= 5) ?
10063 (MISC_REG_DRIVER_CONTROL_1 + BP_FUNC(bp) * 8) :
10064 (MISC_REG_DRIVER_CONTROL_7 + (BP_FUNC(bp) - 6) * 8);
10065
10066 hw_lock_val = (REG_RD(bp, hw_lock_reg));
10067 if (hw_lock_val) {
10068 if (hw_lock_val & HW_LOCK_RESOURCE_NVRAM) {
10069 BNX2X_DEV_INFO("Release Previously held NVRAM lock\n");
10070 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
10071 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << BP_PORT(bp)));
10072 }
10073
10074 BNX2X_DEV_INFO("Release Previously held hw lock\n");
10075 REG_WR(bp, hw_lock_reg, 0xffffffff);
10076 } else
10077 BNX2X_DEV_INFO("No need to release hw/nvram locks\n");
10078
10079 if (MCPR_ACCESS_LOCK_LOCK & REG_RD(bp, MCP_REG_MCPR_ACCESS_LOCK)) {
10080 BNX2X_DEV_INFO("Release previously held alr\n");
10081 REG_WR(bp, MCP_REG_MCPR_ACCESS_LOCK, 0);
10082 }
10083
452427b0 10084 do {
7fa6f340 10085 int aer = 0;
452427b0
YM
10086 /* Lock MCP using an unload request */
10087 fw = bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS, 0);
10088 if (!fw) {
10089 BNX2X_ERR("MCP response failure, aborting\n");
10090 rc = -EBUSY;
10091 break;
10092 }
10093
7fa6f340
YM
10094 rc = down_interruptible(&bnx2x_prev_sem);
10095 if (rc) {
10096 BNX2X_ERR("Cannot check for AER; Received %d when tried to take lock\n",
10097 rc);
10098 } else {
10099 /* If Path is marked by EEH, ignore unload status */
10100 aer = !!(bnx2x_prev_path_get_entry(bp) &&
10101 bnx2x_prev_path_get_entry(bp)->aer);
60cde81f 10102 up(&bnx2x_prev_sem);
7fa6f340 10103 }
7fa6f340
YM
10104
10105 if (fw == FW_MSG_CODE_DRV_UNLOAD_COMMON || aer) {
452427b0
YM
10106 rc = bnx2x_prev_unload_common(bp);
10107 break;
10108 }
10109
10110		/* non-common reply from MCP might require looping */
10111 rc = bnx2x_prev_unload_uncommon(bp);
10112 if (rc != BNX2X_PREV_WAIT_NEEDED)
10113 break;
10114
10115 msleep(20);
10116 } while (--time_counter);
10117
10118 if (!time_counter || rc) {
10119 BNX2X_ERR("Failed unloading previous driver, aborting\n");
10120 rc = -EBUSY;
10121 }
10122
c63da990 10123 /* Mark function if its port was used to boot from SAN */
178135c1 10124 if (bnx2x_port_after_undi(bp))
c63da990
BW
10125 bp->link_params.feature_config_flags |=
10126 FEATURE_CONFIG_BOOT_FROM_SAN;
10127
452427b0
YM
10128 BNX2X_DEV_INFO("Finished Previous Unload Flow [%d]\n", rc);
10129
10130 return rc;
34f80b04
EG
10131}
10132
0329aba1 10133static void bnx2x_get_common_hwinfo(struct bnx2x *bp)
34f80b04 10134{
1d187b34 10135 u32 val, val2, val3, val4, id, boot_mode;
72ce58c3 10136 u16 pmc;
34f80b04
EG
10137
10138 /* Get the chip revision id and number. */
10139 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
10140 val = REG_RD(bp, MISC_REG_CHIP_NUM);
10141 id = ((val & 0xffff) << 16);
10142 val = REG_RD(bp, MISC_REG_CHIP_REV);
10143 id |= ((val & 0xf) << 12);
f22fdf25
YM
10144
10145 /* Metal is read from PCI regs, but we can't access >=0x400 from
10146 * the configuration space (so we need to reg_rd)
10147 */
10148 val = REG_RD(bp, PCICFG_OFFSET + PCI_ID_VAL3);
10149 id |= (((val >> 24) & 0xf) << 4);
5a40e08e 10150 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
10151 id |= (val & 0xf);
10152 bp->common.chip_id = id;
523224a3 10153
7e8e02df
BW
10154 /* force 57811 according to MISC register */
10155 if (REG_RD(bp, MISC_REG_CHIP_TYPE) & MISC_REG_CHIP_TYPE_57811_MASK) {
10156 if (CHIP_IS_57810(bp))
10157 bp->common.chip_id = (CHIP_NUM_57811 << 16) |
10158 (bp->common.chip_id & 0x0000FFFF);
10159 else if (CHIP_IS_57810_MF(bp))
10160 bp->common.chip_id = (CHIP_NUM_57811_MF << 16) |
10161 (bp->common.chip_id & 0x0000FFFF);
10162 bp->common.chip_id |= 0x1;
10163 }
10164
523224a3
DK
10165 /* Set doorbell size */
10166 bp->db_size = (1 << BNX2X_DB_SHIFT);
10167
619c5cb6 10168 if (!CHIP_IS_E1x(bp)) {
f2e0899f
DK
10169 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
10170 if ((val & 1) == 0)
10171 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
10172 else
10173 val = (val >> 1) & 1;
10174 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
10175 "2_PORT_MODE");
10176 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
10177 CHIP_2_PORT_MODE;
10178
10179 if (CHIP_MODE_IS_4_PORT(bp))
10180 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
10181 else
10182 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
10183 } else {
10184 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
10185 bp->pfid = bp->pf_num; /* 0..7 */
10186 }
10187
51c1a580
MS
10188 BNX2X_DEV_INFO("pf_id: %x", bp->pfid);
10189
f2e0899f
DK
10190 bp->link_params.chip_id = bp->common.chip_id;
10191 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
523224a3 10192
1c06328c
EG
10193 val = (REG_RD(bp, 0x2874) & 0x55);
10194 if ((bp->common.chip_id & 0x1) ||
10195 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
10196 bp->flags |= ONE_PORT_FLAG;
10197 BNX2X_DEV_INFO("single port device\n");
10198 }
10199
34f80b04 10200 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
754a2f52 10201 bp->common.flash_size = (BNX2X_NVRAM_1MB_SIZE <<
34f80b04
EG
10202 (val & MCPR_NVM_CFG4_FLASH_SIZE));
10203 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
10204 bp->common.flash_size, bp->common.flash_size);
10205
1b6e2ceb
DK
10206 bnx2x_init_shmem(bp);
10207
619c5cb6
VZ
10208
10209
f2e0899f
DK
10210 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
10211 MISC_REG_GENERIC_CR_1 :
10212 MISC_REG_GENERIC_CR_0));
1b6e2ceb 10213
34f80b04 10214 bp->link_params.shmem_base = bp->common.shmem_base;
a22f0788 10215 bp->link_params.shmem2_base = bp->common.shmem2_base;
b884d95b
YR
10216 if (SHMEM2_RD(bp, size) >
10217 (u32)offsetof(struct shmem2_region, lfa_host_addr[BP_PORT(bp)]))
10218 bp->link_params.lfa_base =
10219 REG_RD(bp, bp->common.shmem2_base +
10220 (u32)offsetof(struct shmem2_region,
10221 lfa_host_addr[BP_PORT(bp)]));
10222 else
10223 bp->link_params.lfa_base = 0;
2691d51d
EG
10224 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
10225 bp->common.shmem_base, bp->common.shmem2_base);
34f80b04 10226
f2e0899f 10227 if (!bp->common.shmem_base) {
34f80b04
EG
10228 BNX2X_DEV_INFO("MCP not active\n");
10229 bp->flags |= NO_MCP_FLAG;
10230 return;
10231 }
10232
34f80b04 10233 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 10234 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
10235
10236 bp->link_params.hw_led_mode = ((bp->common.hw_config &
10237 SHARED_HW_CFG_LED_MODE_MASK) >>
10238 SHARED_HW_CFG_LED_MODE_SHIFT);
10239
c2c8b03e
EG
10240 bp->link_params.feature_config_flags = 0;
10241 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
10242 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
10243 bp->link_params.feature_config_flags |=
10244 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10245 else
10246 bp->link_params.feature_config_flags &=
10247 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
10248
34f80b04
EG
10249 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
10250 bp->common.bc_ver = val;
10251 BNX2X_DEV_INFO("bc_ver %X\n", val);
10252 if (val < BNX2X_BC_VER) {
10253 /* for now only warn
10254 * later we might need to enforce this */
51c1a580
MS
10255 BNX2X_ERR("This driver needs bc_ver %X but found %X, please upgrade BC\n",
10256 BNX2X_BC_VER, val);
34f80b04 10257 }
4d295db0 10258 bp->link_params.feature_config_flags |=
a22f0788 10259 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
f85582f8
DK
10260 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
10261
a22f0788
YR
10262 bp->link_params.feature_config_flags |=
10263 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
10264 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
a3348722
BW
10265 bp->link_params.feature_config_flags |=
10266 (val >= REQ_BC_VER_4_VRFY_AFEX_SUPPORTED) ?
10267 FEATURE_CONFIG_BC_SUPPORTS_AFEX : 0;
85242eea
YR
10268 bp->link_params.feature_config_flags |=
10269 (val >= REQ_BC_VER_4_SFP_TX_DISABLE_SUPPORTED) ?
10270 FEATURE_CONFIG_BC_SUPPORTS_SFP_TX_DISABLED : 0;
55386fe8
YR
10271
10272 bp->link_params.feature_config_flags |=
10273 (val >= REQ_BC_VER_4_MT_SUPPORTED) ?
10274 FEATURE_CONFIG_MT_SUPPORT : 0;
10275
0e898dd7
BW
10276 bp->flags |= (val >= REQ_BC_VER_4_PFC_STATS_SUPPORTED) ?
10277 BC_SUPPORTS_PFC_STATS : 0;
85242eea 10278
2e499d3c
BW
10279 bp->flags |= (val >= REQ_BC_VER_4_FCOE_FEATURES) ?
10280 BC_SUPPORTS_FCOE_FEATURES : 0;
10281
9876879f
BW
10282 bp->flags |= (val >= REQ_BC_VER_4_DCBX_ADMIN_MSG_NON_PMF) ?
10283 BC_SUPPORTS_DCBX_MSG_NON_PMF : 0;
1d187b34
BW
10284 boot_mode = SHMEM_RD(bp,
10285 dev_info.port_feature_config[BP_PORT(bp)].mba_config) &
10286 PORT_FEATURE_MBA_BOOT_AGENT_TYPE_MASK;
10287 switch (boot_mode) {
10288 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_PXE:
10289 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_PXE;
10290 break;
10291 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_ISCSIB:
10292 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_ISCSI;
10293 break;
10294 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_FCOE_BOOT:
10295 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_FCOE;
10296 break;
10297 case PORT_FEATURE_MBA_BOOT_AGENT_TYPE_NONE:
10298 bp->common.boot_mode = FEATURE_ETH_BOOTMODE_NONE;
10299 break;
10300 }
10301
f9a3ebbe
DK
10302 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
10303 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
10304
72ce58c3 10305 BNX2X_DEV_INFO("%sWoL capable\n",
f5372251 10306 (bp->flags & NO_WOL_FLAG) ? "not " : "");
34f80b04
EG
10307
10308 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
10309 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
10310 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
10311 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
10312
cdaa7cb8
VZ
10313 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
10314 val, val2, val3, val4);
34f80b04
EG
10315}
10316
f2e0899f
DK
10317#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
10318#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
10319
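/* Derive this PF's IGU status block layout: in backward-compatible mode the
 * mapping follows a fixed per-function formula, otherwise the IGU CAM is
 * scanned to find the default SB, the first non-default SB and their count.
 */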
0329aba1 10320static int bnx2x_get_igu_cam_info(struct bnx2x *bp)
f2e0899f
DK
10321{
10322 int pfid = BP_FUNC(bp);
f2e0899f
DK
10323 int igu_sb_id;
10324 u32 val;
6383c0b3 10325 u8 fid, igu_sb_cnt = 0;
f2e0899f
DK
10326
10327 bp->igu_base_sb = 0xff;
f2e0899f 10328 if (CHIP_INT_MODE_IS_BC(bp)) {
3395a033 10329 int vn = BP_VN(bp);
6383c0b3 10330 igu_sb_cnt = bp->igu_sb_cnt;
f2e0899f
DK
10331 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
10332 FP_SB_MAX_E1x;
10333
10334 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
10335 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
10336
9b341bb1 10337 return 0;
f2e0899f
DK
10338 }
10339
10340 /* IGU in normal mode - read CAM */
10341 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
10342 igu_sb_id++) {
10343 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
10344 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
10345 continue;
10346 fid = IGU_FID(val);
10347 if ((fid & IGU_FID_ENCODE_IS_PF)) {
10348 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
10349 continue;
10350 if (IGU_VEC(val) == 0)
10351 /* default status block */
10352 bp->igu_dsb_id = igu_sb_id;
10353 else {
10354 if (bp->igu_base_sb == 0xff)
10355 bp->igu_base_sb = igu_sb_id;
6383c0b3 10356 igu_sb_cnt++;
f2e0899f
DK
10357 }
10358 }
10359 }
619c5cb6 10360
6383c0b3 10361#ifdef CONFIG_PCI_MSI
185d4c8b
AE
10362	/* Due to new PF resource allocation by MFW T7.4 and above, the number of
10363	 * CAM entries may not be equal to the value
10364	 * advertised in PCI.
10365	 * The driver should use the minimum of the two as the actual status
10366	 * block count
619c5cb6 10367 */
185d4c8b 10368 bp->igu_sb_cnt = min_t(int, bp->igu_sb_cnt, igu_sb_cnt);
6383c0b3 10369#endif
619c5cb6 10370
9b341bb1 10371 if (igu_sb_cnt == 0) {
f2e0899f 10372 BNX2X_ERR("CAM configuration error\n");
9b341bb1
BW
10373 return -EINVAL;
10374 }
10375
10376 return 0;
f2e0899f
DK
10377}
10378
1dd06ae8 10379static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
a2fbb9ea 10380{
a22f0788
YR
10381 int cfg_size = 0, idx, port = BP_PORT(bp);
10382
10383 /* Aggregation of supported attributes of all external phys */
10384 bp->port.supported[0] = 0;
10385 bp->port.supported[1] = 0;
b7737c9b
YR
10386 switch (bp->link_params.num_phys) {
10387 case 1:
a22f0788
YR
10388 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
10389 cfg_size = 1;
10390 break;
b7737c9b 10391 case 2:
a22f0788
YR
10392 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
10393 cfg_size = 1;
10394 break;
10395 case 3:
10396 if (bp->link_params.multi_phy_config &
10397 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
10398 bp->port.supported[1] =
10399 bp->link_params.phy[EXT_PHY1].supported;
10400 bp->port.supported[0] =
10401 bp->link_params.phy[EXT_PHY2].supported;
10402 } else {
10403 bp->port.supported[0] =
10404 bp->link_params.phy[EXT_PHY1].supported;
10405 bp->port.supported[1] =
10406 bp->link_params.phy[EXT_PHY2].supported;
10407 }
10408 cfg_size = 2;
10409 break;
b7737c9b 10410 }
a2fbb9ea 10411
a22f0788 10412 if (!(bp->port.supported[0] || bp->port.supported[1])) {
51c1a580 10413 BNX2X_ERR("NVRAM config error. BAD phy config. PHY1 config 0x%x, PHY2 config 0x%x\n",
b7737c9b 10414 SHMEM_RD(bp,
a22f0788
YR
10415 dev_info.port_hw_config[port].external_phy_config),
10416 SHMEM_RD(bp,
10417 dev_info.port_hw_config[port].external_phy_config2));
a2fbb9ea 10418 return;
f85582f8 10419 }
a2fbb9ea 10420
619c5cb6
VZ
10421 if (CHIP_IS_E3(bp))
10422 bp->port.phy_addr = REG_RD(bp, MISC_REG_WC0_CTRL_PHY_ADDR);
10423 else {
10424 switch (switch_cfg) {
10425 case SWITCH_CFG_1G:
10426 bp->port.phy_addr = REG_RD(
10427 bp, NIG_REG_SERDES0_CTRL_PHY_ADDR + port*0x10);
10428 break;
10429 case SWITCH_CFG_10G:
10430 bp->port.phy_addr = REG_RD(
10431 bp, NIG_REG_XGXS0_CTRL_PHY_ADDR + port*0x18);
10432 break;
10433 default:
10434 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
10435 bp->port.link_config[0]);
10436 return;
10437 }
a2fbb9ea 10438 }
619c5cb6 10439 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a22f0788
YR
10440 /* mask what we support according to speed_cap_mask per configuration */
10441 for (idx = 0; idx < cfg_size; idx++) {
10442 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10443 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
a22f0788 10444 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 10445
a22f0788 10446 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10447 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
a22f0788 10448 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 10449
a22f0788 10450 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10451 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
a22f0788 10452 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 10453
a22f0788 10454 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10455 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
a22f0788 10456 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 10457
a22f0788 10458 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10459 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
a22f0788 10460 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
f85582f8 10461 SUPPORTED_1000baseT_Full);
a2fbb9ea 10462
a22f0788 10463 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10464 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
a22f0788 10465 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 10466
a22f0788 10467 if (!(bp->link_params.speed_cap_mask[idx] &
c18487ee 10468 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
a22f0788
YR
10469 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
10470
10471 }
a2fbb9ea 10472
a22f0788
YR
10473 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
10474 bp->port.supported[1]);
a2fbb9ea
ET
10475}
10476
0329aba1 10477static void bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 10478{
a22f0788
YR
10479 u32 link_config, idx, cfg_size = 0;
10480 bp->port.advertising[0] = 0;
10481 bp->port.advertising[1] = 0;
10482 switch (bp->link_params.num_phys) {
10483 case 1:
10484 case 2:
10485 cfg_size = 1;
10486 break;
10487 case 3:
10488 cfg_size = 2;
10489 break;
10490 }
10491 for (idx = 0; idx < cfg_size; idx++) {
10492 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
10493 link_config = bp->port.link_config[idx];
10494 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
f85582f8 10495 case PORT_FEATURE_LINK_SPEED_AUTO:
a22f0788
YR
10496 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
10497 bp->link_params.req_line_speed[idx] =
10498 SPEED_AUTO_NEG;
10499 bp->port.advertising[idx] |=
10500 bp->port.supported[idx];
10bd1f24
MY
10501 if (bp->link_params.phy[EXT_PHY1].type ==
10502 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
10503 bp->port.advertising[idx] |=
10504 (SUPPORTED_100baseT_Half |
10505 SUPPORTED_100baseT_Full);
f85582f8
DK
10506 } else {
10507 /* force 10G, no AN */
a22f0788
YR
10508 bp->link_params.req_line_speed[idx] =
10509 SPEED_10000;
10510 bp->port.advertising[idx] |=
10511 (ADVERTISED_10000baseT_Full |
f85582f8 10512 ADVERTISED_FIBRE);
a22f0788 10513 continue;
f85582f8
DK
10514 }
10515 break;
a2fbb9ea 10516
f85582f8 10517 case PORT_FEATURE_LINK_SPEED_10M_FULL:
a22f0788
YR
10518 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
10519 bp->link_params.req_line_speed[idx] =
10520 SPEED_10;
10521 bp->port.advertising[idx] |=
10522 (ADVERTISED_10baseT_Full |
f85582f8
DK
10523 ADVERTISED_TP);
10524 } else {
51c1a580 10525 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8 10526 link_config,
a22f0788 10527 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10528 return;
10529 }
10530 break;
a2fbb9ea 10531
f85582f8 10532 case PORT_FEATURE_LINK_SPEED_10M_HALF:
a22f0788
YR
10533 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
10534 bp->link_params.req_line_speed[idx] =
10535 SPEED_10;
10536 bp->link_params.req_duplex[idx] =
10537 DUPLEX_HALF;
10538 bp->port.advertising[idx] |=
10539 (ADVERTISED_10baseT_Half |
f85582f8
DK
10540 ADVERTISED_TP);
10541 } else {
51c1a580 10542 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8
DK
10543 link_config,
10544 bp->link_params.speed_cap_mask[idx]);
10545 return;
10546 }
10547 break;
a2fbb9ea 10548
f85582f8
DK
10549 case PORT_FEATURE_LINK_SPEED_100M_FULL:
10550 if (bp->port.supported[idx] &
10551 SUPPORTED_100baseT_Full) {
a22f0788
YR
10552 bp->link_params.req_line_speed[idx] =
10553 SPEED_100;
10554 bp->port.advertising[idx] |=
10555 (ADVERTISED_100baseT_Full |
f85582f8
DK
10556 ADVERTISED_TP);
10557 } else {
51c1a580 10558 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
f85582f8
DK
10559 link_config,
10560 bp->link_params.speed_cap_mask[idx]);
10561 return;
10562 }
10563 break;
a2fbb9ea 10564
f85582f8
DK
10565 case PORT_FEATURE_LINK_SPEED_100M_HALF:
10566 if (bp->port.supported[idx] &
10567 SUPPORTED_100baseT_Half) {
10568 bp->link_params.req_line_speed[idx] =
10569 SPEED_100;
10570 bp->link_params.req_duplex[idx] =
10571 DUPLEX_HALF;
a22f0788
YR
10572 bp->port.advertising[idx] |=
10573 (ADVERTISED_100baseT_Half |
f85582f8
DK
10574 ADVERTISED_TP);
10575 } else {
51c1a580 10576 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788
YR
10577 link_config,
10578 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10579 return;
10580 }
10581 break;
a2fbb9ea 10582
f85582f8 10583 case PORT_FEATURE_LINK_SPEED_1G:
a22f0788
YR
10584 if (bp->port.supported[idx] &
10585 SUPPORTED_1000baseT_Full) {
10586 bp->link_params.req_line_speed[idx] =
10587 SPEED_1000;
10588 bp->port.advertising[idx] |=
10589 (ADVERTISED_1000baseT_Full |
f85582f8
DK
10590 ADVERTISED_TP);
10591 } else {
51c1a580 10592 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788
YR
10593 link_config,
10594 bp->link_params.speed_cap_mask[idx]);
f85582f8
DK
10595 return;
10596 }
10597 break;
a2fbb9ea 10598
f85582f8 10599 case PORT_FEATURE_LINK_SPEED_2_5G:
a22f0788
YR
10600 if (bp->port.supported[idx] &
10601 SUPPORTED_2500baseX_Full) {
10602 bp->link_params.req_line_speed[idx] =
10603 SPEED_2500;
10604 bp->port.advertising[idx] |=
10605 (ADVERTISED_2500baseX_Full |
34f80b04 10606 ADVERTISED_TP);
f85582f8 10607 } else {
51c1a580 10608 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788 10609 link_config,
f85582f8
DK
10610 bp->link_params.speed_cap_mask[idx]);
10611 return;
10612 }
10613 break;
a2fbb9ea 10614
f85582f8 10615 case PORT_FEATURE_LINK_SPEED_10G_CX4:
a22f0788
YR
10616 if (bp->port.supported[idx] &
10617 SUPPORTED_10000baseT_Full) {
10618 bp->link_params.req_line_speed[idx] =
10619 SPEED_10000;
10620 bp->port.advertising[idx] |=
10621 (ADVERTISED_10000baseT_Full |
34f80b04 10622 ADVERTISED_FIBRE);
f85582f8 10623 } else {
51c1a580 10624 BNX2X_ERR("NVRAM config error. Invalid link_config 0x%x speed_cap_mask 0x%x\n",
a22f0788 10625 link_config,
f85582f8
DK
10626 bp->link_params.speed_cap_mask[idx]);
10627 return;
10628 }
10629 break;
3c9ada22
YR
10630 case PORT_FEATURE_LINK_SPEED_20G:
10631 bp->link_params.req_line_speed[idx] = SPEED_20000;
a2fbb9ea 10632
3c9ada22 10633 break;
f85582f8 10634 default:
51c1a580 10635 BNX2X_ERR("NVRAM config error. BAD link speed link_config 0x%x\n",
754a2f52 10636 link_config);
f85582f8
DK
10637 bp->link_params.req_line_speed[idx] =
10638 SPEED_AUTO_NEG;
10639 bp->port.advertising[idx] =
10640 bp->port.supported[idx];
10641 break;
10642 }
a2fbb9ea 10643
a22f0788 10644 bp->link_params.req_flow_ctrl[idx] = (link_config &
34f80b04 10645 PORT_FEATURE_FLOW_CONTROL_MASK);
cd1dfce2
YM
10646 if (bp->link_params.req_flow_ctrl[idx] ==
10647 BNX2X_FLOW_CTRL_AUTO) {
10648 if (!(bp->port.supported[idx] & SUPPORTED_Autoneg))
10649 bp->link_params.req_flow_ctrl[idx] =
10650 BNX2X_FLOW_CTRL_NONE;
10651 else
10652 bnx2x_set_requested_fc(bp);
a22f0788 10653 }
a2fbb9ea 10654
51c1a580 10655 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x advertising 0x%x\n",
a22f0788
YR
10656 bp->link_params.req_line_speed[idx],
10657 bp->link_params.req_duplex[idx],
10658 bp->link_params.req_flow_ctrl[idx],
10659 bp->port.advertising[idx]);
10660 }
a2fbb9ea
ET
10661}
10662
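/* Assemble a 6-byte MAC address, in network (big-endian) byte order, from the
 * hi (16-bit) and lo (32-bit) words as they are stored in shmem.
 */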
0329aba1 10663static void bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
e665bfda 10664{
86564c3f
YM
10665 __be16 mac_hi_be = cpu_to_be16(mac_hi);
10666 __be32 mac_lo_be = cpu_to_be32(mac_lo);
10667 memcpy(mac_buf, &mac_hi_be, sizeof(mac_hi_be));
10668 memcpy(mac_buf + sizeof(mac_hi_be), &mac_lo_be, sizeof(mac_lo_be));
e665bfda
MC
10669}
10670
0329aba1 10671static void bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 10672{
34f80b04 10673 int port = BP_PORT(bp);
589abe3a 10674 u32 config;
c8c60d88 10675 u32 ext_phy_type, ext_phy_config, eee_mode;
a2fbb9ea 10676
c18487ee 10677 bp->link_params.bp = bp;
34f80b04 10678 bp->link_params.port = port;
c18487ee 10679
c18487ee 10680 bp->link_params.lane_config =
a2fbb9ea 10681 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
4d295db0 10682
a22f0788 10683 bp->link_params.speed_cap_mask[0] =
a2fbb9ea 10684 SHMEM_RD(bp,
b0261926
YR
10685 dev_info.port_hw_config[port].speed_capability_mask) &
10686 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
a22f0788
YR
10687 bp->link_params.speed_cap_mask[1] =
10688 SHMEM_RD(bp,
b0261926
YR
10689 dev_info.port_hw_config[port].speed_capability_mask2) &
10690 PORT_HW_CFG_SPEED_CAPABILITY_D0_MASK;
a22f0788 10691 bp->port.link_config[0] =
a2fbb9ea
ET
10692 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
10693
a22f0788
YR
10694 bp->port.link_config[1] =
10695 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
c2c8b03e 10696
a22f0788
YR
10697 bp->link_params.multi_phy_config =
10698 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
3ce2c3f9
EG
10699 /* If the device is capable of WoL, set the default state according
10700 * to the HW
10701 */
4d295db0 10702 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
3ce2c3f9
EG
10703 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
10704 (config & PORT_FEATURE_WOL_ENABLED));
10705
4ba7699b
YM
10706 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10707 PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE && !IS_MF(bp))
10708 bp->flags |= NO_ISCSI_FLAG;
10709 if ((config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) ==
10710 PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI && !(IS_MF(bp)))
10711 bp->flags |= NO_FCOE_FLAG;
10712
51c1a580 10713 BNX2X_DEV_INFO("lane_config 0x%08x speed_cap_mask0 0x%08x link_config0 0x%08x\n",
c18487ee 10714 bp->link_params.lane_config,
a22f0788
YR
10715 bp->link_params.speed_cap_mask[0],
10716 bp->port.link_config[0]);
a2fbb9ea 10717
a22f0788 10718 bp->link_params.switch_cfg = (bp->port.link_config[0] &
f85582f8 10719 PORT_FEATURE_CONNECTED_SWITCH_MASK);
b7737c9b 10720 bnx2x_phy_probe(&bp->link_params);
c18487ee 10721 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
10722
10723 bnx2x_link_settings_requested(bp);
10724
01cd4528
EG
10725 /*
10726 * If connected directly, work with the internal PHY, otherwise, work
10727 * with the external PHY
10728 */
b7737c9b
YR
10729 ext_phy_config =
10730 SHMEM_RD(bp,
10731 dev_info.port_hw_config[port].external_phy_config);
10732 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
01cd4528 10733 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
b7737c9b 10734 bp->mdio.prtad = bp->port.phy_addr;
01cd4528
EG
10735
10736 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
10737 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
10738 bp->mdio.prtad =
b7737c9b 10739 XGXS_EXT_PHY_ADDR(ext_phy_config);
5866df6d 10740
c8c60d88
YM
10741 /* Configure link feature according to nvram value */
10742 eee_mode = (((SHMEM_RD(bp, dev_info.
10743 port_feature_config[port].eee_power_mode)) &
10744 PORT_FEAT_CFG_EEE_POWER_MODE_MASK) >>
10745 PORT_FEAT_CFG_EEE_POWER_MODE_SHIFT);
10746 if (eee_mode != PORT_FEAT_CFG_EEE_POWER_MODE_DISABLED) {
10747 bp->link_params.eee_mode = EEE_MODE_ADV_LPI |
10748 EEE_MODE_ENABLE_LPI |
10749 EEE_MODE_OUTPUT_TIME;
10750 } else {
10751 bp->link_params.eee_mode = 0;
10752 }
0793f83f 10753}
01cd4528 10754
b306f5ed 10755void bnx2x_get_iscsi_info(struct bnx2x *bp)
2ba45142 10756{
9e62e912 10757 u32 no_flags = NO_ISCSI_FLAG;
bf61ee14 10758 int port = BP_PORT(bp);
2ba45142 10759 u32 max_iscsi_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
bf61ee14 10760 drv_lic_key[port].max_iscsi_conn);
2ba45142 10761
55c11941
MS
10762 if (!CNIC_SUPPORT(bp)) {
10763 bp->flags |= no_flags;
10764 return;
10765 }
10766
b306f5ed 10767 /* Get the number of maximum allowed iSCSI connections */
2ba45142
VZ
10768 bp->cnic_eth_dev.max_iscsi_conn =
10769 (max_iscsi_conn & BNX2X_MAX_ISCSI_INIT_CONN_MASK) >>
10770 BNX2X_MAX_ISCSI_INIT_CONN_SHIFT;
10771
b306f5ed
DK
10772 BNX2X_DEV_INFO("max_iscsi_conn 0x%x\n",
10773 bp->cnic_eth_dev.max_iscsi_conn);
10774
10775 /*
10776 * If maximum allowed number of connections is zero -
10777 * disable the feature.
10778 */
10779 if (!bp->cnic_eth_dev.max_iscsi_conn)
9e62e912 10780 bp->flags |= no_flags;
55c11941 10781
b306f5ed
DK
10782}
10783
0329aba1 10784static void bnx2x_get_ext_wwn_info(struct bnx2x *bp, int func)
9e62e912
DK
10785{
10786 /* Port info */
10787 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10788 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_upper);
10789 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10790 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_port_name_lower);
10791
10792 /* Node info */
10793 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10794 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_upper);
10795 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10796 MF_CFG_RD(bp, func_ext_config[func].fcoe_wwn_node_name_lower);
10797}
86800194
DK
10798
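/* Count the functions on this path (or ports, in single-function mode) that
 * have FCoE configured, so the FCoE exchange resources can later be divided
 * between them.
 */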
10799static int bnx2x_shared_fcoe_funcs(struct bnx2x *bp)
10800{
10801 u8 count = 0;
10802
10803 if (IS_MF(bp)) {
10804 u8 fid;
10805
10806 /* iterate over absolute function ids for this path: */
10807 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX * 2; fid += 2) {
10808 if (IS_MF_SD(bp)) {
10809 u32 cfg = MF_CFG_RD(bp,
10810 func_mf_config[fid].config);
10811
10812 if (!(cfg & FUNC_MF_CFG_FUNC_HIDE) &&
10813 ((cfg & FUNC_MF_CFG_PROTOCOL_MASK) ==
10814 FUNC_MF_CFG_PROTOCOL_FCOE))
10815 count++;
10816 } else {
10817 u32 cfg = MF_CFG_RD(bp,
10818 func_ext_config[fid].
10819 func_cfg);
10820
10821 if ((cfg & MACP_FUNC_CFG_FLAGS_ENABLED) &&
10822 (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
10823 count++;
10824 }
10825 }
10826 } else { /* SF */
10827 int port, port_cnt = CHIP_MODE_IS_4_PORT(bp) ? 2 : 1;
10828
10829 for (port = 0; port < port_cnt; port++) {
10830 u32 lic = SHMEM_RD(bp,
10831 drv_lic_key[port].max_fcoe_conn) ^
10832 FW_ENCODE_32BIT_PATTERN;
10833 if (lic)
10834 count++;
10835 }
10836 }
10837
10838 return count;
10839}
10840
0329aba1 10841static void bnx2x_get_fcoe_info(struct bnx2x *bp)
b306f5ed
DK
10842{
10843 int port = BP_PORT(bp);
10844 int func = BP_ABS_FUNC(bp);
b306f5ed
DK
10845 u32 max_fcoe_conn = FW_ENCODE_32BIT_PATTERN ^ SHMEM_RD(bp,
10846 drv_lic_key[port].max_fcoe_conn);
86800194 10847 u8 num_fcoe_func = bnx2x_shared_fcoe_funcs(bp);
b306f5ed 10848
55c11941
MS
10849 if (!CNIC_SUPPORT(bp)) {
10850 bp->flags |= NO_FCOE_FLAG;
10851 return;
10852 }
10853
b306f5ed 10854 /* Get the number of maximum allowed FCoE connections */
2ba45142
VZ
10855 bp->cnic_eth_dev.max_fcoe_conn =
10856 (max_fcoe_conn & BNX2X_MAX_FCOE_INIT_CONN_MASK) >>
10857 BNX2X_MAX_FCOE_INIT_CONN_SHIFT;
10858
0eb43b4b
BPG
10859 /* Calculate the number of maximum allowed FCoE tasks */
10860 bp->cnic_eth_dev.max_fcoe_exchanges = MAX_NUM_FCOE_TASKS_PER_ENGINE;
86800194
DK
10861
10862 /* check if FCoE resources must be shared between different functions */
10863 if (num_fcoe_func)
10864 bp->cnic_eth_dev.max_fcoe_exchanges /= num_fcoe_func;
0eb43b4b 10865
bf61ee14
VZ
10866 /* Read the WWN: */
10867 if (!IS_MF(bp)) {
10868 /* Port info */
10869 bp->cnic_eth_dev.fcoe_wwn_port_name_hi =
10870 SHMEM_RD(bp,
2de67439 10871 dev_info.port_hw_config[port].
bf61ee14
VZ
10872 fcoe_wwn_port_name_upper);
10873 bp->cnic_eth_dev.fcoe_wwn_port_name_lo =
10874 SHMEM_RD(bp,
2de67439 10875 dev_info.port_hw_config[port].
bf61ee14
VZ
10876 fcoe_wwn_port_name_lower);
10877
10878 /* Node info */
10879 bp->cnic_eth_dev.fcoe_wwn_node_name_hi =
10880 SHMEM_RD(bp,
2de67439 10881 dev_info.port_hw_config[port].
bf61ee14
VZ
10882 fcoe_wwn_node_name_upper);
10883 bp->cnic_eth_dev.fcoe_wwn_node_name_lo =
10884 SHMEM_RD(bp,
2de67439 10885 dev_info.port_hw_config[port].
bf61ee14
VZ
10886 fcoe_wwn_node_name_lower);
10887 } else if (!IS_MF_SD(bp)) {
bf61ee14
VZ
10888 /*
10889 * Read the WWN info only if the FCoE feature is enabled for
10890 * this function.
10891 */
7b5342d9 10892 if (BNX2X_MF_EXT_PROTOCOL_FCOE(bp) && !CHIP_IS_E1x(bp))
9e62e912
DK
10893 bnx2x_get_ext_wwn_info(bp, func);
10894
382e513a 10895 } else if (IS_MF_FCOE_SD(bp) && !CHIP_IS_E1x(bp)) {
9e62e912 10896 bnx2x_get_ext_wwn_info(bp, func);
382e513a 10897 }
bf61ee14 10898
b306f5ed 10899 BNX2X_DEV_INFO("max_fcoe_conn 0x%x\n", bp->cnic_eth_dev.max_fcoe_conn);
2ba45142 10900
bf61ee14
VZ
10901 /*
10902 * If maximum allowed number of connections is zero -
2ba45142
VZ
10903 * disable the feature.
10904 */
2ba45142
VZ
10905 if (!bp->cnic_eth_dev.max_fcoe_conn)
10906 bp->flags |= NO_FCOE_FLAG;
10907}
b306f5ed 10908
0329aba1 10909static void bnx2x_get_cnic_info(struct bnx2x *bp)
b306f5ed
DK
10910{
10911 /*
10912	 * iSCSI may be dynamically disabled, but reading
10913	 * the info here lets the driver decrease memory usage
10914	 * if the feature is disabled for good
10915 */
10916 bnx2x_get_iscsi_info(bp);
10917 bnx2x_get_fcoe_info(bp);
10918}
2ba45142 10919
0329aba1 10920static void bnx2x_get_cnic_mac_hwinfo(struct bnx2x *bp)
0793f83f
DK
10921{
10922 u32 val, val2;
10923 int func = BP_ABS_FUNC(bp);
10924 int port = BP_PORT(bp);
2ba45142
VZ
10925 u8 *iscsi_mac = bp->cnic_eth_dev.iscsi_mac;
10926 u8 *fip_mac = bp->fip_mac;
0793f83f 10927
55c11941
MS
10928 if (IS_MF(bp)) {
10929		/* iSCSI and FCoE NPAR MACs: if there is neither an iSCSI nor an
2ba45142	10930		 * FCoE MAC, then the appropriate feature should be disabled.
55c11941
MS
10931 * In non SD mode features configuration comes from struct
10932 * func_ext_config.
2ba45142 10933 */
55c11941 10934 if (!IS_MF_SD(bp) && !CHIP_IS_E1x(bp)) {
0793f83f
DK
10935 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
10936 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
10937 val2 = MF_CFG_RD(bp, func_ext_config[func].
55c11941 10938 iscsi_mac_addr_upper);
0793f83f 10939 val = MF_CFG_RD(bp, func_ext_config[func].
55c11941 10940 iscsi_mac_addr_lower);
2ba45142 10941 bnx2x_set_mac_buf(iscsi_mac, val, val2);
55c11941
MS
10942 BNX2X_DEV_INFO
10943 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10944 } else {
2ba45142 10945 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
55c11941 10946 }
2ba45142
VZ
10947
10948 if (cfg & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD) {
10949 val2 = MF_CFG_RD(bp, func_ext_config[func].
55c11941 10950 fcoe_mac_addr_upper);
2ba45142 10951 val = MF_CFG_RD(bp, func_ext_config[func].
55c11941 10952 fcoe_mac_addr_lower);
2ba45142 10953 bnx2x_set_mac_buf(fip_mac, val, val2);
55c11941
MS
10954 BNX2X_DEV_INFO
10955 ("Read FCoE L2 MAC: %pM\n", fip_mac);
10956 } else {
2ba45142 10957 bp->flags |= NO_FCOE_FLAG;
55c11941 10958 }
a3348722
BW
10959
10960 bp->mf_ext_config = cfg;
10961
9e62e912 10962 } else { /* SD MODE */
55c11941
MS
10963 if (BNX2X_IS_MF_SD_PROTOCOL_ISCSI(bp)) {
10964 /* use primary mac as iscsi mac */
10965 memcpy(iscsi_mac, bp->dev->dev_addr, ETH_ALEN);
10966
10967 BNX2X_DEV_INFO("SD ISCSI MODE\n");
10968 BNX2X_DEV_INFO
10969 ("Read iSCSI MAC: %pM\n", iscsi_mac);
10970 } else if (BNX2X_IS_MF_SD_PROTOCOL_FCOE(bp)) {
10971 /* use primary mac as fip mac */
10972 memcpy(fip_mac, bp->dev->dev_addr, ETH_ALEN);
10973 BNX2X_DEV_INFO("SD FCoE MODE\n");
10974 BNX2X_DEV_INFO
10975 ("Read FIP MAC: %pM\n", fip_mac);
614c76df 10976 }
0793f83f 10977 }
a3348722 10978
82594f8f
YM
10979 /* If this is a storage-only interface, use SAN mac as
10980 * primary MAC. Notice that for SD this is already the case,
10981 * as the SAN mac was copied from the primary MAC.
10982 */
10983 if (IS_MF_FCOE_AFEX(bp))
a3348722 10984 memcpy(bp->dev->dev_addr, fip_mac, ETH_ALEN);
0793f83f 10985 } else {
0793f83f 10986 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 10987 iscsi_mac_upper);
0793f83f 10988 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 10989 iscsi_mac_lower);
2ba45142 10990 bnx2x_set_mac_buf(iscsi_mac, val, val2);
c03bd39c
VZ
10991
10992 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 10993 fcoe_fip_mac_upper);
c03bd39c 10994 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
55c11941 10995 fcoe_fip_mac_lower);
c03bd39c 10996 bnx2x_set_mac_buf(fip_mac, val, val2);
0793f83f
DK
10997 }
10998
55c11941 10999 /* Disable iSCSI OOO if MAC configuration is invalid. */
426b9241 11000 if (!is_valid_ether_addr(iscsi_mac)) {
55c11941 11001 bp->flags |= NO_ISCSI_OOO_FLAG | NO_ISCSI_FLAG;
426b9241
DK
11002 memset(iscsi_mac, 0, ETH_ALEN);
11003 }
11004
55c11941 11005 /* Disable FCoE if MAC configuration is invalid. */
426b9241
DK
11006 if (!is_valid_ether_addr(fip_mac)) {
11007 bp->flags |= NO_FCOE_FLAG;
11008 memset(bp->fip_mac, 0, ETH_ALEN);
11009 }
55c11941
MS
11010}
11011
0329aba1 11012static void bnx2x_get_mac_hwinfo(struct bnx2x *bp)
55c11941
MS
11013{
11014 u32 val, val2;
11015 int func = BP_ABS_FUNC(bp);
11016 int port = BP_PORT(bp);
11017
11018 /* Zero primary MAC configuration */
11019 memset(bp->dev->dev_addr, 0, ETH_ALEN);
11020
11021 if (BP_NOMCP(bp)) {
11022 BNX2X_ERROR("warning: random MAC workaround active\n");
11023 eth_hw_addr_random(bp->dev);
11024 } else if (IS_MF(bp)) {
11025 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
11026 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
11027 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
11028 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
11029 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11030
11031 if (CNIC_SUPPORT(bp))
11032 bnx2x_get_cnic_mac_hwinfo(bp);
11033 } else {
11034 /* in SF read MACs from port configuration */
11035 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
11036 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
11037 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
11038
11039 if (CNIC_SUPPORT(bp))
11040 bnx2x_get_cnic_mac_hwinfo(bp);
11041 }
11042
11043 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
619c5cb6 11044
614c76df 11045 if (!bnx2x_is_valid_ether_addr(bp, bp->dev->dev_addr))
619c5cb6 11046 dev_err(&bp->pdev->dev,
51c1a580
MS
11047 "bad Ethernet MAC address configuration: %pM\n"
11048 "change it manually before bringing up the appropriate network interface\n",
0f9dad10 11049 bp->dev->dev_addr);
7964211d 11050}
51c1a580 11051
0329aba1 11052static bool bnx2x_get_dropless_info(struct bnx2x *bp)
7964211d
YM
11053{
11054 int tmp;
11055 u32 cfg;
51c1a580 11056
7964211d
YM
11057 if (IS_MF(bp) && !CHIP_IS_E1x(bp)) {
11058 /* Take function: tmp = func */
11059 tmp = BP_ABS_FUNC(bp);
11060 cfg = MF_CFG_RD(bp, func_ext_config[tmp].func_cfg);
11061 cfg = !!(cfg & MACP_FUNC_CFG_PAUSE_ON_HOST_RING);
11062 } else {
11063 /* Take port: tmp = port */
11064 tmp = BP_PORT(bp);
11065 cfg = SHMEM_RD(bp,
11066 dev_info.port_hw_config[tmp].generic_features);
11067 cfg = !!(cfg & PORT_HW_CFG_PAUSE_ON_HOST_RING_ENABLED);
11068 }
11069 return cfg;
34f80b04
EG
11070}
11071
0329aba1 11072static int bnx2x_get_hwinfo(struct bnx2x *bp)
34f80b04 11073{
0793f83f 11074 int /*abs*/func = BP_ABS_FUNC(bp);
b8ee8328 11075 int vn;
0793f83f 11076 u32 val = 0;
34f80b04 11077 int rc = 0;
a2fbb9ea 11078
34f80b04 11079 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 11080
6383c0b3
AE
11081 /*
11082 * initialize IGU parameters
11083 */
f2e0899f
DK
11084 if (CHIP_IS_E1x(bp)) {
11085 bp->common.int_block = INT_BLOCK_HC;
11086
11087 bp->igu_dsb_id = DEF_SB_IGU_ID;
11088 bp->igu_base_sb = 0;
f2e0899f
DK
11089 } else {
11090 bp->common.int_block = INT_BLOCK_IGU;
7a06a122
DK
11091
11092		/* do not allow device reset during IGU info processing */
11093 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
11094
f2e0899f 11095 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
619c5cb6
VZ
11096
11097 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
11098 int tout = 5000;
11099
11100 BNX2X_DEV_INFO("FORCING Normal Mode\n");
11101
11102 val &= ~(IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN);
11103 REG_WR(bp, IGU_REG_BLOCK_CONFIGURATION, val);
11104 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x7f);
11105
11106 while (tout && REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11107 tout--;
0926d499 11108 usleep_range(1000, 2000);
619c5cb6
VZ
11109 }
11110
11111 if (REG_RD(bp, IGU_REG_RESET_MEMORIES)) {
11112 dev_err(&bp->pdev->dev,
11113 "FORCING Normal Mode failed!!!\n");
9b341bb1
BW
11114 bnx2x_release_hw_lock(bp,
11115 HW_LOCK_RESOURCE_RESET);
619c5cb6
VZ
11116 return -EPERM;
11117 }
11118 }
11119
f2e0899f 11120 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
619c5cb6 11121 BNX2X_DEV_INFO("IGU Backward Compatible Mode\n");
f2e0899f
DK
11122 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
11123 } else
619c5cb6 11124 BNX2X_DEV_INFO("IGU Normal Mode\n");
523224a3 11125
9b341bb1 11126 rc = bnx2x_get_igu_cam_info(bp);
7a06a122 11127 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESET);
9b341bb1
BW
11128 if (rc)
11129 return rc;
f2e0899f 11130 }
619c5cb6
VZ
11131
11132 /*
11133 * set base FW non-default (fast path) status block id, this value is
11134 * used to initialize the fw_sb_id saved on the fp/queue structure to
11135 * determine the id used by the FW.
11136 */
11137 if (CHIP_IS_E1x(bp))
11138 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x + BP_L_ID(bp);
11139 else /*
11140 * 57712 - we currently use one FW SB per IGU SB (Rx and Tx of
11141 * the same queue are indicated on the same IGU SB). So we prefer
11142 * FW and IGU SBs to be the same value.
11143 */
11144 bp->base_fw_ndsb = bp->igu_base_sb;
11145
11146 BNX2X_DEV_INFO("igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n"
11147 "base_fw_ndsb %d\n", bp->igu_dsb_id, bp->igu_base_sb,
11148 bp->igu_sb_cnt, bp->base_fw_ndsb);
f2e0899f
DK
11149
11150 /*
11151 * Initialize MF configuration
11152 */
523224a3 11153
fb3bff17
DK
11154 bp->mf_ov = 0;
11155 bp->mf_mode = 0;
3395a033 11156 vn = BP_VN(bp);
0793f83f 11157
f2e0899f 11158 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
619c5cb6
VZ
11159 BNX2X_DEV_INFO("shmem2base 0x%x, size %d, mfcfg offset %d\n",
11160 bp->common.shmem2_base, SHMEM2_RD(bp, size),
11161 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
11162
f2e0899f
DK
11163 if (SHMEM2_HAS(bp, mf_cfg_addr))
11164 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
11165 else
11166 bp->common.mf_cfg_base = bp->common.shmem_base +
523224a3
DK
11167 offsetof(struct shmem_region, func_mb) +
11168 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
0793f83f
DK
11169 /*
11170 * get mf configuration:
25985edc 11171 * 1. existence of MF configuration
0793f83f
DK
11172 * 2. MAC address must be legal (check only upper bytes)
11173 * for Switch-Independent mode;
11174 * OVLAN must be legal for Switch-Dependent mode
11175 * 3. SF_MODE configures specific MF mode
11176 */
11177 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11178 /* get mf configuration */
11179 val = SHMEM_RD(bp,
11180 dev_info.shared_feature_config.config);
11181 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
11182
11183 switch (val) {
11184 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
11185 val = MF_CFG_RD(bp, func_mf_config[func].
11186 mac_upper);
11187 /* check for legal mac (upper bytes)*/
11188 if (val != 0xffff) {
11189 bp->mf_mode = MULTI_FUNCTION_SI;
11190 bp->mf_config[vn] = MF_CFG_RD(bp,
11191 func_mf_config[func].config);
11192 } else
51c1a580 11193 BNX2X_DEV_INFO("illegal MAC address for SI\n");
0793f83f 11194 break;
a3348722
BW
11195 case SHARED_FEAT_CFG_FORCE_SF_MODE_AFEX_MODE:
11196 if ((!CHIP_IS_E1x(bp)) &&
11197 (MF_CFG_RD(bp, func_mf_config[func].
11198 mac_upper) != 0xffff) &&
11199 (SHMEM2_HAS(bp,
11200 afex_driver_support))) {
11201 bp->mf_mode = MULTI_FUNCTION_AFEX;
11202 bp->mf_config[vn] = MF_CFG_RD(bp,
11203 func_mf_config[func].config);
11204 } else {
11205 BNX2X_DEV_INFO("can not configure afex mode\n");
11206 }
11207 break;
0793f83f
DK
11208 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
11209 /* get OV configuration */
11210 val = MF_CFG_RD(bp,
11211 func_mf_config[FUNC_0].e1hov_tag);
11212 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
11213
11214 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
11215 bp->mf_mode = MULTI_FUNCTION_SD;
11216 bp->mf_config[vn] = MF_CFG_RD(bp,
11217 func_mf_config[func].config);
11218 } else
754a2f52 11219 BNX2X_DEV_INFO("illegal OV for SD\n");
0793f83f 11220 break;
3786b942
AE
11221 case SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF:
11222 bp->mf_config[vn] = 0;
11223 break;
0793f83f
DK
11224 default:
11225 /* Unknown configuration: reset mf_config */
11226 bp->mf_config[vn] = 0;
51c1a580 11227 BNX2X_DEV_INFO("unknown MF mode 0x%x\n", val);
0793f83f
DK
11228 }
11229 }
a2fbb9ea 11230
2691d51d 11231 BNX2X_DEV_INFO("%s function mode\n",
fb3bff17 11232 IS_MF(bp) ? "multi" : "single");
2691d51d 11233
0793f83f
DK
11234 switch (bp->mf_mode) {
11235 case MULTI_FUNCTION_SD:
11236 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
11237 FUNC_MF_CFG_E1HOV_TAG_MASK;
2691d51d 11238 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
fb3bff17 11239 bp->mf_ov = val;
619c5cb6
VZ
11240 bp->path_has_ovlan = true;
11241
51c1a580
MS
11242 BNX2X_DEV_INFO("MF OV for func %d is %d (0x%04x)\n",
11243 func, bp->mf_ov, bp->mf_ov);
2691d51d 11244 } else {
619c5cb6 11245 dev_err(&bp->pdev->dev,
51c1a580
MS
11246 "No valid MF OV for func %d, aborting\n",
11247 func);
619c5cb6 11248 return -EPERM;
34f80b04 11249 }
0793f83f 11250 break;
a3348722
BW
11251 case MULTI_FUNCTION_AFEX:
11252 BNX2X_DEV_INFO("func %d is in MF afex mode\n", func);
11253 break;
0793f83f 11254 case MULTI_FUNCTION_SI:
51c1a580
MS
11255 BNX2X_DEV_INFO("func %d is in MF switch-independent mode\n",
11256 func);
0793f83f
DK
11257 break;
11258 default:
11259 if (vn) {
619c5cb6 11260 dev_err(&bp->pdev->dev,
51c1a580
MS
11261 "VN %d is in a single function mode, aborting\n",
11262 vn);
619c5cb6 11263 return -EPERM;
2691d51d 11264 }
0793f83f 11265 break;
34f80b04 11266 }
0793f83f 11267
619c5cb6
VZ
11268		/* check if the other port on the path needs ovlan:
11269		 * since the MF configuration is shared between ports,
11270		 * the only possible mixed modes are
11271 * {SF, SI} {SF, SD} {SD, SF} {SI, SF}
11272 */
11273 if (CHIP_MODE_IS_4_PORT(bp) &&
11274 !bp->path_has_ovlan &&
11275 !IS_MF(bp) &&
11276 bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
11277 u8 other_port = !BP_PORT(bp);
11278 u8 other_func = BP_PATH(bp) + 2*other_port;
11279 val = MF_CFG_RD(bp,
11280 func_mf_config[other_func].e1hov_tag);
11281 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
11282 bp->path_has_ovlan = true;
11283 }
34f80b04 11284 }
a2fbb9ea 11285
f2e0899f
DK
11286 /* adjust igu_sb_cnt to MF for E1x */
11287 if (CHIP_IS_E1x(bp) && IS_MF(bp))
523224a3
DK
11288 bp->igu_sb_cnt /= E1HVN_MAX;
11289
619c5cb6
VZ
11290 /* port info */
11291 bnx2x_get_port_hwinfo(bp);
f2e0899f 11292
0793f83f
DK
11293 /* Get MAC addresses */
11294 bnx2x_get_mac_hwinfo(bp);
a2fbb9ea 11295
2ba45142 11296 bnx2x_get_cnic_info(bp);
2ba45142 11297
34f80b04
EG
11298 return rc;
11299}
11300
0329aba1 11301static void bnx2x_read_fwinfo(struct bnx2x *bp)
34f24c7f
VZ
11302{
11303 int cnt, i, block_end, rodi;
fcdf95cb 11304 char vpd_start[BNX2X_VPD_LEN+1];
34f24c7f
VZ
11305 char str_id_reg[VENDOR_ID_LEN+1];
11306 char str_id_cap[VENDOR_ID_LEN+1];
fcdf95cb
BW
11307 char *vpd_data;
11308 char *vpd_extended_data = NULL;
34f24c7f
VZ
11309 u8 len;
11310
fcdf95cb 11311 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_start);
34f24c7f
VZ
11312 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
11313
11314 if (cnt < BNX2X_VPD_LEN)
11315 goto out_not_found;
11316
fcdf95cb
BW
11317 /* VPD RO tag should be first tag after identifier string, hence
11318 * we should be able to find it in first BNX2X_VPD_LEN chars
11319 */
11320 i = pci_vpd_find_tag(vpd_start, 0, BNX2X_VPD_LEN,
34f24c7f
VZ
11321 PCI_VPD_LRDT_RO_DATA);
11322 if (i < 0)
11323 goto out_not_found;
11324
34f24c7f 11325 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
fcdf95cb 11326 pci_vpd_lrdt_size(&vpd_start[i]);
34f24c7f
VZ
11327
11328 i += PCI_VPD_LRDT_TAG_SIZE;
11329
fcdf95cb
BW
11330 if (block_end > BNX2X_VPD_LEN) {
11331 vpd_extended_data = kmalloc(block_end, GFP_KERNEL);
11332 if (vpd_extended_data == NULL)
11333 goto out_not_found;
11334
11335 /* read rest of vpd image into vpd_extended_data */
11336 memcpy(vpd_extended_data, vpd_start, BNX2X_VPD_LEN);
11337 cnt = pci_read_vpd(bp->pdev, BNX2X_VPD_LEN,
11338 block_end - BNX2X_VPD_LEN,
11339 vpd_extended_data + BNX2X_VPD_LEN);
11340 if (cnt < (block_end - BNX2X_VPD_LEN))
11341 goto out_not_found;
11342 vpd_data = vpd_extended_data;
11343 } else
11344 vpd_data = vpd_start;
11345
11346 /* now vpd_data holds full vpd content in both cases */
34f24c7f
VZ
11347
11348 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11349 PCI_VPD_RO_KEYWORD_MFR_ID);
11350 if (rodi < 0)
11351 goto out_not_found;
11352
11353 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11354
11355 if (len != VENDOR_ID_LEN)
11356 goto out_not_found;
11357
11358 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11359
11360 /* vendor specific info */
11361 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
11362 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
11363 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
11364 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
11365
11366 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
11367 PCI_VPD_RO_KEYWORD_VENDOR0);
11368 if (rodi >= 0) {
11369 len = pci_vpd_info_field_size(&vpd_data[rodi]);
11370
11371 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
11372
11373 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
11374 memcpy(bp->fw_ver, &vpd_data[rodi], len);
11375 bp->fw_ver[len] = ' ';
11376 }
11377 }
fcdf95cb 11378 kfree(vpd_extended_data);
34f24c7f
VZ
11379 return;
11380 }
11381out_not_found:
fcdf95cb 11382 kfree(vpd_extended_data);
34f24c7f
VZ
11383 return;
11384}
11385
0329aba1 11386static void bnx2x_set_modes_bitmap(struct bnx2x *bp)
619c5cb6
VZ
11387{
11388 u32 flags = 0;
11389
11390 if (CHIP_REV_IS_FPGA(bp))
11391 SET_FLAGS(flags, MODE_FPGA);
11392 else if (CHIP_REV_IS_EMUL(bp))
11393 SET_FLAGS(flags, MODE_EMUL);
11394 else
11395 SET_FLAGS(flags, MODE_ASIC);
11396
11397 if (CHIP_MODE_IS_4_PORT(bp))
11398 SET_FLAGS(flags, MODE_PORT4);
11399 else
11400 SET_FLAGS(flags, MODE_PORT2);
11401
11402 if (CHIP_IS_E2(bp))
11403 SET_FLAGS(flags, MODE_E2);
11404 else if (CHIP_IS_E3(bp)) {
11405 SET_FLAGS(flags, MODE_E3);
11406 if (CHIP_REV(bp) == CHIP_REV_Ax)
11407 SET_FLAGS(flags, MODE_E3_A0);
6383c0b3
AE
11408 else /*if (CHIP_REV(bp) == CHIP_REV_Bx)*/
11409 SET_FLAGS(flags, MODE_E3_B0 | MODE_COS3);
619c5cb6
VZ
11410 }
11411
11412 if (IS_MF(bp)) {
11413 SET_FLAGS(flags, MODE_MF);
11414 switch (bp->mf_mode) {
11415 case MULTI_FUNCTION_SD:
11416 SET_FLAGS(flags, MODE_MF_SD);
11417 break;
11418 case MULTI_FUNCTION_SI:
11419 SET_FLAGS(flags, MODE_MF_SI);
11420 break;
a3348722
BW
11421 case MULTI_FUNCTION_AFEX:
11422 SET_FLAGS(flags, MODE_MF_AFEX);
11423 break;
619c5cb6
VZ
11424 }
11425 } else
11426 SET_FLAGS(flags, MODE_SF);
11427
11428#if defined(__LITTLE_ENDIAN)
11429 SET_FLAGS(flags, MODE_LITTLE_ENDIAN);
11430#else /*(__BIG_ENDIAN)*/
11431 SET_FLAGS(flags, MODE_BIG_ENDIAN);
11432#endif
11433 INIT_MODE_FLAGS(bp) = flags;
11434}
11435
0329aba1 11436static int bnx2x_init_bp(struct bnx2x *bp)
34f80b04 11437{
f2e0899f 11438 int func;
34f80b04
EG
11439 int rc;
11440
34f80b04 11441 mutex_init(&bp->port.phy_mutex);
c4ff7cbf 11442 mutex_init(&bp->fw_mb_mutex);
bb7e95c8 11443 spin_lock_init(&bp->stats_lock);
55c11941 11444
a2fbb9ea 11445
1cf167f2 11446 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
7be08a72 11447 INIT_DELAYED_WORK(&bp->sp_rtnl_task, bnx2x_sp_rtnl_task);
3deb8167 11448 INIT_DELAYED_WORK(&bp->period_task, bnx2x_period_task);
1ab4434c
AE
11449 if (IS_PF(bp)) {
11450 rc = bnx2x_get_hwinfo(bp);
11451 if (rc)
11452 return rc;
11453 } else {
e09b74d0 11454 eth_zero_addr(bp->dev->dev_addr);
1ab4434c 11455 }
34f80b04 11456
619c5cb6
VZ
11457 bnx2x_set_modes_bitmap(bp);
11458
11459 rc = bnx2x_alloc_mem_bp(bp);
11460 if (rc)
11461 return rc;
523224a3 11462
34f24c7f 11463 bnx2x_read_fwinfo(bp);
f2e0899f
DK
11464
11465 func = BP_FUNC(bp);
11466
34f80b04 11467 /* need to reset chip if undi was active */
1ab4434c 11468 if (IS_PF(bp) && !BP_NOMCP(bp)) {
452427b0
YM
11469 /* init fw_seq */
11470 bp->fw_seq =
11471 SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
11472 DRV_MSG_SEQ_NUMBER_MASK;
11473 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11474
11475 bnx2x_prev_unload(bp);
11476 }
11477
34f80b04
EG
11478
11479 if (CHIP_REV_IS_FPGA(bp))
cdaa7cb8 11480 dev_err(&bp->pdev->dev, "FPGA detected\n");
34f80b04
EG
11481
11482 if (BP_NOMCP(bp) && (func == 0))
51c1a580 11483 dev_err(&bp->pdev->dev, "MCP disabled, must load devices in order!\n");
34f80b04 11484
614c76df 11485 bp->disable_tpa = disable_tpa;
a3348722 11486 bp->disable_tpa |= IS_MF_STORAGE_SD(bp) || IS_MF_FCOE_AFEX(bp);
614c76df 11487
7a9b2557 11488 /* Set TPA flags */
614c76df 11489 if (bp->disable_tpa) {
621b4d66 11490 bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
7a9b2557
VZ
11491 bp->dev->features &= ~NETIF_F_LRO;
11492 } else {
621b4d66 11493 bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
7a9b2557
VZ
11494 bp->dev->features |= NETIF_F_LRO;
11495 }
11496
a18f5128
EG
11497 if (CHIP_IS_E1(bp))
11498 bp->dropless_fc = 0;
11499 else
7964211d 11500 bp->dropless_fc = dropless_fc | bnx2x_get_dropless_info(bp);
a18f5128 11501
8d5726c4 11502 bp->mrrs = mrrs;
7a9b2557 11503
a3348722 11504 bp->tx_ring_size = IS_MF_FCOE_AFEX(bp) ? 0 : MAX_TX_AVAIL;
1ab4434c
AE
11505 if (IS_VF(bp))
11506 bp->rx_ring_size = MAX_RX_AVAIL;
34f80b04 11507
7d323bfd 11508 /* make sure that the numbers are in the right granularity */
523224a3
DK
11509 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
11510 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
34f80b04 11511
fc543637 11512 bp->current_interval = CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ;
34f80b04
EG
11513
11514 init_timer(&bp->timer);
11515 bp->timer.expires = jiffies + bp->current_interval;
11516 bp->timer.data = (unsigned long) bp;
11517 bp->timer.function = bnx2x_timer;
11518
0370cf90
BW
11519 if (SHMEM2_HAS(bp, dcbx_lldp_params_offset) &&
11520 SHMEM2_HAS(bp, dcbx_lldp_dcbx_stat_offset) &&
11521 SHMEM2_RD(bp, dcbx_lldp_params_offset) &&
11522 SHMEM2_RD(bp, dcbx_lldp_dcbx_stat_offset)) {
11523 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
11524 bnx2x_dcbx_init_params(bp);
11525 } else {
11526 bnx2x_dcbx_set_state(bp, false, BNX2X_DCBX_ENABLED_OFF);
11527 }
e4901dde 11528
619c5cb6
VZ
11529 if (CHIP_IS_E1x(bp))
11530 bp->cnic_base_cl_id = FP_SB_MAX_E1x;
11531 else
11532 bp->cnic_base_cl_id = FP_SB_MAX_E2;
619c5cb6 11533
6383c0b3 11534 /* multiple tx priority */
1ab4434c
AE
11535 if (IS_VF(bp))
11536 bp->max_cos = 1;
11537 else if (CHIP_IS_E1x(bp))
6383c0b3 11538 bp->max_cos = BNX2X_MULTI_TX_COS_E1X;
1ab4434c 11539 else if (CHIP_IS_E2(bp) || CHIP_IS_E3A0(bp))
6383c0b3 11540 bp->max_cos = BNX2X_MULTI_TX_COS_E2_E3A0;
1ab4434c 11541 else if (CHIP_IS_E3B0(bp))
6383c0b3 11542 bp->max_cos = BNX2X_MULTI_TX_COS_E3B0;
1ab4434c
AE
11543 else
11544 BNX2X_ERR("unknown chip %x revision %x\n",
11545 CHIP_NUM(bp), CHIP_REV(bp));
11546 BNX2X_DEV_INFO("set bp->max_cos to %d\n", bp->max_cos);
6383c0b3 11547
55c11941
MS
11548 /* We need at least one default status block for slow-path events,
11549	 * a second status block for the L2 queue, and a third status block for
11550	 * CNIC if supported.
11551 */
11552 if (CNIC_SUPPORT(bp))
11553 bp->min_msix_vec_cnt = 3;
11554 else
11555 bp->min_msix_vec_cnt = 2;
11556 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
11557
34f80b04 11558 return rc;
a2fbb9ea
ET
11559}
11560
a2fbb9ea 11561
de0c62db
DK
11562/****************************************************************************
11563* General service functions
11564****************************************************************************/
a2fbb9ea 11565
619c5cb6
VZ
11566/*
11567 * net_device service functions
11568 */
11569
bb2a0f7a 11570/* called with rtnl_lock */
a2fbb9ea
ET
11571static int bnx2x_open(struct net_device *dev)
11572{
11573 struct bnx2x *bp = netdev_priv(dev);
c9ee9206
VZ
11574 bool global = false;
11575 int other_engine = BP_PATH(bp) ? 0 : 1;
889b9af3 11576 bool other_load_status, load_status;
8395be5e 11577 int rc;
a2fbb9ea 11578
1355b704
MY
11579 bp->stats_init = true;
11580
6eccabb3
EG
11581 netif_carrier_off(dev);
11582
a2fbb9ea
ET
11583 bnx2x_set_power_state(bp, PCI_D0);
11584
ad5afc89 11585	/* If parity had happened during the unload, then attentions
c9ee9206
VZ
11586	 * and/or RECOVERY_IN_PROGRESS may still be set. In this case we
11587 * want the first function loaded on the current engine to
11588 * complete the recovery.
ad5afc89 11589 * Parity recovery is only relevant for PF driver.
c9ee9206 11590 */
ad5afc89
AE
11591 if (IS_PF(bp)) {
11592 other_load_status = bnx2x_get_load_status(bp, other_engine);
11593 load_status = bnx2x_get_load_status(bp, BP_PATH(bp));
11594 if (!bnx2x_reset_is_done(bp, BP_PATH(bp)) ||
11595 bnx2x_chk_parity_attn(bp, &global, true)) {
11596 do {
11597 /* If there are attentions and they are in a
11598 * global blocks, set the GLOBAL_RESET bit
11599 * regardless whether it will be this function
11600 * that will complete the recovery or not.
11601 */
11602 if (global)
11603 bnx2x_set_reset_global(bp);
72fd0718 11604
ad5afc89
AE
11605 /* Only the first function on the current
11606 * engine should try to recover in open. In case
11607 * of attentions in global blocks only the first
11608 * in the chip should try to recover.
11609 */
11610 if ((!load_status &&
11611 (!global || !other_load_status)) &&
11612 bnx2x_trylock_leader_lock(bp) &&
11613 !bnx2x_leader_reset(bp)) {
11614 netdev_info(bp->dev,
11615 "Recovered in open\n");
11616 break;
11617 }
72fd0718 11618
ad5afc89
AE
11619 /* recovery has failed... */
11620 bnx2x_set_power_state(bp, PCI_D3hot);
11621 bp->recovery_state = BNX2X_RECOVERY_FAILED;
72fd0718 11622
ad5afc89
AE
11623 BNX2X_ERR("Recovery flow hasn't been properly completed yet. Try again later.\n"
11624 "If you still see this message after a few retries then power cycle is required.\n");
72fd0718 11625
ad5afc89
AE
11626 return -EAGAIN;
11627 } while (0);
11628 }
11629 }
72fd0718
VZ
11630
11631 bp->recovery_state = BNX2X_RECOVERY_DONE;
8395be5e
AE
11632 rc = bnx2x_nic_load(bp, LOAD_OPEN);
11633 if (rc)
11634 return rc;
11635 return bnx2x_open_epilog(bp);
a2fbb9ea
ET
11636}
11637
bb2a0f7a 11638/* called with rtnl_lock */
56ad3152 11639static int bnx2x_close(struct net_device *dev)
a2fbb9ea 11640{
a2fbb9ea
ET
11641 struct bnx2x *bp = netdev_priv(dev);
11642
11643 /* Unload the driver, release IRQs */
5d07d868 11644 bnx2x_nic_unload(bp, UNLOAD_CLOSE, false);
c9ee9206 11645
a2fbb9ea
ET
11646 return 0;
11647}
11648
1191cb83
ED
11649static int bnx2x_init_mcast_macs_list(struct bnx2x *bp,
11650 struct bnx2x_mcast_ramrod_params *p)
6e30dd4e 11651{
619c5cb6
VZ
11652 int mc_count = netdev_mc_count(bp->dev);
11653 struct bnx2x_mcast_list_elem *mc_mac =
11654 kzalloc(sizeof(*mc_mac) * mc_count, GFP_ATOMIC);
11655 struct netdev_hw_addr *ha;
6e30dd4e 11656
619c5cb6
VZ
11657 if (!mc_mac)
11658 return -ENOMEM;
6e30dd4e 11659
619c5cb6 11660 INIT_LIST_HEAD(&p->mcast_list);
6e30dd4e 11661
619c5cb6
VZ
11662 netdev_for_each_mc_addr(ha, bp->dev) {
11663 mc_mac->mac = bnx2x_mc_addr(ha);
11664 list_add_tail(&mc_mac->link, &p->mcast_list);
11665 mc_mac++;
6e30dd4e 11666 }
619c5cb6
VZ
11667
11668 p->mcast_list_len = mc_count;
11669
11670 return 0;
6e30dd4e
VZ
11671}
11672
1191cb83 11673static void bnx2x_free_mcast_macs_list(
619c5cb6
VZ
11674 struct bnx2x_mcast_ramrod_params *p)
11675{
11676 struct bnx2x_mcast_list_elem *mc_mac =
11677 list_first_entry(&p->mcast_list, struct bnx2x_mcast_list_elem,
11678 link);
11679
11680 WARN_ON(!mc_mac);
11681 kfree(mc_mac);
11682}
11683
11684/**
11685 * bnx2x_set_uc_list - configure a new unicast MACs list.
11686 *
11687 * @bp: driver handle
6e30dd4e 11688 *
619c5cb6 11689 * We will use zero (0) as a MAC type for these MACs.
6e30dd4e 11690 */
1191cb83 11691static int bnx2x_set_uc_list(struct bnx2x *bp)
6e30dd4e 11692{
619c5cb6 11693 int rc;
6e30dd4e 11694 struct net_device *dev = bp->dev;
6e30dd4e 11695 struct netdev_hw_addr *ha;
15192a8c 11696 struct bnx2x_vlan_mac_obj *mac_obj = &bp->sp_objs->mac_obj;
619c5cb6 11697 unsigned long ramrod_flags = 0;
6e30dd4e 11698
619c5cb6
VZ
11699	/* First schedule a cleanup of old configuration */
11700 rc = bnx2x_del_all_macs(bp, mac_obj, BNX2X_UC_LIST_MAC, false);
11701 if (rc < 0) {
11702 BNX2X_ERR("Failed to schedule DELETE operations: %d\n", rc);
11703 return rc;
11704 }
6e30dd4e
VZ
11705
11706 netdev_for_each_uc_addr(ha, dev) {
619c5cb6
VZ
11707 rc = bnx2x_set_mac_one(bp, bnx2x_uc_addr(ha), mac_obj, true,
11708 BNX2X_UC_LIST_MAC, &ramrod_flags);
7b5342d9
YM
11709 if (rc == -EEXIST) {
11710 DP(BNX2X_MSG_SP,
11711 "Failed to schedule ADD operations: %d\n", rc);
11712 /* do not treat adding same MAC as error */
11713 rc = 0;
11714
11715 } else if (rc < 0) {
11716
619c5cb6
VZ
11717 BNX2X_ERR("Failed to schedule ADD operations: %d\n",
11718 rc);
11719 return rc;
6e30dd4e
VZ
11720 }
11721 }
11722
619c5cb6
VZ
11723 /* Execute the pending commands */
11724 __set_bit(RAMROD_CONT, &ramrod_flags);
11725 return bnx2x_set_mac_one(bp, NULL, mac_obj, false /* don't care */,
11726 BNX2X_UC_LIST_MAC, &ramrod_flags);
6e30dd4e
VZ
11727}
11728
1191cb83 11729static int bnx2x_set_mc_list(struct bnx2x *bp)
6e30dd4e 11730{
619c5cb6 11731 struct net_device *dev = bp->dev;
3b603066 11732 struct bnx2x_mcast_ramrod_params rparam = {NULL};
619c5cb6 11733 int rc = 0;
6e30dd4e 11734
619c5cb6 11735 rparam.mcast_obj = &bp->mcast_obj;
6e30dd4e 11736
619c5cb6
VZ
11737 /* first, clear all configured multicast MACs */
11738 rc = bnx2x_config_mcast(bp, &rparam, BNX2X_MCAST_CMD_DEL);
11739 if (rc < 0) {
51c1a580 11740 BNX2X_ERR("Failed to clear multicast configuration: %d\n", rc);
619c5cb6
VZ
11741 return rc;
11742 }
6e30dd4e 11743
619c5cb6
VZ
11744 /* then, configure a new MACs list */
11745 if (netdev_mc_count(dev)) {
11746 rc = bnx2x_init_mcast_macs_list(bp, &rparam);
11747 if (rc) {
51c1a580
MS
11748 BNX2X_ERR("Failed to create multicast MACs list: %d\n",
11749 rc);
619c5cb6
VZ
11750 return rc;
11751 }
6e30dd4e 11752
619c5cb6
VZ
11753 /* Now add the new MACs */
11754 rc = bnx2x_config_mcast(bp, &rparam,
11755 BNX2X_MCAST_CMD_ADD);
11756 if (rc < 0)
51c1a580
MS
11757 BNX2X_ERR("Failed to set a new multicast configuration: %d\n",
11758 rc);
6e30dd4e 11759
619c5cb6
VZ
11760 bnx2x_free_mcast_macs_list(&rparam);
11761 }
6e30dd4e 11762
619c5cb6 11763 return rc;
6e30dd4e
VZ
11764}
11765
619c5cb6 11766/* If bp->state is OPEN, should be called with netif_addr_lock_bh() */
9f6c9258 11767void bnx2x_set_rx_mode(struct net_device *dev)
34f80b04
EG
11768{
11769 struct bnx2x *bp = netdev_priv(dev);
11770 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
34f80b04
EG
11771
11772 if (bp->state != BNX2X_STATE_OPEN) {
11773 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
11774 return;
11775 }
11776
619c5cb6 11777 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", bp->dev->flags);
34f80b04
EG
11778
11779 if (dev->flags & IFF_PROMISC)
11780 rx_mode = BNX2X_RX_MODE_PROMISC;
619c5cb6
VZ
11781 else if ((dev->flags & IFF_ALLMULTI) ||
11782 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
11783 CHIP_IS_E1(bp)))
34f80b04 11784 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6e30dd4e 11785 else {
381ac16b
AE
11786 if (IS_PF(bp)) {
11787 /* some multicasts */
11788 if (bnx2x_set_mc_list(bp) < 0)
11789 rx_mode = BNX2X_RX_MODE_ALLMULTI;
34f80b04 11790
381ac16b
AE
11791 if (bnx2x_set_uc_list(bp) < 0)
11792 rx_mode = BNX2X_RX_MODE_PROMISC;
11793 } else {
11794 /* configuring mcast to a vf involves sleeping (when we
11795 * wait for the pf's response). Since this function is
11796	 * called from a non-sleepable context we must schedule
11797 * a work item for this purpose
11798 */
11799 smp_mb__before_clear_bit();
11800 set_bit(BNX2X_SP_RTNL_VFPF_MCAST,
11801 &bp->sp_rtnl_state);
11802 smp_mb__after_clear_bit();
11803 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11804 }
34f80b04
EG
11805 }
11806
11807 bp->rx_mode = rx_mode;
614c76df
DK
11808 /* handle ISCSI SD mode */
11809 if (IS_MF_ISCSI_SD(bp))
11810 bp->rx_mode = BNX2X_RX_MODE_NONE;
619c5cb6
VZ
11811
11812 /* Schedule the rx_mode command */
11813 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state)) {
11814 set_bit(BNX2X_FILTER_RX_MODE_SCHED, &bp->sp_state);
11815 return;
11816 }
11817
381ac16b
AE
11818 if (IS_PF(bp)) {
11819 bnx2x_set_storm_rx_mode(bp);
11820 } else {
11821 /* configuring rx mode to storms in a vf involves sleeping (when
11822 * we wait for the pf's response). Since this function is
11823	 * called from a non-sleepable context we must schedule
11824 * a work item for this purpose
11825 */
11826 smp_mb__before_clear_bit();
11827 set_bit(BNX2X_SP_RTNL_VFPF_STORM_RX_MODE,
11828 &bp->sp_rtnl_state);
11829 smp_mb__after_clear_bit();
11830 schedule_delayed_work(&bp->sp_rtnl_task, 0);
11831 }
34f80b04
EG
11832}
11833
c18487ee 11834/* called with rtnl_lock */
01cd4528
EG
11835static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
11836 int devad, u16 addr)
a2fbb9ea 11837{
01cd4528
EG
11838 struct bnx2x *bp = netdev_priv(netdev);
11839 u16 value;
11840 int rc;
a2fbb9ea 11841
01cd4528
EG
11842 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
11843 prtad, devad, addr);
a2fbb9ea 11844
01cd4528
EG
11845 /* The HW expects different devad if CL22 is used */
11846 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
c18487ee 11847
01cd4528 11848 bnx2x_acquire_phy_lock(bp);
e10bc84d 11849 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
01cd4528
EG
11850 bnx2x_release_phy_lock(bp);
11851 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
a2fbb9ea 11852
01cd4528
EG
11853 if (!rc)
11854 rc = value;
11855 return rc;
11856}
a2fbb9ea 11857
01cd4528
EG
11858/* called with rtnl_lock */
11859static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
11860 u16 addr, u16 value)
11861{
11862 struct bnx2x *bp = netdev_priv(netdev);
01cd4528
EG
11863 int rc;
11864
51c1a580
MS
11865 DP(NETIF_MSG_LINK,
11866 "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x, value 0x%x\n",
11867 prtad, devad, addr, value);
01cd4528 11868
01cd4528
EG
11869 /* The HW expects different devad if CL22 is used */
11870 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
a2fbb9ea 11871
01cd4528 11872 bnx2x_acquire_phy_lock(bp);
e10bc84d 11873 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
01cd4528
EG
11874 bnx2x_release_phy_lock(bp);
11875 return rc;
11876}
c18487ee 11877
01cd4528
EG
11878/* called with rtnl_lock */
11879static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
11880{
11881 struct bnx2x *bp = netdev_priv(dev);
11882 struct mii_ioctl_data *mdio = if_mii(ifr);
a2fbb9ea 11883
01cd4528
EG
11884 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
11885 mdio->phy_id, mdio->reg_num, mdio->val_in);
a2fbb9ea 11886
01cd4528
EG
11887 if (!netif_running(dev))
11888 return -EAGAIN;
11889
11890 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
a2fbb9ea
ET
11891}
11892
257ddbda 11893#ifdef CONFIG_NET_POLL_CONTROLLER
a2fbb9ea
ET
11894static void poll_bnx2x(struct net_device *dev)
11895{
11896 struct bnx2x *bp = netdev_priv(dev);
14a15d61 11897 int i;
a2fbb9ea 11898
14a15d61
MS
11899 for_each_eth_queue(bp, i) {
11900 struct bnx2x_fastpath *fp = &bp->fp[i];
11901 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
11902 }
a2fbb9ea
ET
11903}
11904#endif
11905
614c76df
DK
11906static int bnx2x_validate_addr(struct net_device *dev)
11907{
11908 struct bnx2x *bp = netdev_priv(dev);
11909
e09b74d0
AE
11910 /* query the bulletin board for mac address configured by the PF */
11911 if (IS_VF(bp))
11912 bnx2x_sample_bulletin(bp);
11913
51c1a580
MS
11914 if (!bnx2x_is_valid_ether_addr(bp, dev->dev_addr)) {
11915 BNX2X_ERR("Non-valid Ethernet address\n");
614c76df 11916 return -EADDRNOTAVAIL;
51c1a580 11917 }
614c76df
DK
11918 return 0;
11919}
11920
c64213cd
SH
11921static const struct net_device_ops bnx2x_netdev_ops = {
11922 .ndo_open = bnx2x_open,
11923 .ndo_stop = bnx2x_close,
11924 .ndo_start_xmit = bnx2x_start_xmit,
8307fa3e 11925 .ndo_select_queue = bnx2x_select_queue,
6e30dd4e 11926 .ndo_set_rx_mode = bnx2x_set_rx_mode,
c64213cd 11927 .ndo_set_mac_address = bnx2x_change_mac_addr,
614c76df 11928 .ndo_validate_addr = bnx2x_validate_addr,
c64213cd
SH
11929 .ndo_do_ioctl = bnx2x_ioctl,
11930 .ndo_change_mtu = bnx2x_change_mtu,
66371c44
MM
11931 .ndo_fix_features = bnx2x_fix_features,
11932 .ndo_set_features = bnx2x_set_features,
c64213cd 11933 .ndo_tx_timeout = bnx2x_tx_timeout,
257ddbda 11934#ifdef CONFIG_NET_POLL_CONTROLLER
c64213cd
SH
11935 .ndo_poll_controller = poll_bnx2x,
11936#endif
6383c0b3 11937 .ndo_setup_tc = bnx2x_setup_tc,
6411280a 11938#ifdef CONFIG_BNX2X_SRIOV
abc5a021 11939 .ndo_set_vf_mac = bnx2x_set_vf_mac,
3ec9f9ca
AE
11940 .ndo_set_vf_vlan = bnx2x_set_vf_vlan,
11941 .ndo_get_vf_config = bnx2x_get_vf_config,
6411280a 11942#endif
55c11941 11943#ifdef NETDEV_FCOE_WWNN
bf61ee14
VZ
11944 .ndo_fcoe_get_wwn = bnx2x_fcoe_get_wwn,
11945#endif
c64213cd
SH
11946};
11947
1191cb83 11948static int bnx2x_set_coherency_mask(struct bnx2x *bp)
619c5cb6
VZ
11949{
11950 struct device *dev = &bp->pdev->dev;
11951
11952 if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
11953 bp->flags |= USING_DAC_FLAG;
11954 if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
51c1a580 11955 dev_err(dev, "dma_set_coherent_mask failed, aborting\n");
619c5cb6
VZ
11956 return -EIO;
11957 }
11958 } else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
11959 dev_err(dev, "System does not support DMA, aborting\n");
11960 return -EIO;
11961 }
11962
11963 return 0;
11964}
11965
1ab4434c
AE
11966static int bnx2x_init_dev(struct bnx2x *bp, struct pci_dev *pdev,
11967 struct net_device *dev, unsigned long board_type)
a2fbb9ea 11968{
a2fbb9ea 11969 int rc;
c22610d0 11970 u32 pci_cfg_dword;
65087cfe
AE
11971 bool chip_is_e1x = (board_type == BCM57710 ||
11972 board_type == BCM57711 ||
11973 board_type == BCM57711E);
a2fbb9ea
ET
11974
11975 SET_NETDEV_DEV(dev, &pdev->dev);
a2fbb9ea 11976
34f80b04
EG
11977 bp->dev = dev;
11978 bp->pdev = pdev;
a2fbb9ea
ET
11979
11980 rc = pci_enable_device(pdev);
11981 if (rc) {
cdaa7cb8
VZ
11982 dev_err(&bp->pdev->dev,
11983 "Cannot enable PCI device, aborting\n");
a2fbb9ea
ET
11984 goto err_out;
11985 }
11986
11987 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
cdaa7cb8
VZ
11988 dev_err(&bp->pdev->dev,
11989 "Cannot find PCI device base address, aborting\n");
a2fbb9ea
ET
11990 rc = -ENODEV;
11991 goto err_out_disable;
11992 }
11993
1ab4434c
AE
11994 if (IS_PF(bp) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
11995 dev_err(&bp->pdev->dev, "Cannot find second PCI device base address, aborting\n");
a2fbb9ea
ET
11996 rc = -ENODEV;
11997 goto err_out_disable;
11998 }
11999
092a5fc9
YR
12000 pci_read_config_dword(pdev, PCICFG_REVISION_ID_OFFSET, &pci_cfg_dword);
12001 if ((pci_cfg_dword & PCICFG_REVESION_ID_MASK) ==
12002 PCICFG_REVESION_ID_ERROR_VAL) {
12003 pr_err("PCI device error, probably due to fan failure, aborting\n");
12004 rc = -ENODEV;
12005 goto err_out_disable;
12006 }
12007
34f80b04
EG
12008 if (atomic_read(&pdev->enable_cnt) == 1) {
12009 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
12010 if (rc) {
cdaa7cb8
VZ
12011 dev_err(&bp->pdev->dev,
12012 "Cannot obtain PCI resources, aborting\n");
34f80b04
EG
12013 goto err_out_disable;
12014 }
a2fbb9ea 12015
34f80b04
EG
12016 pci_set_master(pdev);
12017 pci_save_state(pdev);
12018 }
a2fbb9ea 12019
1ab4434c
AE
12020 if (IS_PF(bp)) {
12021 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
12022 if (bp->pm_cap == 0) {
12023 dev_err(&bp->pdev->dev,
12024 "Cannot find power management capability, aborting\n");
12025 rc = -EIO;
12026 goto err_out_release;
12027 }
a2fbb9ea
ET
12028 }
12029
77c98e6a 12030 if (!pci_is_pcie(pdev)) {
51c1a580 12031 dev_err(&bp->pdev->dev, "Not PCI Express, aborting\n");
a2fbb9ea
ET
12032 rc = -EIO;
12033 goto err_out_release;
12034 }
12035
619c5cb6
VZ
12036 rc = bnx2x_set_coherency_mask(bp);
12037 if (rc)
a2fbb9ea 12038 goto err_out_release;
a2fbb9ea 12039
34f80b04
EG
12040 dev->mem_start = pci_resource_start(pdev, 0);
12041 dev->base_addr = dev->mem_start;
12042 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
12043
12044 dev->irq = pdev->irq;
12045
275f165f 12046 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea 12047 if (!bp->regview) {
cdaa7cb8
VZ
12048 dev_err(&bp->pdev->dev,
12049 "Cannot map register space, aborting\n");
a2fbb9ea
ET
12050 rc = -ENOMEM;
12051 goto err_out_release;
12052 }
12053
c22610d0
AE
12054 /* In E1/E1H use pci device function given by kernel.
12055 * In E2/E3 read physical function from ME register since these chips
12056	 * support Physical Device Assignment where the kernel BDF may be arbitrary
12057 * (depending on hypervisor).
12058 */
2de67439 12059 if (chip_is_e1x) {
c22610d0 12060 bp->pf_num = PCI_FUNC(pdev->devfn);
2de67439
YM
12061 } else {
12062 /* chip is E2/3*/
c22610d0
AE
12063 pci_read_config_dword(bp->pdev,
12064 PCICFG_ME_REGISTER, &pci_cfg_dword);
12065 bp->pf_num = (u8)((pci_cfg_dword & ME_REG_ABS_PF_NUM) >>
2de67439 12066 ME_REG_ABS_PF_NUM_SHIFT);
c22610d0 12067 }
51c1a580 12068 BNX2X_DEV_INFO("me reg PF num: %d\n", bp->pf_num);
c22610d0 12069
a2fbb9ea
ET
12070 bnx2x_set_power_state(bp, PCI_D0);
12071
34f80b04
EG
12072 /* clean indirect addresses */
12073 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
12074 PCICFG_VENDOR_ID_OFFSET);
a5c53dbc
DK
12075 /*
12076	 * Clean the following indirect addresses for all functions since they
9f0096a1
DK
12077	 * are not used by the driver.
12078 */
1ab4434c
AE
12079 if (IS_PF(bp)) {
12080 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0, 0);
12081 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0, 0);
12082 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
12083 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
12084
12085 if (chip_is_e1x) {
12086 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
12087 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
12088 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
12089 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F1, 0);
12090 }
a5c53dbc 12091
1ab4434c
AE
12092 /* Enable internal target-read (in case we are probed after PF
12093 * FLR). Must be done prior to any BAR read access. Only for
12094 * 57712 and up
12095 */
12096 if (!chip_is_e1x)
12097 REG_WR(bp,
12098 PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
a5c53dbc 12099 }
a2fbb9ea 12100
34f80b04 12101 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 12102
c64213cd 12103 dev->netdev_ops = &bnx2x_netdev_ops;
005a07ba 12104 bnx2x_set_ethtool_ops(bp, dev);
5316bc0b 12105
01789349
JP
12106 dev->priv_flags |= IFF_UNICAST_FLT;
12107
66371c44 12108 dev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
621b4d66
DK
12109 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
12110 NETIF_F_RXCSUM | NETIF_F_LRO | NETIF_F_GRO |
f646968f 12111 NETIF_F_RXHASH | NETIF_F_HW_VLAN_CTAG_TX;
a848ade4 12112 if (!CHIP_IS_E1x(bp)) {
65bc0cfe 12113 dev->hw_features |= NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
a848ade4
DK
12114 dev->hw_enc_features =
12115 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_SG |
12116 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 |
65bc0cfe 12117 NETIF_F_GSO_GRE | NETIF_F_GSO_UDP_TUNNEL;
a848ade4 12118 }
66371c44
MM
12119
12120 dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
12121 NETIF_F_TSO | NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_HIGHDMA;
12122
f646968f 12123 dev->features |= dev->hw_features | NETIF_F_HW_VLAN_CTAG_RX;
5316bc0b 12124 if (bp->flags & USING_DAC_FLAG)
66371c44 12125 dev->features |= NETIF_F_HIGHDMA;
a2fbb9ea 12126
538dd2e3
MB
12127 /* Add Loopback capability to the device */
12128 dev->hw_features |= NETIF_F_LOOPBACK;
12129
98507672 12130#ifdef BCM_DCBNL
785b9b1a
SR
12131 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
12132#endif
12133
01cd4528
EG
12134 /* get_port_hwinfo() will set prtad and mmds properly */
12135 bp->mdio.prtad = MDIO_PRTAD_NONE;
12136 bp->mdio.mmds = 0;
12137 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
12138 bp->mdio.dev = dev;
12139 bp->mdio.mdio_read = bnx2x_mdio_read;
12140 bp->mdio.mdio_write = bnx2x_mdio_write;
12141
a2fbb9ea
ET
12142 return 0;
12143
a2fbb9ea 12144err_out_release:
34f80b04
EG
12145 if (atomic_read(&pdev->enable_cnt) == 1)
12146 pci_release_regions(pdev);
a2fbb9ea
ET
12147
12148err_out_disable:
12149 pci_disable_device(pdev);
12150 pci_set_drvdata(pdev, NULL);
12151
12152err_out:
12153 return rc;
12154}
12155
ca1ee4b2
DK
12156static void bnx2x_get_pcie_width_speed(struct bnx2x *bp, int *width,
12157 enum bnx2x_pci_bus_speed *speed)
25047950 12158{
ca1ee4b2 12159 u32 link_speed, val = 0;
25047950 12160
1ab4434c 12161 pci_read_config_dword(bp->pdev, PCICFG_LINK_CONTROL, &val);
37f9ce62 12162 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
25047950 12163
ca1ee4b2
DK
12164 link_speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
12165
12166 switch (link_speed) {
12167 case 3:
12168 *speed = BNX2X_PCI_LINK_SPEED_8000;
12169 break;
12170 case 2:
12171 *speed = BNX2X_PCI_LINK_SPEED_5000;
12172 break;
12173 default:
12174 *speed = BNX2X_PCI_LINK_SPEED_2500;
12175 }
25047950 12176}
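
/* Illustrative example (sketch, not in the original file): a link-status
 * dword whose PCICFG_LINK_SPEED field decodes to 2 yields
 * BNX2X_PCI_LINK_SPEED_5000, which bnx2x_init_one() prints as "5.0GHz";
 * a value of 3 maps to 8.0GT/s and any other value falls back to 2.5GT/s.
 */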
37f9ce62 12177
6891dd25 12178static int bnx2x_check_firmware(struct bnx2x *bp)
94a78b79 12179{
37f9ce62 12180 const struct firmware *firmware = bp->firmware;
94a78b79
VZ
12181 struct bnx2x_fw_file_hdr *fw_hdr;
12182 struct bnx2x_fw_file_section *sections;
94a78b79 12183 u32 offset, len, num_ops;
86564c3f 12184 __be16 *ops_offsets;
94a78b79 12185 int i;
37f9ce62 12186 const u8 *fw_ver;
94a78b79 12187
51c1a580
MS
12188 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr)) {
12189 BNX2X_ERR("Wrong FW size\n");
94a78b79 12190 return -EINVAL;
51c1a580 12191 }
94a78b79
VZ
12192
12193 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
12194 sections = (struct bnx2x_fw_file_section *)fw_hdr;
12195
12196 /* Make sure none of the offsets and sizes make us read beyond
12197 * the end of the firmware data */
12198 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
12199 offset = be32_to_cpu(sections[i].offset);
12200 len = be32_to_cpu(sections[i].len);
12201 if (offset + len > firmware->size) {
51c1a580 12202 BNX2X_ERR("Section %d length is out of bounds\n", i);
94a78b79
VZ
12203 return -EINVAL;
12204 }
12205 }
12206
12207 /* Likewise for the init_ops offsets */
12208 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
86564c3f 12209 ops_offsets = (__force __be16 *)(firmware->data + offset);
94a78b79
VZ
12210 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
12211
12212 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
12213 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
51c1a580 12214 BNX2X_ERR("Section offset %d is out of bounds\n", i);
94a78b79
VZ
12215 return -EINVAL;
12216 }
12217 }
12218
12219 /* Check FW version */
12220 offset = be32_to_cpu(fw_hdr->fw_version.offset);
12221 fw_ver = firmware->data + offset;
12222 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
12223 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
12224 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
12225 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
51c1a580
MS
12226 BNX2X_ERR("Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
12227 fw_ver[0], fw_ver[1], fw_ver[2], fw_ver[3],
12228 BCM_5710_FW_MAJOR_VERSION,
94a78b79
VZ
12229 BCM_5710_FW_MINOR_VERSION,
12230 BCM_5710_FW_REVISION_VERSION,
12231 BCM_5710_FW_ENGINEERING_VERSION);
ab6ad5a4 12232 return -EINVAL;
94a78b79
VZ
12233 }
12234
12235 return 0;
12236}
12237
1191cb83 12238static void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12239{
ab6ad5a4
EG
12240 const __be32 *source = (const __be32 *)_source;
12241 u32 *target = (u32 *)_target;
94a78b79 12242 u32 i;
94a78b79
VZ
12243
12244 for (i = 0; i < n/4; i++)
12245 target[i] = be32_to_cpu(source[i]);
12246}
12247
12248/*
12249 Ops array is stored in the following format:
12250 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
12251 */
1191cb83 12252static void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
94a78b79 12253{
ab6ad5a4
EG
12254 const __be32 *source = (const __be32 *)_source;
12255 struct raw_op *target = (struct raw_op *)_target;
94a78b79 12256 u32 i, j, tmp;
94a78b79 12257
ab6ad5a4 12258 for (i = 0, j = 0; i < n/8; i++, j += 2) {
94a78b79
VZ
12259 tmp = be32_to_cpu(source[j]);
12260 target[i].op = (tmp >> 24) & 0xff;
cdaa7cb8
VZ
12261 target[i].offset = tmp & 0xffffff;
12262 target[i].raw_data = be32_to_cpu(source[j + 1]);
94a78b79
VZ
12263 }
12264}
ab6ad5a4 12265
1aa8b471 12266/* IRO array is stored in the following format:
523224a3
DK
12267 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
12268 */
1191cb83 12269static void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
523224a3
DK
12270{
12271 const __be32 *source = (const __be32 *)_source;
12272 struct iro *target = (struct iro *)_target;
12273 u32 i, j, tmp;
12274
12275 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
12276 target[i].base = be32_to_cpu(source[j]);
12277 j++;
12278 tmp = be32_to_cpu(source[j]);
12279 target[i].m1 = (tmp >> 16) & 0xffff;
12280 target[i].m2 = tmp & 0xffff;
12281 j++;
12282 tmp = be32_to_cpu(source[j]);
12283 target[i].m3 = (tmp >> 16) & 0xffff;
12284 target[i].size = tmp & 0xffff;
12285 j++;
12286 }
12287}
12288
1191cb83 12289static void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
94a78b79 12290{
ab6ad5a4
EG
12291 const __be16 *source = (const __be16 *)_source;
12292 u16 *target = (u16 *)_target;
94a78b79 12293 u32 i;
94a78b79
VZ
12294
12295 for (i = 0; i < n/2; i++)
12296 target[i] = be16_to_cpu(source[i]);
12297}
12298
7995c64e
JP
12299#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
12300do { \
12301 u32 len = be32_to_cpu(fw_hdr->arr.len); \
12302 bp->arr = kmalloc(len, GFP_KERNEL); \
e404decb 12303 if (!bp->arr) \
7995c64e 12304 goto lbl; \
7995c64e
JP
12305 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
12306 (u8 *)bp->arr, len); \
12307} while (0)
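
/* Sketch of how the macro expands (illustrative, not generated code):
 * BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops)
 * roughly becomes:
 *
 *	u32 len = be32_to_cpu(fw_hdr->init_ops.len);
 *	bp->init_ops = kmalloc(len, GFP_KERNEL);
 *	if (!bp->init_ops)
 *		goto init_ops_alloc_err;
 *	bnx2x_prep_ops(bp->firmware->data +
 *		       be32_to_cpu(fw_hdr->init_ops.offset),
 *		       (u8 *)bp->init_ops, len);
 */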
94a78b79 12308
3b603066 12309static int bnx2x_init_firmware(struct bnx2x *bp)
94a78b79 12310{
c0ea452e 12311 const char *fw_file_name;
94a78b79 12312 struct bnx2x_fw_file_hdr *fw_hdr;
45229b42 12313 int rc;
94a78b79 12314
c0ea452e
MS
12315 if (bp->firmware)
12316 return 0;
94a78b79 12317
c0ea452e
MS
12318 if (CHIP_IS_E1(bp))
12319 fw_file_name = FW_FILE_NAME_E1;
12320 else if (CHIP_IS_E1H(bp))
12321 fw_file_name = FW_FILE_NAME_E1H;
12322 else if (!CHIP_IS_E1x(bp))
12323 fw_file_name = FW_FILE_NAME_E2;
12324 else {
12325 BNX2X_ERR("Unsupported chip revision\n");
12326 return -EINVAL;
12327 }
12328 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
94a78b79 12329
c0ea452e
MS
12330 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
12331 if (rc) {
12332 BNX2X_ERR("Can't load firmware file %s\n",
12333 fw_file_name);
12334 goto request_firmware_exit;
12335 }
eb2afd4a 12336
c0ea452e
MS
12337 rc = bnx2x_check_firmware(bp);
12338 if (rc) {
12339 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
12340 goto request_firmware_exit;
94a78b79
VZ
12341 }
12342
12343 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
12344
12345 /* Initialize the pointers to the init arrays */
12346 /* Blob */
12347 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
12348
12349 /* Opcodes */
12350 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
12351
12352 /* Offsets */
ab6ad5a4
EG
12353 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
12354 be16_to_cpu_n);
94a78b79
VZ
12355
12356 /* STORMs firmware */
573f2035
EG
12357 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12358 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
12359 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
12360 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
12361 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12362 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
12363 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
12364 be32_to_cpu(fw_hdr->usem_pram_data.offset);
12365 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12366 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
12367 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
12368 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
12369 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
12370 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
12371 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
12372 be32_to_cpu(fw_hdr->csem_pram_data.offset);
523224a3
DK
12373 /* IRO */
12374 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
94a78b79
VZ
12375
12376 return 0;
ab6ad5a4 12377
523224a3
DK
12378iro_alloc_err:
12379 kfree(bp->init_ops_offsets);
94a78b79
VZ
12380init_offsets_alloc_err:
12381 kfree(bp->init_ops);
12382init_ops_alloc_err:
12383 kfree(bp->init_data);
12384request_firmware_exit:
12385 release_firmware(bp->firmware);
127d0a19 12386 bp->firmware = NULL;
94a78b79
VZ
12387
12388 return rc;
12389}
12390
619c5cb6
VZ
12391static void bnx2x_release_firmware(struct bnx2x *bp)
12392{
12393 kfree(bp->init_ops_offsets);
12394 kfree(bp->init_ops);
12395 kfree(bp->init_data);
12396 release_firmware(bp->firmware);
eb2afd4a 12397 bp->firmware = NULL;
619c5cb6
VZ
12398}
12399
12400
12401static struct bnx2x_func_sp_drv_ops bnx2x_func_sp_drv = {
12402 .init_hw_cmn_chip = bnx2x_init_hw_common_chip,
12403 .init_hw_cmn = bnx2x_init_hw_common,
12404 .init_hw_port = bnx2x_init_hw_port,
12405 .init_hw_func = bnx2x_init_hw_func,
12406
12407 .reset_hw_cmn = bnx2x_reset_common,
12408 .reset_hw_port = bnx2x_reset_port,
12409 .reset_hw_func = bnx2x_reset_func,
12410
12411 .gunzip_init = bnx2x_gunzip_init,
12412 .gunzip_end = bnx2x_gunzip_end,
12413
12414 .init_fw = bnx2x_init_firmware,
12415 .release_fw = bnx2x_release_firmware,
12416};
12417
12418void bnx2x__init_func_obj(struct bnx2x *bp)
12419{
12420 /* Prepare DMAE related driver resources */
12421 bnx2x_setup_dmae(bp);
12422
12423 bnx2x_init_func_obj(bp, &bp->func_obj,
12424 bnx2x_sp(bp, func_rdata),
12425 bnx2x_sp_mapping(bp, func_rdata),
a3348722
BW
12426 bnx2x_sp(bp, func_afex_rdata),
12427 bnx2x_sp_mapping(bp, func_afex_rdata),
619c5cb6
VZ
12428 &bnx2x_func_sp_drv);
12429}
12430
12431/* must be called after sriov-enable */
1191cb83 12432static int bnx2x_set_qm_cid_count(struct bnx2x *bp)
523224a3 12433{
37ae41a9 12434 int cid_count = BNX2X_L2_MAX_CID(bp);
94a78b79 12435
290ca2bb
AE
12436 if (IS_SRIOV(bp))
12437 cid_count += BNX2X_VF_CIDS;
12438
55c11941
MS
12439 if (CNIC_SUPPORT(bp))
12440 cid_count += CNIC_CID_MAX;
290ca2bb 12441
523224a3
DK
12442 return roundup(cid_count, QM_CID_ROUND);
12443}
f85582f8 12444
619c5cb6 12445/**
6383c0b3 12446 * bnx2x_get_num_non_def_sbs - return the number of non-default SBs
619c5cb6
VZ
12447 *
12448 * @pdev: pci device
12449 *
12450 */
55c11941 12451static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev,
1ab4434c 12452 int cnic_cnt, bool is_vf)
619c5cb6 12453{
1ab4434c
AE
12454 int pos, index;
12455 u16 control = 0;
619c5cb6
VZ
12456
12457 pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
6383c0b3
AE
12458
12459 /*
12460 * If MSI-X is not supported - return number of SBs needed to support
12461 * one fast path queue: one FP queue + SB for CNIC
12462 */
1ab4434c
AE
12463 if (!pos) {
12464 dev_info(&pdev->dev, "no msix capability found\n");
55c11941 12465 return 1 + cnic_cnt;
1ab4434c
AE
12466 }
12467 dev_info(&pdev->dev, "msix capability found\n");
619c5cb6 12468
6383c0b3
AE
12469 /*
12470 * The value in the PCI configuration space is the index of the last
12471 * entry, namely one less than the actual size of the table, which is
12472 * exactly what we want to return from this function: number of all SBs
12473 * without the default SB.
1ab4434c 12474	 * For VFs there is no default SB, so we return (index+1).
6383c0b3 12475 */
619c5cb6 12476 pci_read_config_word(pdev, pos + PCI_MSI_FLAGS, &control);
619c5cb6 12477
1ab4434c 12478 index = control & PCI_MSIX_FLAGS_QSIZE;
4bd9b0ff 12479
1ab4434c
AE
12480 return is_vf ? index + 1 : index;
12481}
523224a3 12482
1ab4434c
AE
12483static int set_max_cos_est(int chip_id)
12484{
12485 switch (chip_id) {
f2e0899f
DK
12486 case BCM57710:
12487 case BCM57711:
12488 case BCM57711E:
1ab4434c 12489 return BNX2X_MULTI_TX_COS_E1X;
f2e0899f 12490 case BCM57712:
619c5cb6 12491 case BCM57712_MF:
1ab4434c
AE
12492 case BCM57712_VF:
12493 return BNX2X_MULTI_TX_COS_E2_E3A0;
619c5cb6
VZ
12494 case BCM57800:
12495 case BCM57800_MF:
1ab4434c 12496 case BCM57800_VF:
619c5cb6
VZ
12497 case BCM57810:
12498 case BCM57810_MF:
c3def943
YM
12499 case BCM57840_4_10:
12500 case BCM57840_2_20:
1ab4434c 12501 case BCM57840_O:
c3def943 12502 case BCM57840_MFO:
1ab4434c 12503 case BCM57810_VF:
619c5cb6 12504 case BCM57840_MF:
1ab4434c 12505 case BCM57840_VF:
7e8e02df
BW
12506 case BCM57811:
12507 case BCM57811_MF:
1ab4434c
AE
12508 case BCM57811_VF:
12509 return BNX2X_MULTI_TX_COS_E3B0;
f2e0899f 12511 default:
1ab4434c 12512 pr_err("Unknown board_type (%d), aborting\n", chip_id);
870634b0 12513 return -ENODEV;
f2e0899f 12514 }
1ab4434c 12515}
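
/* Usage note (illustrative): set_max_cos_est(BCM57711) returns
 * BNX2X_MULTI_TX_COS_E1X, BCM57712 yields BNX2X_MULTI_TX_COS_E2_E3A0, the
 * 578xx parts yield BNX2X_MULTI_TX_COS_E3B0, and an unknown id gives
 * -ENODEV so bnx2x_init_one() can abort the probe.
 */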
f2e0899f 12516
1ab4434c
AE
12517static int set_is_vf(int chip_id)
12518{
12519 switch (chip_id) {
12520 case BCM57712_VF:
12521 case BCM57800_VF:
12522 case BCM57810_VF:
12523 case BCM57840_VF:
12524 case BCM57811_VF:
12525 return true;
12526 default:
12527 return false;
12528 }
12529}
6383c0b3 12530
1ab4434c
AE
12531struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
12532
12533static int bnx2x_init_one(struct pci_dev *pdev,
12534 const struct pci_device_id *ent)
12535{
12536 struct net_device *dev = NULL;
12537 struct bnx2x *bp;
ca1ee4b2
DK
12538 int pcie_width;
12539 enum bnx2x_pci_bus_speed pcie_speed;
1ab4434c
AE
12540 int rc, max_non_def_sbs;
12541 int rx_count, tx_count, rss_count, doorbell_size;
12542 int max_cos_est;
12543 bool is_vf;
12544 int cnic_cnt;
12545
12546 /* An estimated maximum supported CoS number according to the chip
12547 * version.
12548 * We will try to roughly estimate the maximum number of CoSes this chip
12549 * may support in order to minimize the memory allocated for Tx
12550 * netdev_queue's. This number will be accurately calculated during the
12551 * initialization of bp->max_cos based on the chip versions AND chip
12552 * revision in the bnx2x_init_bp().
12553 */
12554 max_cos_est = set_max_cos_est(ent->driver_data);
12555 if (max_cos_est < 0)
12556 return max_cos_est;
12557 is_vf = set_is_vf(ent->driver_data);
12558 cnic_cnt = is_vf ? 0 : 1;
12559
12560 max_non_def_sbs = bnx2x_get_num_non_def_sbs(pdev, cnic_cnt, is_vf);
6383c0b3
AE
12561
12562 /* Maximum number of RSS queues: one IGU SB goes to CNIC */
1ab4434c
AE
12563 rss_count = is_vf ? 1 : max_non_def_sbs - cnic_cnt;
12564
12565 if (rss_count < 1)
12566 return -EINVAL;
6383c0b3
AE
12567
12568 /* Maximum number of netdev Rx queues: RSS + FCoE L2 */
55c11941 12569 rx_count = rss_count + cnic_cnt;
6383c0b3 12570
1ab4434c 12571 /* Maximum number of netdev Tx queues:
37ae41a9 12572 * Maximum TSS queues * Maximum supported number of CoS + FCoE L2
6383c0b3 12573 */
55c11941 12574 tx_count = rss_count * max_cos_est + cnic_cnt;
f85582f8 12575
a2fbb9ea 12576 /* dev zeroed in init_etherdev */
6383c0b3 12577 dev = alloc_etherdev_mqs(sizeof(*bp), tx_count, rx_count);
41de8d4c 12578 if (!dev)
a2fbb9ea
ET
12579 return -ENOMEM;
12580
a2fbb9ea 12581 bp = netdev_priv(dev);
a2fbb9ea 12582
1ab4434c
AE
12583 bp->flags = 0;
12584 if (is_vf)
12585 bp->flags |= IS_VF_FLAG;
12586
6383c0b3 12587 bp->igu_sb_cnt = max_non_def_sbs;
1ab4434c 12588 bp->igu_base_addr = IS_VF(bp) ? PXP_VF_ADDR_IGU_START : BAR_IGU_INTMEM;
6383c0b3 12589 bp->msg_enable = debug;
55c11941 12590 bp->cnic_support = cnic_cnt;
4bd9b0ff 12591 bp->cnic_probe = bnx2x_cnic_probe;
55c11941 12592
6383c0b3 12593 pci_set_drvdata(pdev, dev);
523224a3 12594
1ab4434c 12595 rc = bnx2x_init_dev(bp, pdev, dev, ent->driver_data);
a2fbb9ea
ET
12596 if (rc < 0) {
12597 free_netdev(dev);
12598 return rc;
12599 }
12600
1ab4434c
AE
12601 BNX2X_DEV_INFO("This is a %s function\n",
12602 IS_PF(bp) ? "physical" : "virtual");
55c11941 12603 BNX2X_DEV_INFO("Cnic support is %s\n", CNIC_SUPPORT(bp) ? "on" : "off");
1ab4434c 12604 BNX2X_DEV_INFO("Max num of status blocks %d\n", max_non_def_sbs);
60aa0509 12605 BNX2X_DEV_INFO("Allocated netdev with %d tx and %d rx queues\n",
2de67439 12606 tx_count, rx_count);
60aa0509 12607
34f80b04 12608 rc = bnx2x_init_bp(bp);
693fc0d1
EG
12609 if (rc)
12610 goto init_one_exit;
12611
1ab4434c
AE
12612 /* Map doorbells here as we need the real value of bp->max_cos which
12613 * is initialized in bnx2x_init_bp() to determine the number of
12614 * l2 connections.
6383c0b3 12615 */
1ab4434c 12616 if (IS_VF(bp)) {
1d6f3cd8 12617 bp->doorbells = bnx2x_vf_doorbells(bp);
6411280a
AE
12618 rc = bnx2x_vf_pci_alloc(bp);
12619 if (rc)
12620 goto init_one_exit;
1ab4434c
AE
12621 } else {
12622 doorbell_size = BNX2X_L2_MAX_CID(bp) * (1 << BNX2X_DB_SHIFT);
12623 if (doorbell_size > pci_resource_len(pdev, 2)) {
12624 dev_err(&bp->pdev->dev,
12625 "Cannot map doorbells, bar size too small, aborting\n");
12626 rc = -ENOMEM;
12627 goto init_one_exit;
12628 }
12629 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
12630 doorbell_size);
37ae41a9 12631 }
6383c0b3
AE
12632 if (!bp->doorbells) {
12633 dev_err(&bp->pdev->dev,
12634 "Cannot map doorbell space, aborting\n");
12635 rc = -ENOMEM;
12636 goto init_one_exit;
12637 }
12638
be1f1ffa
AE
12639 if (IS_VF(bp)) {
12640 rc = bnx2x_vfpf_acquire(bp, tx_count, rx_count);
12641 if (rc)
12642 goto init_one_exit;
12643 }
12644
3c76feff
AE
12645 /* Enable SRIOV if capability found in configuration space */
12646 rc = bnx2x_iov_init_one(bp, int_mode, BNX2X_MAX_NUM_OF_VFS);
290ca2bb
AE
12647 if (rc)
12648 goto init_one_exit;
12649
523224a3 12650 /* calc qm_cid_count */
6383c0b3 12651 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp);
1ab4434c 12652 BNX2X_DEV_INFO("qm_cid_count %d\n", bp->qm_cid_count);
523224a3 12653
55c11941 12654 /* disable FCOE L2 queue for E1x*/
62ac0dc9 12655 if (CHIP_IS_E1x(bp))
ec6ba945
VZ
12656 bp->flags |= NO_FCOE_FLAG;
12657
0e8d2ec5
MS
12658 /* Set bp->num_queues for MSI-X mode*/
12659 bnx2x_set_num_queues(bp);
12660
25985edc 12661 /* Configure interrupt mode: try to enable MSI-X/MSI if
0e8d2ec5 12662 * needed.
d6214d7a 12663 */
1ab4434c
AE
12664 rc = bnx2x_set_int_mode(bp);
12665 if (rc) {
12666 dev_err(&pdev->dev, "Cannot set interrupts\n");
12667 goto init_one_exit;
12668 }
04c46736 12669 BNX2X_DEV_INFO("set interrupts successfully\n");
d6214d7a 12670
1ab4434c 12671 /* register the net device */
b340007f
VZ
12672 rc = register_netdev(dev);
12673 if (rc) {
12674 dev_err(&pdev->dev, "Cannot register net device\n");
12675 goto init_one_exit;
12676 }
1ab4434c 12677 BNX2X_DEV_INFO("device name after netdev register %s\n", dev->name);
b340007f 12678
55c11941 12679
ec6ba945
VZ
12680 if (!NO_FCOE(bp)) {
12681 /* Add storage MAC address */
12682 rtnl_lock();
12683 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12684 rtnl_unlock();
12685 }
ec6ba945 12686
37f9ce62 12687 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
1ab4434c
AE
12688 BNX2X_DEV_INFO("got pcie width %d and speed %d\n",
12689 pcie_width, pcie_speed);
d6214d7a 12690
ca1ee4b2
DK
12691 BNX2X_DEV_INFO("%s (%c%d) PCI-E x%d %s found at mem %lx, IRQ %d, node addr %pM\n",
12692 board_info[ent->driver_data].name,
12693 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
12694 pcie_width,
12695 pcie_speed == BNX2X_PCI_LINK_SPEED_2500 ? "2.5GHz" :
12696 pcie_speed == BNX2X_PCI_LINK_SPEED_5000 ? "5.0GHz" :
12697 pcie_speed == BNX2X_PCI_LINK_SPEED_8000 ? "8.0GHz" :
12698 "Unknown",
12699 dev->base_addr, bp->pdev->irq, dev->dev_addr);
c016201c 12700
a2fbb9ea 12701 return 0;
34f80b04
EG
12702
12703init_one_exit:
12704 if (bp->regview)
12705 iounmap(bp->regview);
12706
1ab4434c 12707 if (IS_PF(bp) && bp->doorbells)
34f80b04
EG
12708 iounmap(bp->doorbells);
12709
12710 free_netdev(dev);
12711
12712 if (atomic_read(&pdev->enable_cnt) == 1)
12713 pci_release_regions(pdev);
12714
12715 pci_disable_device(pdev);
12716 pci_set_drvdata(pdev, NULL);
12717
12718 return rc;
a2fbb9ea
ET
12719}
12720
b030ed2f
YM
12721static void __bnx2x_remove(struct pci_dev *pdev,
12722 struct net_device *dev,
12723 struct bnx2x *bp,
12724 bool remove_netdev)
a2fbb9ea 12725{
ec6ba945
VZ
12726 /* Delete storage MAC address */
12727 if (!NO_FCOE(bp)) {
12728 rtnl_lock();
12729 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
12730 rtnl_unlock();
12731 }
ec6ba945 12732
98507672
SR
12733#ifdef BCM_DCBNL
12734 /* Delete app tlvs from dcbnl */
12735 bnx2x_dcbnl_update_applist(bp, true);
12736#endif
12737
b030ed2f
YM
12738 /* Close the interface - either directly or implicitly */
12739 if (remove_netdev) {
12740 unregister_netdev(dev);
12741 } else {
12742 rtnl_lock();
12743 if (netif_running(dev))
12744 bnx2x_close(dev);
12745 rtnl_unlock();
12746 }
a2fbb9ea 12747
084d6cbb 12748 /* Power on: we can't let PCI layer write to us while we are in D3 */
1ab4434c
AE
12749 if (IS_PF(bp))
12750 bnx2x_set_power_state(bp, PCI_D0);
084d6cbb 12751
d6214d7a
DK
12752 /* Disable MSI/MSI-X */
12753 bnx2x_disable_msi(bp);
f85582f8 12754
084d6cbb 12755 /* Power off */
1ab4434c
AE
12756 if (IS_PF(bp))
12757 bnx2x_set_power_state(bp, PCI_D3hot);
084d6cbb 12758
72fd0718 12759 /* Make sure RESET task is not scheduled before continuing */
7be08a72 12760 cancel_delayed_work_sync(&bp->sp_rtnl_task);
290ca2bb
AE
12761
12762 bnx2x_iov_remove_one(bp);
12763
4513f925
AE
12764 /* send message via vfpf channel to release the resources of this vf */
12765 if (IS_VF(bp))
12766 bnx2x_vfpf_release(bp);
72fd0718 12767
b030ed2f
YM
12768 /* Assumes no further PCIe PM changes will occur */
12769 if (system_state == SYSTEM_POWER_OFF) {
12770 pci_wake_from_d3(pdev, bp->wol);
12771 pci_set_power_state(pdev, PCI_D3hot);
12772 }
12773
a2fbb9ea
ET
12774 if (bp->regview)
12775 iounmap(bp->regview);
12776
1ab4434c
AE
12777	/* for a VF, doorbells are part of the regview and were unmapped along with
12778 * it. FW is only loaded by PF.
12779 */
12780 if (IS_PF(bp)) {
12781 if (bp->doorbells)
12782 iounmap(bp->doorbells);
eb2afd4a 12783
1ab4434c
AE
12784 bnx2x_release_firmware(bp);
12785 }
523224a3
DK
12786 bnx2x_free_mem_bp(bp);
12787
b030ed2f
YM
12788 if (remove_netdev)
12789 free_netdev(dev);
34f80b04
EG
12790
12791 if (atomic_read(&pdev->enable_cnt) == 1)
12792 pci_release_regions(pdev);
12793
a2fbb9ea
ET
12794 pci_disable_device(pdev);
12795 pci_set_drvdata(pdev, NULL);
12796}
12797
b030ed2f
YM
12798static void bnx2x_remove_one(struct pci_dev *pdev)
12799{
12800 struct net_device *dev = pci_get_drvdata(pdev);
12801 struct bnx2x *bp;
12802
12803 if (!dev) {
12804 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
12805 return;
12806 }
12807 bp = netdev_priv(dev);
12808
12809 __bnx2x_remove(pdev, dev, bp, true);
12810}
12811
f8ef6e44
YG
12812static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
12813{
7fa6f340 12814 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
f8ef6e44
YG
12815
12816 bp->rx_mode = BNX2X_RX_MODE_NONE;
12817
55c11941
MS
12818 if (CNIC_LOADED(bp))
12819 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
12820
619c5cb6
VZ
12821 /* Stop Tx */
12822 bnx2x_tx_disable(bp);
26614ba5
MS
12823 /* Delete all NAPI objects */
12824 bnx2x_del_all_napi(bp);
55c11941
MS
12825 if (CNIC_LOADED(bp))
12826 bnx2x_del_all_napi_cnic(bp);
7fa6f340 12827 netdev_reset_tc(bp->dev);
f8ef6e44
YG
12828
12829 del_timer_sync(&bp->timer);
7fa6f340
YM
12830 cancel_delayed_work(&bp->sp_task);
12831 cancel_delayed_work(&bp->period_task);
619c5cb6 12832
7fa6f340
YM
12833 spin_lock_bh(&bp->stats_lock);
12834 bp->stats_state = STATS_STATE_DISABLED;
12835 spin_unlock_bh(&bp->stats_lock);
f8ef6e44 12836
7fa6f340 12837 bnx2x_save_statistics(bp);
f8ef6e44 12838
619c5cb6
VZ
12839 netif_carrier_off(bp->dev);
12840
f8ef6e44
YG
12841 return 0;
12842}
12843
493adb1f
WX
12844/**
12845 * bnx2x_io_error_detected - called when PCI error is detected
12846 * @pdev: Pointer to PCI device
12847 * @state: The current pci connection state
12848 *
12849 * This function is called after a PCI bus error affecting
12850 * this device has been detected.
12851 */
12852static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
12853 pci_channel_state_t state)
12854{
12855 struct net_device *dev = pci_get_drvdata(pdev);
12856 struct bnx2x *bp = netdev_priv(dev);
12857
12858 rtnl_lock();
12859
7fa6f340
YM
12860 BNX2X_ERR("IO error detected\n");
12861
493adb1f
WX
12862 netif_device_detach(dev);
12863
07ce50e4
DN
12864 if (state == pci_channel_io_perm_failure) {
12865 rtnl_unlock();
12866 return PCI_ERS_RESULT_DISCONNECT;
12867 }
12868
493adb1f 12869 if (netif_running(dev))
f8ef6e44 12870 bnx2x_eeh_nic_unload(bp);
493adb1f 12871
7fa6f340
YM
12872 bnx2x_prev_path_mark_eeh(bp);
12873
493adb1f
WX
12874 pci_disable_device(pdev);
12875
12876 rtnl_unlock();
12877
12878 /* Request a slot reset */
12879 return PCI_ERS_RESULT_NEED_RESET;
12880}
12881
12882/**
12883 * bnx2x_io_slot_reset - called after the PCI bus has been reset
12884 * @pdev: Pointer to PCI device
12885 *
12886 * Restart the card from scratch, as if from a cold-boot.
12887 */
12888static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
12889{
12890 struct net_device *dev = pci_get_drvdata(pdev);
12891 struct bnx2x *bp = netdev_priv(dev);
7fa6f340 12892 int i;
493adb1f
WX
12893
12894 rtnl_lock();
7fa6f340 12895 BNX2X_ERR("IO slot reset initializing...\n");
493adb1f
WX
12896 if (pci_enable_device(pdev)) {
12897 dev_err(&pdev->dev,
12898 "Cannot re-enable PCI device after reset\n");
12899 rtnl_unlock();
12900 return PCI_ERS_RESULT_DISCONNECT;
12901 }
12902
12903 pci_set_master(pdev);
12904 pci_restore_state(pdev);
70632d0a 12905 pci_save_state(pdev);
493adb1f
WX
12906
12907 if (netif_running(dev))
12908 bnx2x_set_power_state(bp, PCI_D0);
12909
7fa6f340
YM
12910 if (netif_running(dev)) {
12911 BNX2X_ERR("IO slot reset --> driver unload\n");
e68072ef
YM
12912
12913 /* MCP should have been reset; Need to wait for validity */
12914 bnx2x_init_shmem(bp);
12915
7fa6f340
YM
12916 if (IS_PF(bp) && SHMEM2_HAS(bp, drv_capabilities_flag)) {
12917 u32 v;
12918
12919 v = SHMEM2_RD(bp,
12920 drv_capabilities_flag[BP_FW_MB_IDX(bp)]);
12921 SHMEM2_WR(bp, drv_capabilities_flag[BP_FW_MB_IDX(bp)],
12922 v & ~DRV_FLAGS_CAPABILITIES_LOADED_L2);
12923 }
12924 bnx2x_drain_tx_queues(bp);
12925 bnx2x_send_unload_req(bp, UNLOAD_RECOVERY);
12926 bnx2x_netif_stop(bp, 1);
12927 bnx2x_free_irq(bp);
12928
12929 /* Report UNLOAD_DONE to MCP */
12930 bnx2x_send_unload_done(bp, true);
12931
12932 bp->sp_state = 0;
12933 bp->port.pmf = 0;
12934
12935 bnx2x_prev_unload(bp);
12936
12937	 /* We should have reset the engine, so it's fair to
12938 * assume the FW will no longer write to the bnx2x driver.
12939 */
12940 bnx2x_squeeze_objects(bp);
12941 bnx2x_free_skbs(bp);
12942 for_each_rx_queue(bp, i)
12943 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
12944 bnx2x_free_fp_mem(bp);
12945 bnx2x_free_mem(bp);
12946
12947 bp->state = BNX2X_STATE_CLOSED;
12948 }
12949
493adb1f
WX
12950 rtnl_unlock();
12951
12952 return PCI_ERS_RESULT_RECOVERED;
12953}
12954
12955/**
12956 * bnx2x_io_resume - called when traffic can start flowing again
12957 * @pdev: Pointer to PCI device
12958 *
12959 * This callback is called when the error recovery driver tells us that
12960 * its OK to resume normal operation.
12961 */
12962static void bnx2x_io_resume(struct pci_dev *pdev)
12963{
12964 struct net_device *dev = pci_get_drvdata(pdev);
12965 struct bnx2x *bp = netdev_priv(dev);
12966
72fd0718 12967 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
51c1a580 12968 netdev_err(bp->dev, "Handling parity error recovery. Try again later\n");
72fd0718
VZ
12969 return;
12970 }
12971
493adb1f
WX
12972 rtnl_lock();
12973
7fa6f340
YM
12974 bp->fw_seq = SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
12975 DRV_MSG_SEQ_NUMBER_MASK;
12976
493adb1f 12977 if (netif_running(dev))
f8ef6e44 12978 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
12979
12980 netif_device_attach(dev);
12981
12982 rtnl_unlock();
12983}
12984
3646f0e5 12985static const struct pci_error_handlers bnx2x_err_handler = {
493adb1f 12986 .error_detected = bnx2x_io_error_detected,
356e2385
EG
12987 .slot_reset = bnx2x_io_slot_reset,
12988 .resume = bnx2x_io_resume,
493adb1f
WX
12989};
12990
b030ed2f
YM
12991static void bnx2x_shutdown(struct pci_dev *pdev)
12992{
12993 struct net_device *dev = pci_get_drvdata(pdev);
12994 struct bnx2x *bp;
12995
12996 if (!dev)
12997 return;
12998
12999 bp = netdev_priv(dev);
13000 if (!bp)
13001 return;
13002
13003 rtnl_lock();
13004 netif_device_detach(dev);
13005 rtnl_unlock();
13006
13007 /* Don't remove the netdevice, as there are scenarios which will cause
13008 * the kernel to hang, e.g., when trying to remove bnx2i while the
13009 * rootfs is mounted from SAN.
13010 */
13011 __bnx2x_remove(pdev, dev, bp, false);
13012}
13013
a2fbb9ea 13014static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
13015 .name = DRV_MODULE_NAME,
13016 .id_table = bnx2x_pci_tbl,
13017 .probe = bnx2x_init_one,
0329aba1 13018 .remove = bnx2x_remove_one,
493adb1f
WX
13019 .suspend = bnx2x_suspend,
13020 .resume = bnx2x_resume,
13021 .err_handler = &bnx2x_err_handler,
3c76feff
AE
13022#ifdef CONFIG_BNX2X_SRIOV
13023 .sriov_configure = bnx2x_sriov_configure,
13024#endif
b030ed2f 13025 .shutdown = bnx2x_shutdown,
a2fbb9ea
ET
13026};
13027
13028static int __init bnx2x_init(void)
13029{
dd21ca6d
SG
13030 int ret;
13031
7995c64e 13032 pr_info("%s", version);
938cf541 13033
1cf167f2
EG
13034 bnx2x_wq = create_singlethread_workqueue("bnx2x");
13035 if (bnx2x_wq == NULL) {
7995c64e 13036 pr_err("Cannot create workqueue\n");
1cf167f2
EG
13037 return -ENOMEM;
13038 }
13039
dd21ca6d
SG
13040 ret = pci_register_driver(&bnx2x_pci_driver);
13041 if (ret) {
7995c64e 13042 pr_err("Cannot register driver\n");
dd21ca6d
SG
13043 destroy_workqueue(bnx2x_wq);
13044 }
13045 return ret;
a2fbb9ea
ET
13046}
13047
13048static void __exit bnx2x_cleanup(void)
13049{
452427b0 13050 struct list_head *pos, *q;
a2fbb9ea 13051 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
13052
13053 destroy_workqueue(bnx2x_wq);
452427b0
YM
13054
13055	/* Free globally allocated resources */
13056 list_for_each_safe(pos, q, &bnx2x_prev_list) {
13057 struct bnx2x_prev_path_list *tmp =
13058 list_entry(pos, struct bnx2x_prev_path_list, list);
13059 list_del(pos);
13060 kfree(tmp);
13061 }
a2fbb9ea
ET
13062}
13063
3deb8167
YR
13064void bnx2x_notify_link_changed(struct bnx2x *bp)
13065{
13066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + BP_FUNC(bp)*sizeof(u32), 1);
13067}
13068
a2fbb9ea
ET
13069module_init(bnx2x_init);
13070module_exit(bnx2x_cleanup);
13071
619c5cb6
VZ
13072/**
13073 * bnx2x_set_iscsi_eth_mac_addr - set iSCSI MAC(s).
13074 *
13075 * @bp: driver handle
13076 * @set: set or clear the CAM entry
13077 *
13078 * This function will wait until the ramrod completion returns.
13079 * Return 0 if success, -ENODEV if ramrod doesn't return.
13080 */
1191cb83 13081static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp)
619c5cb6
VZ
13082{
13083 unsigned long ramrod_flags = 0;
13084
13085 __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
13086 return bnx2x_set_mac_one(bp, bp->cnic_eth_dev.iscsi_mac,
13087 &bp->iscsi_l2_mac_obj, true,
13088 BNX2X_ISCSI_ETH_MAC, &ramrod_flags);
13089}
993ac7b5
MC
13090
13091/* count denotes the number of new completions we have seen */
13092static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
13093{
13094 struct eth_spe *spe;
a052997e 13095 int cxt_index, cxt_offset;
993ac7b5
MC
13096
13097#ifdef BNX2X_STOP_ON_ERROR
13098 if (unlikely(bp->panic))
13099 return;
13100#endif
13101
13102 spin_lock_bh(&bp->spq_lock);
c2bff63f 13103 BUG_ON(bp->cnic_spq_pending < count);
993ac7b5
MC
13104 bp->cnic_spq_pending -= count;
13105
993ac7b5 13106
c2bff63f
DK
13107 for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
13108 u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
13109 & SPE_HDR_CONN_TYPE) >>
13110 SPE_HDR_CONN_TYPE_SHIFT;
619c5cb6
VZ
13111 u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->hdr.conn_and_cmd_data)
13112 >> SPE_HDR_CMD_ID_SHIFT) & 0xff;
c2bff63f
DK
13113
13114 /* Set validation for iSCSI L2 client before sending SETUP
13115 * ramrod
13116 */
13117 if (type == ETH_CONNECTION_TYPE) {
a052997e 13118 if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP) {
37ae41a9 13119 cxt_index = BNX2X_ISCSI_ETH_CID(bp) /
a052997e 13120 ILT_PAGE_CIDS;
37ae41a9 13121 cxt_offset = BNX2X_ISCSI_ETH_CID(bp) -
a052997e
MS
13122 (cxt_index * ILT_PAGE_CIDS);
13123 bnx2x_set_ctx_validation(bp,
13124 &bp->context[cxt_index].
13125 vcxt[cxt_offset].eth,
37ae41a9 13126 BNX2X_ISCSI_ETH_CID(bp));
a052997e 13127 }
c2bff63f
DK
13128 }
13129
619c5cb6
VZ
13130 /*
13131		 * There may be no more than 8 L2 and no more than 8 L5 SPEs
13132		 * in the air. We also check that the number of outstanding
6e30dd4e
VZ
13133		 * COMMON ramrods is not more than the EQ and SPQ can
13134		 * accommodate.
c2bff63f 13135 */
6e30dd4e
VZ
13136 if (type == ETH_CONNECTION_TYPE) {
13137 if (!atomic_read(&bp->cq_spq_left))
13138 break;
13139 else
13140 atomic_dec(&bp->cq_spq_left);
13141 } else if (type == NONE_CONNECTION_TYPE) {
13142 if (!atomic_read(&bp->eq_spq_left))
c2bff63f
DK
13143 break;
13144 else
6e30dd4e 13145 atomic_dec(&bp->eq_spq_left);
ec6ba945
VZ
13146 } else if ((type == ISCSI_CONNECTION_TYPE) ||
13147 (type == FCOE_CONNECTION_TYPE)) {
c2bff63f
DK
13148 if (bp->cnic_spq_pending >=
13149 bp->cnic_eth_dev.max_kwqe_pending)
13150 break;
13151 else
13152 bp->cnic_spq_pending++;
13153 } else {
13154 BNX2X_ERR("Unknown SPE type: %d\n", type);
13155 bnx2x_panic();
993ac7b5 13156 break;
c2bff63f 13157 }
993ac7b5
MC
13158
13159 spe = bnx2x_sp_get_next(bp);
13160 *spe = *bp->cnic_kwq_cons;
13161
51c1a580 13162 DP(BNX2X_MSG_SP, "pending on SPQ %d, on KWQ %d count %d\n",
993ac7b5
MC
13163 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
13164
13165 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
13166 bp->cnic_kwq_cons = bp->cnic_kwq;
13167 else
13168 bp->cnic_kwq_cons++;
13169 }
13170 bnx2x_sp_prod_update(bp);
13171 spin_unlock_bh(&bp->spq_lock);
13172}
13173
13174static int bnx2x_cnic_sp_queue(struct net_device *dev,
13175 struct kwqe_16 *kwqes[], u32 count)
13176{
13177 struct bnx2x *bp = netdev_priv(dev);
13178 int i;
13179
13180#ifdef BNX2X_STOP_ON_ERROR
51c1a580
MS
13181 if (unlikely(bp->panic)) {
13182 BNX2X_ERR("Can't post to SP queue while panic\n");
993ac7b5 13183 return -EIO;
51c1a580 13184 }
993ac7b5
MC
13185#endif
13186
95c6c616
AE
13187 if ((bp->recovery_state != BNX2X_RECOVERY_DONE) &&
13188 (bp->recovery_state != BNX2X_RECOVERY_NIC_LOADING)) {
51c1a580 13189 BNX2X_ERR("Handling parity error recovery. Try again later\n");
95c6c616
AE
13190 return -EAGAIN;
13191 }
13192
993ac7b5
MC
13193 spin_lock_bh(&bp->spq_lock);
13194
13195 for (i = 0; i < count; i++) {
13196 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
13197
13198 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
13199 break;
13200
13201 *bp->cnic_kwq_prod = *spe;
13202
13203 bp->cnic_kwq_pending++;
13204
51c1a580 13205 DP(BNX2X_MSG_SP, "L5 SPQE %x %x %x:%x pos %d\n",
993ac7b5 13206 spe->hdr.conn_and_cmd_data, spe->hdr.type,
523224a3
DK
13207 spe->data.update_data_addr.hi,
13208 spe->data.update_data_addr.lo,
993ac7b5
MC
13209 bp->cnic_kwq_pending);
13210
13211 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
13212 bp->cnic_kwq_prod = bp->cnic_kwq;
13213 else
13214 bp->cnic_kwq_prod++;
13215 }
13216
13217 spin_unlock_bh(&bp->spq_lock);
13218
13219 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
13220 bnx2x_cnic_sp_post(bp, 0);
13221
13222 return i;
13223}
13224
13225static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13226{
13227 struct cnic_ops *c_ops;
13228 int rc = 0;
13229
13230 mutex_lock(&bp->cnic_mutex);
13707f9e
ED
13231 c_ops = rcu_dereference_protected(bp->cnic_ops,
13232 lockdep_is_held(&bp->cnic_mutex));
993ac7b5
MC
13233 if (c_ops)
13234 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13235 mutex_unlock(&bp->cnic_mutex);
13236
13237 return rc;
13238}
13239
13240static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
13241{
13242 struct cnic_ops *c_ops;
13243 int rc = 0;
13244
13245 rcu_read_lock();
13246 c_ops = rcu_dereference(bp->cnic_ops);
13247 if (c_ops)
13248 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
13249 rcu_read_unlock();
13250
13251 return rc;
13252}
13253
13254/*
13255 * for commands that have no data
13256 */
9f6c9258 13257int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
993ac7b5
MC
13258{
13259 struct cnic_ctl_info ctl = {0};
13260
13261 ctl.cmd = cmd;
13262
13263 return bnx2x_cnic_ctl_send(bp, &ctl);
13264}
13265
619c5cb6 13266static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid, u8 err)
993ac7b5 13267{
619c5cb6 13268 struct cnic_ctl_info ctl = {0};
993ac7b5
MC
13269
13270 /* first we tell CNIC and only then we count this as a completion */
13271 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
13272 ctl.data.comp.cid = cid;
619c5cb6 13273 ctl.data.comp.error = err;
993ac7b5
MC
13274
13275 bnx2x_cnic_ctl_send_bh(bp, &ctl);
c2bff63f 13276 bnx2x_cnic_sp_post(bp, 0);
993ac7b5
MC
13277}
13278
619c5cb6
VZ
13279
13280/* Called with netif_addr_lock_bh() taken.
13281 * Sets an rx_mode config for an iSCSI ETH client.
13282 * Doesn't block.
13283 * Completion should be checked outside.
13284 */
13285static void bnx2x_set_iscsi_eth_rx_mode(struct bnx2x *bp, bool start)
13286{
13287 unsigned long accept_flags = 0, ramrod_flags = 0;
13288 u8 cl_id = bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
13289 int sched_state = BNX2X_FILTER_ISCSI_ETH_STOP_SCHED;
13290
13291 if (start) {
13292		/* Start accepting on the iSCSI L2 ring. Accept all
13293		 * multicasts, because that is the only way for the UIO
13294		 * queue to receive them (in non-promiscuous mode only one
13295		 * queue per function, the leading one in our case, will
13296		 * receive multicast packets).
13297		 */
13298 __set_bit(BNX2X_ACCEPT_UNICAST, &accept_flags);
13299 __set_bit(BNX2X_ACCEPT_ALL_MULTICAST, &accept_flags);
13300 __set_bit(BNX2X_ACCEPT_BROADCAST, &accept_flags);
13301 __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
13302
13303 /* Clear STOP_PENDING bit if START is requested */
13304 clear_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &bp->sp_state);
13305
13306 sched_state = BNX2X_FILTER_ISCSI_ETH_START_SCHED;
13307 } else
13308 /* Clear START_PENDING bit if STOP is requested */
13309 clear_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &bp->sp_state);
13310
13311 if (test_bit(BNX2X_FILTER_RX_MODE_PENDING, &bp->sp_state))
13312 set_bit(sched_state, &bp->sp_state);
13313 else {
13314 __set_bit(RAMROD_RX, &ramrod_flags);
13315 bnx2x_set_q_rx_mode(bp, cl_id, 0, accept_flags, 0,
13316 ramrod_flags);
13317 }
13318}
13319
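bnx2x_set_iscsi_eth_rx_mode() builds its accept_flags word by setting one bit per accept mode, then either defers the request (when an rx_mode ramrod is already pending) or issues it immediately. A tiny userspace sketch of the bit-flag composition is below; the bit numbers are made up for illustration and do not match the driver's BNX2X_ACCEPT_* values.

#include <stdio.h>

/* Illustrative bit numbers only; the real BNX2X_ACCEPT_* values differ. */
enum {
	ACCEPT_UNICAST,
	ACCEPT_ALL_MULTICAST,
	ACCEPT_BROADCAST,
	ACCEPT_ANY_VLAN,
};

static void set_flag(unsigned long *word, int bit)
{
	*word |= 1UL << bit;
}

static int test_flag(unsigned long word, int bit)
{
	return !!(word & (1UL << bit));
}

int main(void)
{
	unsigned long accept_flags = 0;

	set_flag(&accept_flags, ACCEPT_UNICAST);
	set_flag(&accept_flags, ACCEPT_ALL_MULTICAST);
	set_flag(&accept_flags, ACCEPT_BROADCAST);
	set_flag(&accept_flags, ACCEPT_ANY_VLAN);

	printf("accept_flags = %#lx, broadcast accepted: %d\n",
	       accept_flags, test_flag(accept_flags, ACCEPT_BROADCAST));
	return 0;
}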
13320
993ac7b5
MC
13321static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
13322{
13323 struct bnx2x *bp = netdev_priv(dev);
13324 int rc = 0;
13325
13326 switch (ctl->cmd) {
13327 case DRV_CTL_CTXTBL_WR_CMD: {
13328 u32 index = ctl->data.io.offset;
13329 dma_addr_t addr = ctl->data.io.dma_addr;
13330
13331 bnx2x_ilt_wr(bp, index, addr);
13332 break;
13333 }
13334
c2bff63f
DK
13335 case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
13336 int count = ctl->data.credit.credit_count;
993ac7b5
MC
13337
13338 bnx2x_cnic_sp_post(bp, count);
13339 break;
13340 }
13341
13342 /* rtnl_lock is held. */
13343 case DRV_CTL_START_L2_CMD: {
619c5cb6
VZ
13344 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13345 unsigned long sp_bits = 0;
13346
13347 /* Configure the iSCSI classification object */
13348 bnx2x_init_mac_obj(bp, &bp->iscsi_l2_mac_obj,
13349 cp->iscsi_l2_client_id,
13350 cp->iscsi_l2_cid, BP_FUNC(bp),
13351 bnx2x_sp(bp, mac_rdata),
13352 bnx2x_sp_mapping(bp, mac_rdata),
13353 BNX2X_FILTER_MAC_PENDING,
13354 &bp->sp_state, BNX2X_OBJ_TYPE_RX,
13355 &bp->macs_pool);
ec6ba945 13356
523224a3 13357 /* Set iSCSI MAC address */
619c5cb6
VZ
13358 rc = bnx2x_set_iscsi_eth_mac_addr(bp);
13359 if (rc)
13360 break;
523224a3
DK
13361
13362 mmiowb();
13363 barrier();
13364
619c5cb6
VZ
13365 /* Start accepting on iSCSI L2 ring */
13366
13367 netif_addr_lock_bh(dev);
13368 bnx2x_set_iscsi_eth_rx_mode(bp, true);
13369 netif_addr_unlock_bh(dev);
13370
13371 /* bits to wait on */
13372 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13373 __set_bit(BNX2X_FILTER_ISCSI_ETH_START_SCHED, &sp_bits);
13374
13375 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13376 BNX2X_ERR("rx_mode completion timed out!\n");
523224a3 13377
993ac7b5
MC
13378 break;
13379 }
13380
13381 /* rtnl_lock is held. */
13382 case DRV_CTL_STOP_L2_CMD: {
619c5cb6 13383 unsigned long sp_bits = 0;
993ac7b5 13384
523224a3 13385 /* Stop accepting on iSCSI L2 ring */
619c5cb6
VZ
13386 netif_addr_lock_bh(dev);
13387 bnx2x_set_iscsi_eth_rx_mode(bp, false);
13388 netif_addr_unlock_bh(dev);
13389
13390 /* bits to wait on */
13391 __set_bit(BNX2X_FILTER_RX_MODE_PENDING, &sp_bits);
13392 __set_bit(BNX2X_FILTER_ISCSI_ETH_STOP_SCHED, &sp_bits);
13393
13394 if (!bnx2x_wait_sp_comp(bp, sp_bits))
13395 BNX2X_ERR("rx_mode completion timed out!\n");
523224a3
DK
13396
13397 mmiowb();
13398 barrier();
13399
13400 /* Unset iSCSI L2 MAC */
619c5cb6
VZ
13401 rc = bnx2x_del_all_macs(bp, &bp->iscsi_l2_mac_obj,
13402 BNX2X_ISCSI_ETH_MAC, true);
993ac7b5
MC
13403 break;
13404 }
c2bff63f
DK
13405 case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
13406 int count = ctl->data.credit.credit_count;
13407
13408 smp_mb__before_atomic_inc();
6e30dd4e 13409 atomic_add(count, &bp->cq_spq_left);
c2bff63f
DK
13410 smp_mb__after_atomic_inc();
13411 break;
13412 }
1d187b34 13413 case DRV_CTL_ULP_REGISTER_CMD: {
2e499d3c 13414 int ulp_type = ctl->data.register_data.ulp_type;
1d187b34
BW
13415
13416 if (CHIP_IS_E3(bp)) {
13417 int idx = BP_FW_MB_IDX(bp);
2e499d3c
BW
13418 u32 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13419 int path = BP_PATH(bp);
13420 int port = BP_PORT(bp);
13421 int i;
13422 u32 scratch_offset;
13423 u32 *host_addr;
1d187b34 13424
2e499d3c 13425 /* first write capability to shmem2 */
1d187b34
BW
13426 if (ulp_type == CNIC_ULP_ISCSI)
13427 cap |= DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13428 else if (ulp_type == CNIC_ULP_FCOE)
13429 cap |= DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13430 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
2e499d3c
BW
13431
13432 if ((ulp_type != CNIC_ULP_FCOE) ||
13433 (!SHMEM2_HAS(bp, ncsi_oem_data_addr)) ||
13434 (!(bp->flags & BC_SUPPORTS_FCOE_FEATURES)))
13435 break;
13436
13437			/* if we got here, we should write the FCoE capabilities */
13438 scratch_offset = SHMEM2_RD(bp, ncsi_oem_data_addr);
13439 if (!scratch_offset)
13440 break;
13441 scratch_offset += offsetof(struct glob_ncsi_oem_data,
13442 fcoe_features[path][port]);
13443 host_addr = (u32 *) &(ctl->data.register_data.
13444 fcoe_features);
13445 for (i = 0; i < sizeof(struct fcoe_capabilities);
13446 i += 4)
13447 REG_WR(bp, scratch_offset + i,
13448 *(host_addr + i/4));
1d187b34
BW
13449 }
13450 break;
13451 }
2e499d3c 13452
1d187b34
BW
13453 case DRV_CTL_ULP_UNREGISTER_CMD: {
13454 int ulp_type = ctl->data.ulp_type;
13455
13456 if (CHIP_IS_E3(bp)) {
13457 int idx = BP_FW_MB_IDX(bp);
13458 u32 cap;
13459
13460 cap = SHMEM2_RD(bp, drv_capabilities_flag[idx]);
13461 if (ulp_type == CNIC_ULP_ISCSI)
13462 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_ISCSI;
13463 else if (ulp_type == CNIC_ULP_FCOE)
13464 cap &= ~DRV_FLAGS_CAPABILITIES_LOADED_FCOE;
13465 SHMEM2_WR(bp, drv_capabilities_flag[idx], cap);
13466 }
13467 break;
13468 }
993ac7b5
MC
13469
13470 default:
13471 BNX2X_ERR("unknown command %x\n", ctl->cmd);
13472 rc = -EINVAL;
13473 }
13474
13475 return rc;
13476}
13477
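bnx2x_drv_ctl() is the driver's control entry point for CNIC: a command code selects one arm of the switch and the matching member of the payload union, with -EINVAL returned for anything unknown. The sketch below shows the same command-plus-union dispatch shape in plain C; the command names and payload layout are invented for the example.

#include <errno.h>
#include <stdio.h>

enum ctl_cmd {
	CTL_WRITE_CTX,		/* invented stand-in for a context-write command */
	CTL_RETURN_CREDIT,	/* invented stand-in for the credit-return commands */
};

struct ctl_info {
	enum ctl_cmd cmd;
	union {
		struct { unsigned int index; unsigned long addr; } io;
		struct { int credit_count; } credit;
	} data;
};

static int drv_ctl(const struct ctl_info *ctl)
{
	switch (ctl->cmd) {
	case CTL_WRITE_CTX:
		printf("ctx write: index %u addr %#lx\n",
		       ctl->data.io.index, ctl->data.io.addr);
		return 0;
	case CTL_RETURN_CREDIT:
		printf("return %d credits\n", ctl->data.credit.credit_count);
		return 0;
	default:
		fprintf(stderr, "unknown command %d\n", ctl->cmd);
		return -EINVAL;
	}
}

int main(void)
{
	struct ctl_info c = {
		.cmd = CTL_RETURN_CREDIT,
		.data.credit.credit_count = 3,
	};

	return drv_ctl(&c) ? 1 : 0;
}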
9f6c9258 13478void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
993ac7b5
MC
13479{
13480 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13481
13482 if (bp->flags & USING_MSIX_FLAG) {
13483 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
13484 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
13485 cp->irq_arr[0].vector = bp->msix_table[1].vector;
13486 } else {
13487 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
13488 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
13489 }
619c5cb6 13490 if (!CHIP_IS_E1x(bp))
f2e0899f
DK
13491 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
13492 else
13493 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
13494
619c5cb6
VZ
13495 cp->irq_arr[0].status_blk_num = bnx2x_cnic_fw_sb_id(bp);
13496 cp->irq_arr[0].status_blk_num2 = bnx2x_cnic_igu_sb_id(bp);
993ac7b5
MC
13497 cp->irq_arr[1].status_blk = bp->def_status_blk;
13498 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
523224a3 13499 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
993ac7b5
MC
13500
13501 cp->num_irq = 2;
13502}
13503
37ae41a9
MS
13504void bnx2x_setup_cnic_info(struct bnx2x *bp)
13505{
13506 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13507
13508
13509 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13510 bnx2x_cid_ilt_lines(bp);
13511 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
13512 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
13513 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
13514
13515 if (NO_ISCSI_OOO(bp))
13516 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13517}
13518
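bnx2x_setup_cnic_info() (and bnx2x_cnic_probe() below) place the CNIC context table right after the ILT lines used by the L2 CIDs and derive the first CNIC CID as "L2 ILT lines * CIDs per ILT page". The following back-of-the-envelope sketch merely restates that arithmetic; the constants are example values, not the chip's real ILT_PAGE_CIDS.

#include <stdio.h>

#define ILT_PAGE_CIDS_EXAMPLE	64	/* assumed CIDs per ILT page */

/* First CNIC CID sits right after the CIDs covered by the L2 ILT lines. */
static unsigned int starting_cid(unsigned int l2_ilt_lines)
{
	return l2_ilt_lines * ILT_PAGE_CIDS_EXAMPLE;
}

int main(void)
{
	unsigned int lines = 4;		/* pretend L2 CIDs occupy 4 ILT lines */

	printf("CNIC ctx table starts after line %u, first CNIC cid = %u\n",
	       lines, starting_cid(lines));
	return 0;
}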
993ac7b5
MC
13519static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
13520 void *data)
13521{
13522 struct bnx2x *bp = netdev_priv(dev);
13523 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
55c11941
MS
13524 int rc;
13525
13526 DP(NETIF_MSG_IFUP, "Register_cnic called\n");
993ac7b5 13527
51c1a580
MS
13528 if (ops == NULL) {
13529 BNX2X_ERR("NULL ops received\n");
993ac7b5 13530 return -EINVAL;
51c1a580 13531 }
993ac7b5 13532
55c11941
MS
13533 if (!CNIC_SUPPORT(bp)) {
13534 BNX2X_ERR("Can't register CNIC when not supported\n");
13535 return -EOPNOTSUPP;
13536 }
13537
13538 if (!CNIC_LOADED(bp)) {
13539 rc = bnx2x_load_cnic(bp);
13540 if (rc) {
13541 BNX2X_ERR("CNIC-related load failed\n");
13542 return rc;
13543 }
13544
13545 }
13546
13547 bp->cnic_enabled = true;
13548
993ac7b5
MC
13549 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
13550 if (!bp->cnic_kwq)
13551 return -ENOMEM;
13552
13553 bp->cnic_kwq_cons = bp->cnic_kwq;
13554 bp->cnic_kwq_prod = bp->cnic_kwq;
13555 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
13556
13557 bp->cnic_spq_pending = 0;
13558 bp->cnic_kwq_pending = 0;
13559
13560 bp->cnic_data = data;
13561
13562 cp->num_irq = 0;
619c5cb6 13563 cp->drv_state |= CNIC_DRV_STATE_REGD;
523224a3 13564 cp->iro_arr = bp->iro_arr;
993ac7b5 13565
993ac7b5 13566 bnx2x_setup_cnic_irq_info(bp);
c2bff63f 13567
993ac7b5
MC
13568 rcu_assign_pointer(bp->cnic_ops, ops);
13569
13570 return 0;
13571}
13572
13573static int bnx2x_unregister_cnic(struct net_device *dev)
13574{
13575 struct bnx2x *bp = netdev_priv(dev);
13576 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13577
13578 mutex_lock(&bp->cnic_mutex);
993ac7b5 13579 cp->drv_state = 0;
2cfa5a04 13580 RCU_INIT_POINTER(bp->cnic_ops, NULL);
993ac7b5
MC
13581 mutex_unlock(&bp->cnic_mutex);
13582 synchronize_rcu();
fea75645 13583 bp->cnic_enabled = false;
993ac7b5
MC
13584 kfree(bp->cnic_kwq);
13585 bp->cnic_kwq = NULL;
13586
13587 return 0;
13588}
13589
13590struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
13591{
13592 struct bnx2x *bp = netdev_priv(dev);
13593 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
13594
2ba45142
VZ
13595	/* If both iSCSI and FCoE are disabled - return NULL in
13596	 * order to indicate to CNIC that it should not try to work
13597	 * with this device.
13598	 */
13599 if (NO_ISCSI(bp) && NO_FCOE(bp))
13600 return NULL;
13601
993ac7b5
MC
13602 cp->drv_owner = THIS_MODULE;
13603 cp->chip_id = CHIP_ID(bp);
13604 cp->pdev = bp->pdev;
13605 cp->io_base = bp->regview;
13606 cp->io_base2 = bp->doorbells;
13607 cp->max_kwqe_pending = 8;
523224a3 13608 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
c2bff63f
DK
13609 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
13610 bnx2x_cid_ilt_lines(bp);
993ac7b5 13611 cp->ctx_tbl_len = CNIC_ILT_LINES;
c2bff63f 13612 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
993ac7b5
MC
13613 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
13614 cp->drv_ctl = bnx2x_drv_ctl;
13615 cp->drv_register_cnic = bnx2x_register_cnic;
13616 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
37ae41a9 13617 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID(bp);
619c5cb6
VZ
13618 cp->iscsi_l2_client_id =
13619 bnx2x_cnic_eth_cl_id(bp, BNX2X_ISCSI_ETH_CL_ID_IDX);
37ae41a9 13620 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID(bp);
c2bff63f 13621
2ba45142
VZ
13622 if (NO_ISCSI_OOO(bp))
13623 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI_OOO;
13624
13625 if (NO_ISCSI(bp))
13626 cp->drv_state |= CNIC_DRV_STATE_NO_ISCSI;
13627
13628 if (NO_FCOE(bp))
13629 cp->drv_state |= CNIC_DRV_STATE_NO_FCOE;
13630
51c1a580
MS
13631 BNX2X_DEV_INFO(
13632 "page_size %d, tbl_offset %d, tbl_lines %d, starting cid %d\n",
c2bff63f
DK
13633 cp->ctx_blk_size,
13634 cp->ctx_tbl_offset,
13635 cp->ctx_tbl_len,
13636 cp->starting_cid);
993ac7b5
MC
13637 return cp;
13638}
993ac7b5 13639
6411280a 13640u32 bnx2x_rx_ustorm_prods_offset(struct bnx2x_fastpath *fp)
9b176b6b 13641{
6411280a
AE
13642 struct bnx2x *bp = fp->bp;
13643 u32 offset = BAR_USTRORM_INTMEM;
abc5a021 13644
6411280a
AE
13645 if (IS_VF(bp))
13646 return bnx2x_vf_ustorm_prods_offset(bp, fp);
13647 else if (!CHIP_IS_E1x(bp))
13648 offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id);
13649 else
13650 offset += USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
8d9ac297 13651
6411280a 13652 return offset;
8d9ac297 13653}
381ac16b 13654
6411280a
AE
13655/* called only on E1H or E2.
13656 * When pretending to be PF, the pretend value is the function number 0...7
13657 * When pretending to be VF, the pretend val is the PF-num:VF-valid:ABS-VFID
13658 * combination
13659 */
13660int bnx2x_pretend_func(struct bnx2x *bp, u16 pretend_func_val)
381ac16b 13661{
6411280a 13662 u32 pretend_reg;
381ac16b 13663
23826850 13664 if (CHIP_IS_E1H(bp) && pretend_func_val >= E1H_FUNC_MAX)
6411280a 13665 return -1;
381ac16b 13666
6411280a
AE
13667 /* get my own pretend register */
13668 pretend_reg = bnx2x_get_pretend_reg(bp);
13669 REG_WR(bp, pretend_reg, pretend_func_val);
13670 REG_RD(bp, pretend_reg);
381ac16b
AE
13671 return 0;
13672}
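bnx2x_pretend_func() writes the pretend value and immediately reads the register back, which forces the posted PCI write to reach the chip before the caller continues under the pretended function. The sketch below reproduces the write-then-read-back idiom with a volatile variable standing in for the BAR-mapped register (REG_WR()/REG_RD() in the driver); names and values are illustrative.

#include <stdio.h>

/* Volatile variable standing in for the BAR-mapped pretend register. */
static volatile unsigned int pretend_reg;

static void reg_wr(volatile unsigned int *reg, unsigned int val)
{
	*reg = val;
}

static unsigned int reg_rd(volatile unsigned int *reg)
{
	return *reg;
}

static int pretend_func(unsigned int pretend_val, unsigned int func_max)
{
	if (pretend_val >= func_max)
		return -1;

	reg_wr(&pretend_reg, pretend_val);
	(void)reg_rd(&pretend_reg);	/* read back to flush the posted write */
	return 0;
}

int main(void)
{
	printf("pretend(3) -> %d, register now holds %u\n",
	       pretend_func(3, 8), pretend_reg);
	return 0;
}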